repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mne-tools/mne-python | mne/preprocessing/ecg.py | 9 | 15061 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import numpy as np
from ..annotations import _annotations_starts_stops
from ..utils import logger, verbose, sum_squared, warn, int_like
from ..filter import filter_data
from ..epochs import Epochs, BaseEpochs
from ..io.base import BaseRaw
from ..evoked import Evoked
from ..io import RawArray
from ..io.meas_info import create_info
from ..io.pick import _picks_to_idx, pick_types, pick_channels
@verbose
def qrs_detector(sfreq, ecg, thresh_value=0.6, levels=2.5, n_thresh=3,
                 l_freq=5, h_freq=35, tstart=0, filter_length='10s',
                 verbose=None):
    """Detect QRS component in ECG channels.
    QRS is the main wave on the heart beat.
    Parameters
    ----------
    sfreq : float
        Sampling rate
    ecg : array
        ECG signal
    thresh_value : float | str
        qrs detection threshold. Can also be "auto" for automatic
        selection of threshold.
    levels : float
        number of std from mean to include for detection
    n_thresh : int
        max number of crossings
    l_freq : float
        Low pass frequency
    h_freq : float
        High pass frequency
    %(tstart_ecg)s
    %(filter_length_ecg)s
    %(verbose)s
    Returns
    -------
    events : array
        Indices of ECG peaks.
    """
    # Scan window of half a second, i.e. one beat at 120 bpm
    win_size = int(round((60.0 * sfreq) / 120.0))
    filtecg = filter_data(ecg, sfreq, l_freq, h_freq, None, filter_length,
                          0.5, 0.5, phase='zero-double', fir_window='hann',
                          fir_design='firwin2')
    # Rectify so R waves of either polarity become positive peaks
    ecg_abs = np.abs(filtecg)
    init = int(sfreq)
    # Discard the first `tstart` seconds from detection
    n_samples_start = int(sfreq * tstart)
    ecg_abs = ecg_abs[n_samples_start:]
    n_points = len(ecg_abs)
    # Estimate the typical peak amplitude from the first three 1-s chunks
    maxpt = np.empty(3)
    maxpt[0] = np.max(ecg_abs[:init])
    maxpt[1] = np.max(ecg_abs[init:init * 2])
    maxpt[2] = np.max(ecg_abs[init * 2:init * 3])
    init_max = np.mean(maxpt)
    if thresh_value == 'auto':
        thresh_runs = np.arange(0.3, 1.1, 0.05)
    elif isinstance(thresh_value, str):
        raise ValueError('threshold value must be "auto" or a float')
    else:
        thresh_runs = [thresh_value]
    # Try a few thresholds (or just one)
    clean_events = list()
    for thresh_value in thresh_runs:
        thresh1 = init_max * thresh_value
        numcross = list()
        time = list()
        rms = list()
        ii = 0
        while ii < (n_points - win_size):
            window = ecg_abs[ii:ii + win_size]
            if window[0] > thresh1:
                # Candidate beat: record peak position, number of
                # threshold crossings within the window, and window RMS
                max_time = np.argmax(window)
                time.append(ii + max_time)
                nx = np.sum(np.diff(((window > thresh1).astype(np.int64) ==
                                     1).astype(int)))
                numcross.append(nx)
                rms.append(np.sqrt(sum_squared(window) / window.size))
                ii += win_size
            else:
                ii += 1
        if len(rms) == 0:
            # No candidates at all; keep dummy entries so stats below work
            rms.append(0.0)
            time.append(0.0)
        time = np.array(time)
        # Reject candidates whose RMS is an outlier (> levels std above
        # mean) or that cross the threshold too often (noisy windows)
        rms_mean = np.mean(rms)
        rms_std = np.std(rms)
        rms_thresh = rms_mean + (rms_std * levels)
        b = np.where(rms < rms_thresh)[0]
        a = np.array(numcross)[b]
        ce = time[b[a < n_thresh]]
        # Shift back to indices relative to the full (untruncated) signal
        ce += n_samples_start
        if ce.size > 0:  # We actually found an event
            clean_events.append(ce)
    if clean_events:
        # pick the best threshold; first get effective heart rates
        rates = np.array([60. * len(cev) / (len(ecg) / float(sfreq))
                          for cev in clean_events])
        # now find heart rates that seem reasonable (infant through adult
        # athlete)
        idx = np.where(np.logical_and(rates <= 160., rates >= 40.))[0]
        if idx.size > 0:
            ideal_rate = np.median(rates[idx])  # get close to the median
        else:
            ideal_rate = 80.  # get close to a reasonable default
        idx = np.argmin(np.abs(rates - ideal_rate))
        clean_events = clean_events[idx]
    else:
        clean_events = np.array([])
    return clean_events
@verbose
def find_ecg_events(raw, event_id=999, ch_name=None, tstart=0.0,
                    l_freq=5, h_freq=35, qrs_threshold='auto',
                    filter_length='10s', return_ecg=False,
                    reject_by_annotation=True, verbose=None):
    """Find ECG events by localizing the R wave peaks.
    Parameters
    ----------
    raw : instance of Raw
        The raw data.
    %(event_id_ecg)s
    %(ch_name_ecg)s
    %(tstart_ecg)s
    %(l_freq_ecg_filter)s
    qrs_threshold : float | str
        Between 0 and 1. qrs detection threshold. Can also be "auto" to
        automatically choose the threshold that generates a reasonable
        number of heartbeats (40-160 beats / min).
    %(filter_length_ecg)s
    return_ecg : bool
        Return the ECG data. This is especially useful if no ECG channel
        is present in the input data, so one will be synthesized. Defaults to
        ``False``.
    %(reject_by_annotation_all)s
        .. versionadded:: 0.18
    %(verbose)s
    Returns
    -------
    ecg_events : array
        The events corresponding to the peaks of the R waves.
    ch_ecg : string
        Name of channel used.
    average_pulse : float
        The estimated average pulse. If no ECG events could be found, this will
        be zero.
    ecg : array | None
        The ECG data of the synthesized ECG channel, if any. This will only
        be returned if ``return_ecg=True`` was passed.
    See Also
    --------
    create_ecg_epochs
    compute_proj_ecg
    """
    skip_by_annotation = ('edge', 'bad') if reject_by_annotation else ()
    del reject_by_annotation
    # Use a real ECG channel if one exists, otherwise synthesize one from
    # the MEG channels
    idx_ecg = _get_ecg_channel_index(ch_name, raw)
    if idx_ecg is not None:
        logger.info('Using channel %s to identify heart beats.'
                    % raw.ch_names[idx_ecg])
        ecg = raw.get_data(picks=idx_ecg)
    else:
        ecg, _ = _make_ecg(raw, start=None, stop=None)
    assert ecg.ndim == 2 and ecg.shape[0] == 1
    ecg = ecg[0]
    # Deal with filtering the same way we do in raw, i.e. filter each good
    # segment
    onsets, ends = _annotations_starts_stops(
        raw, skip_by_annotation, 'reject_by_annotation', invert=True)
    ecgs = list()
    max_idx = (ends - onsets).argmax()
    for si, (start, stop) in enumerate(zip(onsets, ends)):
        # Only output filter params once (for info level), and only warn
        # once about the length criterion (longest segment is too short)
        use_verbose = verbose if si == max_idx else 'error'
        ecgs.append(filter_data(
            ecg[start:stop], raw.info['sfreq'], l_freq, h_freq, [0],
            filter_length, 0.5, 0.5, 1, 'fir', None, copy=False,
            phase='zero-double', fir_window='hann', fir_design='firwin2',
            verbose=use_verbose))
    ecg = np.concatenate(ecgs)
    # detecting QRS and generating events. Since not user-controlled, don't
    # output filter params here (hardcode verbose=False)
    ecg_events = qrs_detector(raw.info['sfreq'], ecg, tstart=tstart,
                              thresh_value=qrs_threshold, l_freq=None,
                              h_freq=None, verbose=False)
    # map ECG events back to original times
    # (the concatenated good segments use a compressed index space)
    remap = np.empty(len(ecg), int)
    offset = 0
    for start, stop in zip(onsets, ends):
        this_len = stop - start
        assert this_len >= 0
        remap[offset:offset + this_len] = np.arange(start, stop)
        offset += this_len
    assert offset == len(ecg)
    if ecg_events.size > 0:
        ecg_events = remap[ecg_events]
    else:
        ecg_events = np.array([])
    n_events = len(ecg_events)
    duration_sec = len(ecg) / raw.info['sfreq'] - tstart
    duration_min = duration_sec / 60.
    average_pulse = n_events / duration_min
    logger.info("Number of ECG events detected : %d (average pulse %d / "
                "min.)" % (n_events, average_pulse))
    # Build the standard (n_events, 3) MNE events array in absolute samples
    ecg_events = np.array([ecg_events + raw.first_samp,
                           np.zeros(n_events, int),
                           event_id * np.ones(n_events, int)]).T
    out = (ecg_events, idx_ecg, average_pulse)
    ecg = ecg[np.newaxis]  # backward compat output 2D
    if return_ecg:
        out += (ecg,)
    return out
def _get_ecg_channel_index(ch_name, inst):
    """Get ECG channel index, if no channel found returns None."""
    # Either look up the explicitly requested channel, or auto-detect
    # channels of ECG type.
    if ch_name is not None:
        if ch_name not in inst.ch_names:
            raise ValueError('%s not in channel list (%s)' %
                             (ch_name, inst.ch_names))
        candidates = pick_channels(inst.ch_names, include=[ch_name])
    else:
        candidates = pick_types(inst.info, meg=False, eeg=False, stim=False,
                                eog=False, ecg=True, emg=False, ref_meg=False,
                                exclude='bads')
    if len(candidates) == 0:
        return None
    if len(candidates) > 1:
        # Ambiguous match: fall back to the first candidate with a warning
        warn('More than one ECG channel found. Using only %s.'
             % inst.ch_names[candidates[0]])
    return candidates[0]
@verbose
def create_ecg_epochs(raw, ch_name=None, event_id=999, picks=None, tmin=-0.5,
                      tmax=0.5, l_freq=8, h_freq=16, reject=None, flat=None,
                      baseline=None, preload=True, keep_ecg=False,
                      reject_by_annotation=True, decim=1, verbose=None):
    """Conveniently generate epochs around ECG artifact events.
    %(create_ecg_epochs)s
    .. note:: Filtering is only applied to the ECG channel while finding
              events. The resulting ``ecg_epochs`` will have no filtering
              applied (i.e., have the same filter properties as the input
              ``raw`` instance).
    Parameters
    ----------
    raw : instance of Raw
        The raw data.
    %(ch_name_ecg)s
    %(event_id_ecg)s
    %(picks_all)s
    tmin : float
        Start time before event.
    tmax : float
        End time after event.
    %(l_freq_ecg_filter)s
    %(reject_epochs)s
    %(flat)s
    %(baseline_epochs)s
    preload : bool
        Preload epochs or not (default True). Must be True if
        keep_ecg is True.
    keep_ecg : bool
        When ECG is synthetically created (after picking), should it be added
        to the epochs? Must be False when synthetic channel is not used.
        Defaults to False.
    %(reject_by_annotation_epochs)s
        .. versionadded:: 0.14.0
    %(decim)s
        .. versionadded:: 0.21.0
    %(verbose)s
    Returns
    -------
    ecg_epochs : instance of Epochs
        Data epoched around ECG R wave peaks.
    See Also
    --------
    find_ecg_events
    compute_proj_ecg
    Notes
    -----
    If you already have a list of R-peak times, or want to compute R-peaks
    outside MNE-Python using a different algorithm, the recommended approach is
    to call the :class:`~mne.Epochs` constructor directly, with your R-peaks
    formatted as an :term:`events` array (here we also demonstrate the relevant
    default values)::
        mne.Epochs(raw, r_peak_events_array, tmin=-0.5, tmax=0.5,
                   baseline=None, preload=True, proj=False)  # doctest: +SKIP
    """
    # keep_ecg only makes sense when we synthesize the channel ourselves
    # (no real ECG channel present) and can modify preloaded data
    has_ecg = 'ecg' in raw or ch_name is not None
    if keep_ecg and (has_ecg or not preload):
        raise ValueError('keep_ecg can be True only if the ECG channel is '
                         'created synthetically and preload=True.')
    events, _, _, ecg = find_ecg_events(
        raw, ch_name=ch_name, event_id=event_id, l_freq=l_freq, h_freq=h_freq,
        return_ecg=True, reject_by_annotation=reject_by_annotation)
    picks = _picks_to_idx(raw.info, picks, 'all', exclude=())
    # create epochs around ECG events and baseline (important)
    ecg_epochs = Epochs(raw, events=events, event_id=event_id,
                        tmin=tmin, tmax=tmax, proj=False, flat=flat,
                        picks=picks, reject=reject, baseline=baseline,
                        reject_by_annotation=reject_by_annotation,
                        preload=preload, decim=decim)
    if keep_ecg:
        # We know we have created a synthetic channel and epochs are preloaded
        ecg_raw = RawArray(
            ecg, create_info(ch_names=['ECG-SYN'],
                             sfreq=raw.info['sfreq'], ch_types=['ecg']),
            first_samp=raw.first_samp)
        # Copy measurement info over so the synthetic Raw is compatible
        # with the original for add_channels below
        with ecg_raw.info._unlock():
            ignore = ['ch_names', 'chs', 'nchan', 'bads']
            for k, v in raw.info.items():
                if k not in ignore:
                    ecg_raw.info[k] = v
        syn_epochs = Epochs(ecg_raw, events=ecg_epochs.events,
                            event_id=event_id, tmin=tmin, tmax=tmax,
                            proj=False, picks=[0], baseline=baseline,
                            decim=decim, preload=True)
        ecg_epochs = ecg_epochs.add_channels([syn_epochs])
    return ecg_epochs
@verbose
def _make_ecg(inst, start, stop, reject_by_annotation=False, verbose=None):
    """Create ECG signal from cross channel average.

    Parameters
    ----------
    inst : instance of Raw, Epochs, or Evoked
        Instance to synthesize an ECG channel from; must contain
        magnetometer or gradiometer channels.
    start : float | int | None
        Start time in seconds, or (Raw only) start sample if an integer.
    stop : float | int | None
        Stop time in seconds, or (Raw only) stop sample if an integer.
    reject_by_annotation : bool
        If True (Raw only), omit spans annotated as bad.
    %(verbose)s

    Returns
    -------
    ecg : array, shape (1, n_times)
        The synthesized ECG trace (mean over the picked MEG channels).
    times : array
        The corresponding time points.
    """
    if not any(c in inst for c in ['mag', 'grad']):
        raise ValueError('Unable to generate artificial ECG channel')
    # Prefer magnetometers when present, otherwise fall back to gradiometers
    for ch in ['mag', 'grad']:
        if ch in inst:
            break
    logger.info('Reconstructing ECG signal from {}'
                .format({'mag': 'Magnetometers',
                         'grad': 'Gradiometers'}[ch]))
    picks = pick_types(inst.info, meg=ch, eeg=False, ref_meg=False)
    # Map start/stop onto the correct keyword names: Raw accepts integer
    # sample indices via start/stop, Epochs/Evoked only times via tmin/tmax.
    msg = ('integer arguments for the start and stop parameters are '
           'not supported for Epochs and Evoked objects. Please '
           'consider using float arguments specifying start and stop '
           'time in seconds.')
    begin_param_name = 'tmin'
    if isinstance(start, int_like):
        if isinstance(inst, BaseRaw):
            # Raw has start param, can just use int
            begin_param_name = 'start'
        else:
            raise ValueError(msg)
    end_param_name = 'tmax'
    # Bug fix: this previously re-checked ``start`` instead of ``stop``, so
    # an integer ``stop`` was passed to Raw.get_data as ``tmax`` (TypeError)
    if isinstance(stop, int_like):
        if isinstance(inst, BaseRaw):
            # Raw has stop param, can just use int
            end_param_name = 'stop'
        else:
            raise ValueError(msg)
    kwargs = {begin_param_name: start, end_param_name: stop}
    if isinstance(inst, BaseRaw):
        reject_by_annotation = 'omit' if reject_by_annotation else None
        ecg, times = inst.get_data(picks, return_times=True, **kwargs,
                                   reject_by_annotation=reject_by_annotation)
    elif isinstance(inst, BaseEpochs):
        # Concatenate epochs along time to get one continuous trace
        ecg = np.hstack(inst.copy().get_data(picks, **kwargs))
        times = inst.times
    elif isinstance(inst, Evoked):
        ecg = inst.get_data(picks, **kwargs)
        times = inst.times
    else:
        # Previously fell through to a NameError; raise a clear error instead
        raise TypeError('inst must be an instance of Raw, Epochs, or Evoked')
    # Average across channels -> single synthetic ECG trace, kept 2D
    return ecg.mean(0, keepdims=True), times
| bsd-3-clause | bf387dc0a62472a02cd9cb6dcf8e3989 | 34.437647 | 79 | 0.57712 | 3.52469 | false | false | false | false |
ergo/ziggurat_foundations | ziggurat_foundations/models/resource.py | 1 | 3700 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declared_attr
from ziggurat_foundations.exc import ZigguratException
from ziggurat_foundations.models.base import BaseModel
__all__ = ["ResourceMixin"]
class ResourceMixin(BaseModel):
    """
    Mixin for Resource model

    Declares the ``resources`` table: a self-referential tree of named,
    typed resources with optional owning user/group, plus relationships to
    per-user and per-group permission rows.
    """
    # Subclasses list the permission names that may be granted on them;
    # enforced in validate_permission below.
    __possible_permissions__ = ()
    @declared_attr
    def __tablename__(self):
        return "resources"
    @declared_attr
    def resource_id(self):
        # Surrogate primary key
        return sa.Column(
            sa.Integer(), primary_key=True, nullable=False, autoincrement=True
        )
    @declared_attr
    def parent_id(self):
        # Self-referential FK forming the resource tree; orphaned children
        # get NULL parent on delete
        return sa.Column(
            sa.Integer(),
            sa.ForeignKey(
                "resources.resource_id", onupdate="CASCADE", ondelete="SET NULL"
            ),
        )
    @declared_attr
    def ordering(self):
        # Position among siblings (see __repr__)
        return sa.Column(sa.Integer(), default=0, nullable=False)
    @declared_attr
    def resource_name(self):
        return sa.Column(sa.Unicode(100), nullable=False)
    @declared_attr
    def resource_type(self):
        # Polymorphic discriminator (see __mapper_args__ below)
        return sa.Column(sa.Unicode(30), nullable=False)
    @declared_attr
    def owner_group_id(self):
        return sa.Column(
            sa.Integer,
            sa.ForeignKey("groups.id", onupdate="CASCADE", ondelete="SET NULL"),
            index=True,
        )
    @declared_attr
    def owner_user_id(self):
        return sa.Column(
            sa.Integer,
            sa.ForeignKey("users.id", onupdate="CASCADE", ondelete="SET NULL"),
            index=True,
        )
    @declared_attr
    def group_permissions(self):
        """Return all group permissions for this resource."""
        return sa.orm.relationship(
            "GroupResourcePermission",
            cascade="all, delete-orphan",
            passive_deletes=True,
            passive_updates=True,
        )
    @declared_attr
    def user_permissions(self):
        """Return all user permissions for this resource."""
        return sa.orm.relationship(
            "UserResourcePermission",
            cascade="all, delete-orphan",
            passive_deletes=True,
            passive_updates=True,
        )
    @declared_attr
    def groups(self):
        """Return all groups that have permissions for this resource."""
        return sa.orm.relationship(
            "Group",
            secondary="groups_resources_permissions",
            passive_deletes=True,
            passive_updates=True,
        )
    @declared_attr
    def users(self):
        """Return all users that have permissions for this resource."""
        return sa.orm.relationship(
            "User",
            secondary="users_resources_permissions",
            passive_deletes=True,
            passive_updates=True,
        )
    __mapper_args__ = {"polymorphic_on": resource_type}
    __table_args__ = {"mysql_engine": "InnoDB", "mysql_charset": "utf8"}
    def __repr__(self):
        return "<Resource: %s, %s, id: %s position: %s>" % (
            self.resource_type,
            self.resource_name,
            self.resource_id,
            self.ordering,
        )
    @property
    def __acl__(self):
        # Subclasses must provide their own ACL for the auth framework
        raise ZigguratException("Model should implement __acl__")
    @sa.orm.validates("user_permissions", "group_permissions")
    def validate_permission(self, key, permission):
        """Validate if resource can have specific permission."""
        if permission.perm_name not in self.__possible_permissions__:
            raise AssertionError(
                "perm_name is not one of {}".format(self.__possible_permissions__)
            )
        return permission
| bsd-3-clause | fee78a4d64d2724612683a69c5b0ba80 | 27.682171 | 82 | 0.585135 | 4.32243 | false | false | false | false |
mne-tools/mne-python | mne/viz/tests/test_misc.py | 5 | 11961 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal
import pytest
import matplotlib.pyplot as plt
from mne import (read_events, read_cov, read_source_spaces, read_evokeds,
read_dipole, SourceEstimate, pick_events)
from mne.chpi import compute_chpi_snr
from mne.datasets import testing
from mne.filter import create_filter
from mne.io import read_raw_fif
from mne.minimum_norm import read_inverse_operator
from mne.viz import (plot_bem, plot_events, plot_source_spectrogram,
plot_snr_estimate, plot_filter, plot_csd, plot_chpi_snr)
from mne.viz.misc import _handle_event_colors
from mne.viz.utils import _get_color_list
from mne.utils import requires_nibabel
from mne.time_frequency import CrossSpectralDensity
# Paths into the (optional) mne-testing-data dataset; download=False means
# these may point at non-existent files when the dataset is absent, in which
# case the @requires_testing_data tests are skipped.
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
src_fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
inv_fname = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-meg-eeg-oct-4-meg-inv.fif')
evoked_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
dip_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
chpi_fif_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.fif')
# Small fixtures shipped with the package itself (always available)
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
def _get_raw():
    """Load the small packaged test raw file fully into memory."""
    raw = read_raw_fif(raw_fname, preload=True)
    return raw
def _get_events():
    """Read the packaged test events file."""
    events = read_events(event_fname)
    return events
def test_plot_filter():
    """Test filter plotting."""
    l_freq, h_freq, sfreq = 2., 40., 1000.
    data = np.zeros(5000)
    freq = [0, 2, 40, 50, 500]
    gain = [0, 1, 1, 0, 0]
    # FIR filter, with and without an ideal-response overlay
    h = create_filter(data, sfreq, l_freq, h_freq, fir_design='firwin2')
    plot_filter(h, sfreq)
    plt.close('all')
    plot_filter(h, sfreq, freq, gain)
    plt.close('all')
    # IIR filters: default, 'ba' and 'sos' outputs, with compensation
    iir = create_filter(data, sfreq, l_freq, h_freq, method='iir')
    plot_filter(iir, sfreq)
    plt.close('all')
    iir = create_filter(data, sfreq, l_freq, h_freq,
                        method='iir', iir_params={'output': 'ba'}
                        )
    plot_filter(iir, sfreq, compensate=True)
    plt.close('all')
    iir = create_filter(data, sfreq, l_freq, h_freq,
                        method='iir', iir_params={'output': 'sos'}
                        )
    plot_filter(iir, sfreq, compensate=True)
    plt.close('all')
    plot_filter(iir, sfreq, freq, gain)
    plt.close('all')
    iir_ba = create_filter(data, sfreq, l_freq, h_freq, method='iir',
                           iir_params=dict(output='ba'))
    plot_filter(iir_ba, sfreq, freq, gain)
    plt.close('all')
    # The ``plot`` argument controls which panels appear (one axes each)
    fig = plot_filter(h, sfreq, freq, gain, fscale='linear')
    assert len(fig.axes) == 3
    plt.close('all')
    fig = plot_filter(h, sfreq, freq, gain, fscale='linear',
                      plot=('time', 'delay'))
    assert len(fig.axes) == 2
    plt.close('all')
    fig = plot_filter(h, sfreq, freq, gain, fscale='linear',
                      plot=['magnitude', 'delay'])
    assert len(fig.axes) == 2
    plt.close('all')
    fig = plot_filter(h, sfreq, freq, gain, fscale='linear',
                      plot='magnitude')
    assert len(fig.axes) == 1
    plt.close('all')
    fig = plot_filter(h, sfreq, freq, gain, fscale='linear',
                      plot=('magnitude'))
    assert len(fig.axes) == 1
    plt.close('all')
    with pytest.raises(ValueError, match='Invalid value for the .plot'):
        plot_filter(h, sfreq, freq, gain, plot=('turtles'))
    # User-supplied axes must match the number of requested panels
    _, axes = plt.subplots(1)
    fig = plot_filter(h, sfreq, freq, gain, plot=('magnitude'), axes=axes)
    assert len(fig.axes) == 1
    _, axes = plt.subplots(2)
    fig = plot_filter(h, sfreq, freq, gain, plot=('magnitude', 'delay'),
                      axes=axes)
    assert len(fig.axes) == 2
    plt.close('all')
    _, axes = plt.subplots(1)
    with pytest.raises(ValueError, match='Length of axes'):
        plot_filter(h, sfreq, freq, gain,
                    plot=('magnitude', 'delay'), axes=axes)
def test_plot_cov():
    """Test plotting of covariances."""
    raw = _get_raw()
    cov = read_cov(cov_fname)
    # Plotting with projectors applied warns about projection
    with pytest.warns(RuntimeWarning, match='projection'):
        fig1, fig2 = cov.plot(raw.info, proj=True, exclude=raw.ch_names[6:])
    # test complex numbers
    cov['data'] = cov.data * (1 + 1j)
    fig1, fig2 = cov.plot(raw.info)
@testing.requires_testing_data
@requires_nibabel()
def test_plot_bem():
    """Test plotting of BEM contours."""
    # Error conditions: bad subject, bad orientation, bad slices
    with pytest.raises(IOError, match='MRI file .* not found'):
        plot_bem(subject='bad-subject', subjects_dir=subjects_dir)
    with pytest.raises(ValueError, match="Invalid value for the 'orientation"):
        plot_bem(subject='sample', subjects_dir=subjects_dir,
                 orientation='bad-ori')
    with pytest.raises(ValueError, match="sorted 1D array"):
        plot_bem(subject='sample', subjects_dir=subjects_dir, slices=[0, 500])
    # One axes per requested slice; contour counts depend on what is drawn
    fig = plot_bem(subject='sample', subjects_dir=subjects_dir,
                   orientation='sagittal', slices=[25, 50])
    assert len(fig.axes) == 2
    assert len(fig.axes[0].collections) == 3  # 3 BEM surfaces ...
    fig = plot_bem(subject='sample', subjects_dir=subjects_dir,
                   orientation='coronal', brain_surfaces='white')
    assert len(fig.axes[0].collections) == 5  # 3 BEM surfaces + 2 hemis
    fig = plot_bem(subject='sample', subjects_dir=subjects_dir,
                   orientation='coronal', slices=[25, 50], src=src_fname)
    assert len(fig.axes[0].collections) == 4  # 3 BEM surfaces + 1 src contour
    # A source space not in MRI coordinates must be rejected
    with pytest.raises(ValueError, match='MRI coordinates, got head'):
        plot_bem(subject='sample', subjects_dir=subjects_dir,
                 src=inv_fname)
def test_event_colors():
    """Test color assignment."""
    events = pick_events(_get_events(), include=[1, 2])
    unique_events = set(events[:, 2])
    # make sure defaults work
    colors = _handle_event_colors(None, unique_events, dict())
    default_colors = _get_color_list()
    assert colors[1] == default_colors[0]
    # make sure custom color overrides default
    colors = _handle_event_colors(color_dict=dict(foo='k', bar='#facade'),
                                  unique_events=unique_events,
                                  event_id=dict(foo=1, bar=2))
    # colors are keyed by numeric event id, not by event_id label
    assert colors[1] == 'k'
    assert colors[2] == '#facade'
def test_plot_events():
    """Test plotting events."""
    event_labels = {'aud_l': 1, 'aud_r': 2, 'vis_l': 3, 'vis_r': 4}
    color = {1: 'green', 2: 'yellow', 3: 'red', 4: 'c'}
    raw = _get_raw()
    events = _get_events()
    fig = plot_events(events, raw.info['sfreq'], raw.first_samp)
    assert fig.axes[0].get_legend() is not None  # legend even with no event_id
    plot_events(events, raw.info['sfreq'], raw.first_samp, equal_spacing=False)
    # Test plotting events without sfreq
    plot_events(events, first_samp=raw.first_samp)
    with pytest.warns(RuntimeWarning, match='will be ignored'):
        fig = plot_events(events, raw.info['sfreq'], raw.first_samp,
                          event_id=event_labels)
    assert fig.axes[0].get_legend() is not None
    # Partial / mismatched color and event_id dicts should warn, not fail
    with pytest.warns(RuntimeWarning, match='Color was not assigned'):
        plot_events(events, raw.info['sfreq'], raw.first_samp,
                    color=color)
    with pytest.warns(RuntimeWarning, match=r'vent \d+ missing from event_id'):
        plot_events(events, raw.info['sfreq'], raw.first_samp,
                    event_id=event_labels, color=color)
    multimatch = r'event \d+ missing from event_id|in the color dict but is'
    with pytest.warns(RuntimeWarning, match=multimatch):
        plot_events(events, raw.info['sfreq'], raw.first_samp,
                    event_id={'aud_l': 1}, color=color)
    # event_id entries absent from the data: error by default, controllable
    # via on_missing
    extra_id = {'missing': 111}
    with pytest.raises(ValueError, match='from event_id is not present in'):
        plot_events(events, raw.info['sfreq'], raw.first_samp,
                    event_id=extra_id)
    with pytest.raises(RuntimeError, match='No usable event IDs'):
        plot_events(events, raw.info['sfreq'], raw.first_samp,
                    event_id=extra_id, on_missing='ignore')
    extra_id = {'aud_l': 1, 'missing': 111}
    with pytest.warns(RuntimeWarning, match='from event_id is not present in'):
        plot_events(events, raw.info['sfreq'], raw.first_samp,
                    event_id=extra_id, on_missing='warn')
    with pytest.warns(RuntimeWarning, match='event 2 missing'):
        plot_events(events, raw.info['sfreq'], raw.first_samp,
                    event_id=extra_id, on_missing='ignore')
    events = events[events[:, 2] == 1]
    assert len(events) > 0
    plot_events(events, raw.info['sfreq'], raw.first_samp,
                event_id=extra_id, on_missing='ignore')
    # Empty events array is an error
    with pytest.raises(ValueError, match='No events'):
        plot_events(np.empty((0, 3)))
@testing.requires_testing_data
def test_plot_source_spectrogram():
    """Test plotting of source spectrogram."""
    sample_src = read_source_spaces(op.join(subjects_dir, 'sample',
                                            'bem', 'sample-oct-6-src.fif'))
    # dense version
    vertices = [s['vertno'] for s in sample_src]
    n_times = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.ones((n_verts, n_times))
    stc = SourceEstimate(stc_data, vertices, 1, 1)
    plot_source_spectrogram([stc, stc], [[1, 2], [3, 4]])
    # Empty input and out-of-range tmin/tmax must raise
    pytest.raises(ValueError, plot_source_spectrogram, [], [])
    pytest.raises(ValueError, plot_source_spectrogram, [stc, stc],
                  [[1, 2], [3, 4]], tmin=0)
    pytest.raises(ValueError, plot_source_spectrogram, [stc, stc],
                  [[1, 2], [3, 4]], tmax=7)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_plot_snr():
    """Test plotting SNR estimate."""
    # Smoke test: plotting an SNR estimate from evoked + inverse must not raise
    inv = read_inverse_operator(inv_fname)
    evoked = read_evokeds(evoked_fname, baseline=(None, 0))[0]
    plot_snr_estimate(evoked, inv)
@testing.requires_testing_data
def test_plot_dipole_amplitudes():
    """Test plotting dipole amplitudes."""
    # Smoke test: reading a dipole file and plotting must not raise
    read_dipole(dip_fname).plot_amplitudes(show=False)
def test_plot_csd():
    """Test plotting of CSD matrices."""
    # Build a minimal 2-channel CSD object and smoke-test both plot modes
    matrix = CrossSpectralDensity([1, 2, 3], ['CH1', 'CH2'],
                                  frequencies=[(10, 20)], n_fft=1,
                                  tmin=0, tmax=1)
    for mode in ('csd', 'coh'):  # cross-spectral density, then coherence
        plot_csd(matrix, mode=mode)
@pytest.mark.slowtest  # Slow on Azure
@testing.requires_testing_data
def test_plot_chpi_snr():
    """Test plotting cHPI SNRs."""
    raw = read_raw_fif(chpi_fif_fname, allow_maxshield='yes')
    result = compute_chpi_snr(raw)
    # test figure creation
    fig = plot_chpi_snr(result)
    # one axes per SNR entry ('times' and 'freqs' keys are not plotted)
    assert len(fig.axes) == len(result) - 2
    assert len(fig.axes[0].lines) == len(result['freqs'])
    assert len(fig.legends) == 1
    # legend labels start with the cHPI frequency value
    texts = [entry.get_text() for entry in fig.legends[0].get_texts()]
    assert len(texts) == len(result['freqs'])
    freqs = [float(text.split()[0]) for text in texts]
    assert_array_equal(freqs, result['freqs'])
    # test user-passed axes
    _, axs = plt.subplots(2, 3)
    _ = plot_chpi_snr(result, axes=axs.ravel())
    # test error
    _, axs = plt.subplots(5)
    with pytest.raises(ValueError, match='a list of 6 axes, got length 5'):
        _ = plot_chpi_snr(result, axes=axs.ravel())
| bsd-3-clause | 5b66ac64c50a92842d921704befbaaad | 40.675958 | 79 | 0.613745 | 3.233577 | false | true | false | false |
mne-tools/mne-python | mne/preprocessing/tests/test_ssp.py | 8 | 8256 | import os.path as op
import pytest
from numpy.testing import assert_array_almost_equal
import numpy as np
from mne.io import read_raw_fif, read_raw_ctf
from mne.io.proj import make_projector, activate_proj
from mne.preprocessing.ssp import compute_proj_ecg, compute_proj_eog
from mne.datasets import testing
from mne import pick_types
# Small fixtures shipped with the package itself (always available)
data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_path, 'test_raw.fif')
# Duration (seconds) of data used by the projector tests below
dur_use = 5.0
# Known EOG blink times (seconds) in the test file, used to sanity-check
# the number of detected events
eog_times = np.array([0.5, 2.3, 3.6, 14.5])
# CTF data lives in the optional mne-testing-data dataset
ctf_fname = op.join(testing.data_path(download=False), 'CTF',
                    'testdata_ctf.ds')
@pytest.fixture()
def short_raw():
    """Create a short, picked raw instance."""
    # Keep only 7 s and a small subset of MEG channels (every 10th of the
    # first 306) plus all EEG/EOG, so the projector tests run fast
    raw = read_raw_fif(raw_fname).crop(0, 7).pick_types(
        meg=True, eeg=True, eog=True)
    raw.pick(raw.ch_names[:306:10] + raw.ch_names[306:]).load_data()
    # Re-normalize SSP vectors after channel picking
    raw.info.normalize_proj()
    return raw
@pytest.mark.parametrize('average', (True, False))
def test_compute_proj_ecg(short_raw, average):
    """Test computation of ECG SSP projectors."""
    raw = short_raw
    # For speed, let's not filter here (must also not reject then)
    with pytest.warns(RuntimeWarning, match='Attenuation'):
        projs, events = compute_proj_ecg(
            raw, n_mag=2, n_grad=2, n_eeg=2, ch_name='MEG 1531',
            bads=['MEG 2443'], average=average, avg_ref=True, no_proj=True,
            l_freq=None, h_freq=None, reject=None, tmax=dur_use,
            qrs_threshold=0.5, filter_length=1000)
    # 2 mag + 2 grad + 2 eeg + 1 average-reference projector
    assert len(projs) == 7
    # heart rate at least 0.5 Hz, but less than 3 Hz
    assert (events.shape[0] > 0.5 * dur_use and
            events.shape[0] < 3 * dur_use)
    ssp_ecg = [proj for proj in projs if proj['desc'].startswith('ECG')]
    # check that the first principal component have a certain minimum
    ssp_ecg = [proj for proj in ssp_ecg if 'PCA-01' in proj['desc']]
    thresh_eeg, thresh_axial, thresh_planar = .9, .3, .1
    for proj in ssp_ecg:
        if 'planar' in proj['desc']:
            assert proj['explained_var'] > thresh_planar
        elif 'axial' in proj['desc']:
            assert proj['explained_var'] > thresh_axial
        elif 'eeg' in proj['desc']:
            assert proj['explained_var'] > thresh_eeg
    # XXX: better tests
    # without setting a bad channel, this should throw a warning
    with pytest.warns(RuntimeWarning, match='No good epochs found'):
        projs, events, drop_log = compute_proj_ecg(
            raw, n_mag=2, n_grad=2, n_eeg=2, ch_name='MEG 1531', bads=[],
            average=average, avg_ref=True, no_proj=True, l_freq=None,
            h_freq=None, tmax=dur_use, return_drop_log=True,
            # XXX can be removed once
            # XXX https://github.com/mne-tools/mne-python/issues/9273
            # XXX has been resolved:
            qrs_threshold=1e-15)
    assert projs == []
    assert len(events) == len(drop_log)
@pytest.mark.parametrize('average', [True, False])
def test_compute_proj_eog(average, short_raw):
    """Test computation of EOG SSP projectors."""
    raw = short_raw
    n_projs_init = len(raw.info['projs'])
    with pytest.warns(RuntimeWarning, match='Attenuation'):
        projs, events = compute_proj_eog(
            raw, n_mag=2, n_grad=2, n_eeg=2, bads=['MEG 2443'],
            average=average, avg_ref=True, no_proj=False, l_freq=None,
            h_freq=None, reject=None, tmax=dur_use, filter_length=1000)
    # 2 mag + 2 grad + 2 eeg + avg-ref, plus the pre-existing projectors
    assert (len(projs) == (7 + n_projs_init))
    # number of detected blinks should match the known blink times (+/- 1)
    assert (np.abs(events.shape[0] -
                   np.sum(np.less(eog_times, dur_use))) <= 1)
    ssp_eog = [proj for proj in projs if proj['desc'].startswith('EOG')]
    # check that the first principal component have a certain minimum
    ssp_eog = [proj for proj in ssp_eog if 'PCA-01' in proj['desc']]
    thresh_eeg, thresh_axial, thresh_planar = .9, .3, .1
    for proj in ssp_eog:
        if 'planar' in proj['desc']:
            assert (proj['explained_var'] > thresh_planar)
        elif 'axial' in proj['desc']:
            assert (proj['explained_var'] > thresh_axial)
        elif 'eeg' in proj['desc']:
            assert (proj['explained_var'] > thresh_eeg)
    # XXX: better tests
    with pytest.warns(RuntimeWarning, match='longer'):
        projs, events = compute_proj_eog(
            raw, n_mag=2, n_grad=2, n_eeg=2, average=average, bads=[],
            avg_ref=True, no_proj=False, l_freq=None, h_freq=None,
            tmax=dur_use)
    assert projs == []
    # flat EOG channel -> filter warns about being longer than the signal
    raw._data[raw.ch_names.index('EOG 061'), :] = 1.
    with pytest.warns(RuntimeWarning, match='filter.*longer than the signal'):
        projs, events = compute_proj_eog(raw=raw, tmax=dur_use,
                                         ch_name='EOG 061')
@pytest.mark.slowtest  # can be slow on OSX
def test_compute_proj_parallel(short_raw):
    """Test computation of ExG projectors using parallelization."""
    short_raw = short_raw.copy().pick(('eeg', 'eog')).resample(100)
    raw = short_raw.copy()
    # Serial computation (n_jobs=None)
    with pytest.warns(RuntimeWarning, match='Attenuation'):
        projs, _ = compute_proj_eog(
            raw, n_eeg=2, bads=raw.ch_names[1:2], average=False,
            avg_ref=True, no_proj=False, n_jobs=None, l_freq=None, h_freq=None,
            reject=None, tmax=dur_use, filter_length=100)
    # Parallel computation (n_jobs=2) on an identical copy
    raw_2 = short_raw.copy()
    with pytest.warns(RuntimeWarning, match='Attenuation'):
        projs_2, _ = compute_proj_eog(
            raw_2, n_eeg=2, bads=raw.ch_names[1:2],
            average=False, avg_ref=True, no_proj=False, n_jobs=2,
            l_freq=None, h_freq=None, reject=None, tmax=dur_use,
            filter_length=100)
    # Applying both sets of projectors must give (nearly) identical matrices
    projs = activate_proj(projs)
    projs_2 = activate_proj(projs_2)
    projs, _, _ = make_projector(projs, raw_2.info['ch_names'],
                                 bads=['MEG 2443'])
    projs_2, _, _ = make_projector(projs_2, raw_2.info['ch_names'],
                                   bads=['MEG 2443'])
    assert_array_almost_equal(projs, projs_2, 10)
def _check_projs_for_expected_channels(projs, n_mags, n_grads, n_eegs):
    """Assert each projector spans the expected number of channels."""
    assert projs is not None
    for proj in projs:
        # First matching keyword in the description decides the channel type
        for keyword, n_expected in (('planar', n_grads), ('axial', n_mags),
                                    ('eeg', n_eegs)):
            if keyword in proj['desc']:
                assert len(proj['data']['col_names']) == n_expected
                break
@pytest.mark.slowtest  # can be slow on OSX
@testing.requires_testing_data
def test_compute_proj_ctf():
    """Test to show that projector code completes on CTF data."""
    raw = read_raw_ctf(ctf_fname, preload=True)
    # expected channels per projector type (subsample for speed)
    mag_picks = pick_types(
        raw.info, meg='mag', ref_meg=False, exclude='bads')[::10]
    n_mags = len(mag_picks)
    grad_picks = pick_types(raw.info, meg='grad', ref_meg=False,
                            exclude='bads')[::10]
    n_grads = len(grad_picks)
    eeg_picks = pick_types(raw.info, meg=False, eeg=True, ref_meg=False,
                           exclude='bads')[2::3]
    n_eegs = len(eeg_picks)
    ref_picks = pick_types(raw.info, meg=False, ref_meg=True)
    raw.pick(np.sort(np.concatenate(
        [mag_picks, grad_picks, eeg_picks, ref_picks])))
    del mag_picks, grad_picks, eeg_picks, ref_picks
    # Test with and without gradient compensation
    raw.apply_gradient_compensation(0)
    n_projs_init = len(raw.info['projs'])
    with pytest.warns(RuntimeWarning, match='Attenuation'):
        projs, _ = compute_proj_eog(
            raw, n_mag=2, n_grad=2, n_eeg=2, average=True, ch_name='EEG059',
            avg_ref=True, no_proj=False, l_freq=None, h_freq=None,
            reject=None, tmax=dur_use, filter_length=1000)
    _check_projs_for_expected_channels(projs, n_mags, n_grads, n_eegs)
    assert len(projs) == (5 + n_projs_init)
    raw.apply_gradient_compensation(1)
    with pytest.warns(RuntimeWarning, match='Attenuation'):
        projs, _ = compute_proj_ecg(
            raw, n_mag=1, n_grad=1, n_eeg=2, average=True, ch_name='EEG059',
            avg_ref=True, no_proj=False, l_freq=None, h_freq=None,
            reject=None, tmax=dur_use, filter_length=1000)
    _check_projs_for_expected_channels(projs, n_mags, n_grads, n_eegs)
    assert len(projs) == (4 + n_projs_init)
| bsd-3-clause | b431f20ea29ffc9ee6c7c56f9b7783e6 | 42.225131 | 79 | 0.608164 | 3.017544 | false | true | false | false |
mne-tools/mne-python | mne/datasets/eegbci/eegbci.py | 6 | 7470 | # Author: Martin Billinger <martin.billinger@tugraz.at>
# Adam Li <adam2392@gmail.com>
# Daniel McCloy <dan@mccloy.info>
# License: BSD Style.
import os
from os import path as op
import pkg_resources
import re
from pathlib import Path
from ..utils import _get_path, _do_path_update
from ...utils import _url_to_local_path, verbose
EEGMI_URL = 'https://physionet.org/files/eegmmidb/1.0.0/'
@verbose
def data_path(url, path=None, force_update=False, update_path=None, *,
              verbose=None):
    """Get path to local copy of EEGMMI dataset URL.
    This is a low-level function useful for getting a local copy of a
    remote EEGBCI dataset :footcite:`SchalkEtAl2004` which is available at PhysioNet :footcite:`GoldbergerEtAl2000`.
    Parameters
    ----------
    url : str
        The dataset to use.
    path : None | str
        Location of where to look for the EEGBCI data storing location.
        If None, the environment variable or config parameter
        ``MNE_DATASETS_EEGBCI_PATH`` is used. If it doesn't exist, the
        "~/mne_data" directory is used. If the EEGBCI dataset
        is not found under the given path, the data
        will be automatically downloaded to the specified folder.
    force_update : bool
        Force update of the dataset even if a local copy exists.
    update_path : bool | None
        If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python
        config to the given path. If None, the user is prompted.
    %(verbose)s
    Returns
    -------
    path : list of Path
        Local path to the given data file. This path is contained inside a list
        of length one, for compatibility.
    Notes
    -----
    For example, one could do:
    >>> from mne.datasets import eegbci
    >>> url = 'http://www.physionet.org/physiobank/database/eegmmidb/'
    >>> eegbci.data_path(url, os.getenv('HOME') + '/datasets') # doctest:+SKIP
    This would download the given EEGBCI data file to the 'datasets' folder,
    and prompt the user to save the 'datasets' path to the mne-python config,
    if it isn't there already.
    References
    ----------
    .. footbibliography::
    """  # noqa: E501
    import pooch
    # Config key and display name used to resolve/store the storage location.
    key = 'MNE_DATASETS_EEGBCI_PATH'
    name = 'EEGBCI'
    path = _get_path(path, key, name)
    fname = 'MNE-eegbci-data'
    # Mirror the remote URL layout under <path>/MNE-eegbci-data.
    destination = _url_to_local_path(url, op.join(path, fname))
    destinations = [destination]
    # Fetch the file
    if not op.isfile(destination) or force_update:
        # Remove a stale copy before re-downloading (force_update case).
        if op.isfile(destination):
            os.remove(destination)
        if not op.isdir(op.dirname(destination)):
            os.makedirs(op.dirname(destination))
        # NOTE(review): no ``known_hash`` is passed, so the download is not
        # checksum-verified; also pooch treats ``path`` as a directory and
        # appends ``fname``, so the file lands at <destination>/<fname> —
        # confirm this layout is intended.
        pooch.retrieve(
            # URL to one of Pooch's test files
            url=url,
            path=destination,
            fname=fname
        )
    # Offer to update the path
    _do_path_update(path, update_path, key, name)
    # Return as a one-element list of Path for backward compatibility.
    destinations = [Path(dest) for dest in destinations]
    return destinations
@verbose
def load_data(subject, runs, path=None, force_update=False, update_path=None,
              base_url=EEGMI_URL, verbose=None):  # noqa: D301
    """Get paths to local copies of EEGBCI dataset files.
    This will fetch data for the EEGBCI dataset :footcite:`SchalkEtAl2004`, which is also
    available at PhysioNet :footcite:`GoldbergerEtAl2000`.
    Parameters
    ----------
    subject : int
        The subject to use. Can be in the range of 1-109 (inclusive).
    runs : int | list of int
        The runs to use. See Notes for details.
    path : None | str
        Location of where to look for the EEGBCI data storing location.
        If None, the environment variable or config parameter
        ``MNE_DATASETS_EEGBCI_PATH`` is used. If it doesn't exist, the
        "~/mne_data" directory is used. If the EEGBCI dataset
        is not found under the given path, the data
        will be automatically downloaded to the specified folder.
    force_update : bool
        Force update of the dataset even if a local copy exists.
    update_path : bool | None
        If True, set the MNE_DATASETS_EEGBCI_PATH in mne-python
        config to the given path. If None, the user is prompted.
    base_url : str
        The URL root for the data.
    %(verbose)s
    Returns
    -------
    paths : list
        List of local data paths of the given type.
    Notes
    -----
    The run numbers correspond to:
    =========  ===================================
    run        task
    =========  ===================================
    1          Baseline, eyes open
    2          Baseline, eyes closed
    3, 7, 11   Motor execution: left vs right hand
    4, 8, 12   Motor imagery: left vs right hand
    5, 9, 13   Motor execution: hands vs feet
    6, 10, 14  Motor imagery: hands vs feet
    =========  ===================================
    For example, one could do::
        >>> from mne.datasets import eegbci
        >>> eegbci.load_data(1, [4, 10, 14], os.getenv('HOME') + '/datasets') # doctest:+SKIP
    This would download runs 4, 10, and 14 (hand/foot motor imagery) runs from
    subject 1 in the EEGBCI dataset to the 'datasets' folder, and prompt the
    user to save the 'datasets' path to the mne-python config, if it isn't
    there already.
    References
    ----------
    .. footbibliography::
    """  # noqa: E501
    import pooch
    # Accept a bare int for ``runs`` by normalizing to a list.
    if not hasattr(runs, '__iter__'):
        runs = [runs]
    # get local storage path
    config_key = 'MNE_DATASETS_EEGBCI_PATH'
    folder = 'MNE-eegbci-data'
    name = 'EEGBCI'
    path = _get_path(path, config_key, name)
    # extract path parts
    # Capture ('files', 'eegmmidb', '<semver>') so the local directory
    # structure mirrors the PhysioNet layout.
    pattern = r'(?:https?://.*)(files)/(eegmmidb)/(\d+\.\d+\.\d+)/?'
    match = re.compile(pattern).match(base_url)
    if match is None:
        raise ValueError('base_url does not match the expected EEGMI folder '
                         'structure. Please notify MNE-Python developers.')
    base_path = op.join(path, folder, *match.groups())
    # create the download manager
    fetcher = pooch.create(
        path=base_path,
        base_url=base_url,
        version=None,  # Data versioning is decoupled from MNE-Python version.
        registry=None,  # Registry is loaded from file, below.
        retry_if_failed=2  # 2 retries = 3 total attempts
    )
    # load the checksum registry
    # NOTE(review): ``pkg_resources`` is deprecated in favor of
    # ``importlib.resources``; consider migrating when minimum versions allow.
    registry = pkg_resources.resource_stream(
        'mne', op.join('data', 'eegbci_checksums.txt'))
    fetcher.load_registry(registry)
    # fetch the file(s)
    data_paths = []
    for run in runs:
        # PhysioNet file layout: S<subject>/S<subject>R<run>.edf
        file_part = f'S{subject:03d}/S{subject:03d}R{run:02d}.edf'
        destination = op.join(base_path, file_part)
        # Delete the cached copy so pooch re-downloads it.
        if force_update and op.isfile(destination):
            os.remove(destination)
        data_paths.append(fetcher.fetch(file_part))
    # update path in config if desired
    _do_path_update(path, update_path, config_key, name)
    return data_paths
def standardize(raw):
    """Standardize channel positions and names.

    Parameters
    ----------
    raw : instance of Raw
        The raw instance whose channels are renamed in-place.
    """
    def _standard_name(orig):
        # Drop stray periods and normalize case to 10-05 conventions:
        # trailing 'Z' becomes 'z', leading 'FP' becomes 'Fp'.
        fixed = orig.strip('.').upper()
        if fixed.endswith('Z'):
            fixed = fixed[:-1] + 'z'
        if fixed.startswith('FP'):
            fixed = 'Fp' + fixed[2:]
        return fixed

    raw.rename_channels({name: _standard_name(name)
                         for name in raw.ch_names})
| bsd-3-clause | e94589c3f4be7d17570cbb1fb436c2a0 | 32.648649 | 116 | 0.613655 | 3.663561 | false | false | false | false |
mne-tools/mne-python | tutorials/raw/10_raw_overview.py | 9 | 26059 | # -*- coding: utf-8 -*-
"""
.. _tut-raw-class:
=======================================
The Raw data structure: continuous data
=======================================
This tutorial covers the basics of working with raw EEG/MEG data in Python. It
introduces the :class:`~mne.io.Raw` data structure in detail, including how to
load, query, subselect, export, and plot data from a :class:`~mne.io.Raw`
object. For more info on visualization of :class:`~mne.io.Raw` objects, see
:ref:`tut-visualize-raw`. For info on creating a :class:`~mne.io.Raw` object
from simulated data in a :class:`NumPy array <numpy.ndarray>`, see
:ref:`tut-creating-data-structures`.
As usual we'll start by importing the modules we need:
"""
# %%
import os
import numpy as np
import matplotlib.pyplot as plt
import mne
# %%
# Loading continuous data
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# .. admonition:: Datasets in MNE-Python
# :class: sidebar note
#
# There are ``data_path`` functions for several example datasets in
# MNE-Python (e.g., :func:`mne.datasets.kiloword.data_path`,
# :func:`mne.datasets.spm_face.data_path`, etc). All of them will check the
# default download location first to see if the dataset is already on your
# computer, and only download it if necessary. The default download
# location is also configurable; see the documentation of any of the
# ``data_path`` functions for more information.
#
# As mentioned in :ref:`the introductory tutorial <tut-overview>`,
# MNE-Python data structures are based around
# the :file:`.fif` file format from Neuromag. This tutorial uses an
# :ref:`example dataset <sample-dataset>` in :file:`.fif` format, so here we'll
# use the function :func:`mne.io.read_raw_fif` to load the raw data; there are
# reader functions for :ref:`a wide variety of other data formats
# <data-formats>` as well.
#
# There are also :ref:`several other example datasets
# <datasets>` that can be downloaded with just a few lines
# of code. Functions for downloading example datasets are in the
# :mod:`mne.datasets` submodule; here we'll use
# :func:`mne.datasets.sample.data_path` to download the ":ref:`sample-dataset`"
# dataset, which contains EEG, MEG, and structural MRI data from one subject
# performing an audiovisual experiment. When it's done downloading,
# :func:`~mne.datasets.sample.data_path` will return the folder location where
# it put the files; you can navigate there with your file browser if you want
# to examine the files yourself. Once we have the file path, we can load the
# data with :func:`~mne.io.read_raw_fif`. This will return a
# :class:`~mne.io.Raw` object, which we'll store in a variable called ``raw``.
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file)
# %%
# As you can see above, :func:`~mne.io.read_raw_fif` automatically displays
# some information about the file it's loading. For example, here it tells us
# that there are three "projection items" in the file along with the recorded
# data; those are :term:`SSP projectors <projector>` calculated to remove
# environmental noise from the MEG signals, and are discussed in a the tutorial
# :ref:`tut-projectors-background`.
# In addition to the information displayed during loading, you can
# get a glimpse of the basic details of a :class:`~mne.io.Raw` object by
# printing it:
print(raw)
# %%
# By default, the :samp:`mne.io.read_raw_{*}` family of functions will *not*
# load the data into memory (instead the data on disk are `memory-mapped`_,
# meaning the data are only read from disk as-needed). Some operations (such as
# filtering) require that the data be copied into RAM; to do that we could have
# passed the ``preload=True`` parameter to :func:`~mne.io.read_raw_fif`, but we
# can also copy the data into RAM at any time using the
# :meth:`~mne.io.Raw.load_data` method. However, since this particular tutorial
# doesn't do any serious analysis of the data, we'll first
# :meth:`~mne.io.Raw.crop` the :class:`~mne.io.Raw` object to 60 seconds so it
# uses less memory and runs more smoothly on our documentation server.
raw.crop(tmax=60)
# %%
# Querying the Raw object
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# .. admonition:: Attributes vs. Methods
# :class: sidebar hint
#
# **Attributes** are usually static properties of Python objects — things
# that are pre-computed and stored as part of the object's representation
# in memory. Attributes are accessed with the ``.`` operator and do not
# require parentheses after the attribute name (example: ``raw.ch_names``).
#
# **Methods** are like specialized functions attached to an object.
# Usually they require additional user input and/or need some computation
# to yield a result. Methods always have parentheses at the end; additional
# arguments (if any) go inside those parentheses (examples:
# ``raw.estimate_rank()``, ``raw.drop_channels(['EEG 030', 'MEG 2242'])``).
#
# We saw above that printing the :class:`~mne.io.Raw` object displays some
# basic information like the total number of channels, the number of time
# points at which the data were sampled, total duration, and the approximate
# size in memory. Much more information is available through the various
# *attributes* and *methods* of the :class:`~mne.io.Raw` class. Some useful
# attributes of :class:`~mne.io.Raw` objects include a list of the channel
# names (:attr:`~mne.io.Raw.ch_names`), an array of the sample times in seconds
# (:attr:`~mne.io.Raw.times`), and the total number of samples
# (:attr:`~mne.io.Raw.n_times`); a list of all attributes and methods is given
# in the documentation of the :class:`~mne.io.Raw` class.
#
#
# The ``Raw.info`` attribute
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# There is also quite a lot of information stored in the ``raw.info``
# attribute, which stores an :class:`~mne.Info` object that is similar to a
# :class:`Python dictionary <dict>` (in that it has fields accessed via named
# keys). Like Python dictionaries, ``raw.info`` has a ``.keys()`` method that
# shows all the available field names; unlike Python dictionaries, printing
# ``raw.info`` will print a nicely-formatted glimpse of each field's data. See
# :ref:`tut-info-class` for more on what is stored in :class:`~mne.Info`
# objects, and how to interact with them.
n_time_samps = raw.n_times
time_secs = raw.times
ch_names = raw.ch_names
n_chan = len(ch_names) # note: there is no raw.n_channels attribute
print('the (cropped) sample data object has {} time samples and {} channels.'
''.format(n_time_samps, n_chan))
print('The last time sample is at {} seconds.'.format(time_secs[-1]))
print('The first few channel names are {}.'.format(', '.join(ch_names[:3])))
print() # insert a blank line in the output
# some examples of raw.info:
print('bad channels:', raw.info['bads']) # chs marked "bad" during acquisition
print(raw.info['sfreq'], 'Hz') # sampling frequency
print(raw.info['description'], '\n') # miscellaneous acquisition info
print(raw.info)
# %%
# .. note::
#
# Most of the fields of ``raw.info`` reflect metadata recorded at
# acquisition time, and should not be changed by the user. There are a few
# exceptions (such as ``raw.info['bads']`` and ``raw.info['projs']``), but
# in most cases there are dedicated MNE-Python functions or methods to
# update the :class:`~mne.Info` object safely (such as
# :meth:`~mne.io.Raw.add_proj` to update ``raw.info['projs']``).
#
# .. _`time-as-index`:
#
# Time, sample number, and sample index
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. admonition:: Sample numbering in VectorView data
# :class: sidebar warning
#
# For data from VectorView systems, it is important to distinguish *sample
# number* from *sample index*. See :term:`first_samp` for more information.
#
# One method of :class:`~mne.io.Raw` objects that is frequently useful is
# :meth:`~mne.io.Raw.time_as_index`, which converts a time (in seconds) into
# the integer index of the sample occurring closest to that time. The method
# can also take a list or array of times, and will return an array of indices.
#
# It is important to remember that there may not be a data sample at *exactly*
# the time requested, so the number of samples between ``time = 1`` second and
# ``time = 2`` seconds may be different from the number of samples between
# ``time = 2`` and ``time = 3``:
print(raw.time_as_index(20))
print(raw.time_as_index([20, 30, 40]), '\n')
print(np.diff(raw.time_as_index([1, 2, 3])))
# %%
# Modifying ``Raw`` objects
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
# .. admonition:: ``len(raw)``
# :class: sidebar warning
#
# Although the :class:`~mne.io.Raw` object underlyingly stores data samples
# in a :class:`NumPy array <numpy.ndarray>` of shape (n_channels,
# n_timepoints), the :class:`~mne.io.Raw` object behaves differently from
# :class:`NumPy arrays <numpy.ndarray>` with respect to the :func:`len`
# function. ``len(raw)`` will return the number of timepoints (length along
# data axis 1), not the number of channels (length along data axis 0).
# Hence in this section you'll see ``len(raw.ch_names)`` to get the number
# of channels.
#
# :class:`~mne.io.Raw` objects have a number of methods that modify the
# :class:`~mne.io.Raw` instance in-place and return a reference to the modified
# instance. This can be useful for `method chaining`_
# (e.g., ``raw.crop(...).pick_channels(...).filter(...).plot()``)
# but it also poses a problem during interactive analysis: if you modify your
# :class:`~mne.io.Raw` object for an exploratory plot or analysis (say, by
# dropping some channels), you will then need to re-load the data (and repeat
# any earlier processing steps) to undo the channel-dropping and try something
# else. For that reason, the examples in this section frequently use the
# :meth:`~mne.io.Raw.copy` method before the other methods being demonstrated,
# so that the original :class:`~mne.io.Raw` object is still available in the
# variable ``raw`` for use in later examples.
#
#
# Selecting, dropping, and reordering channels
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Altering the channels of a :class:`~mne.io.Raw` object can be done in several
# ways. As a first example, we'll use the :meth:`~mne.io.Raw.pick_types` method
# to restrict the :class:`~mne.io.Raw` object to just the EEG and EOG channels:
eeg_and_eog = raw.copy().pick_types(meg=False, eeg=True, eog=True)
print(len(raw.ch_names), '→', len(eeg_and_eog.ch_names))
# %%
# Similar to the :meth:`~mne.io.Raw.pick_types` method, there is also the
# :meth:`~mne.io.Raw.pick_channels` method to pick channels by name, and a
# corresponding :meth:`~mne.io.Raw.drop_channels` method to remove channels by
# name:
raw_temp = raw.copy()
print('Number of channels in raw_temp:')
print(len(raw_temp.ch_names), end=' → drop two → ')
raw_temp.drop_channels(['EEG 037', 'EEG 059'])
print(len(raw_temp.ch_names), end=' → pick three → ')
raw_temp.pick_channels(['MEG 1811', 'EEG 017', 'EOG 061'])
print(len(raw_temp.ch_names))
# %%
# If you want the channels in a specific order (e.g., for plotting),
# :meth:`~mne.io.Raw.reorder_channels` works just like
# :meth:`~mne.io.Raw.pick_channels` but also reorders the channels; for
# example, here we pick the EOG and frontal EEG channels, putting the EOG
# first and the EEG in reverse order:
channel_names = ['EOG 061', 'EEG 003', 'EEG 002', 'EEG 001']
eog_and_frontal_eeg = raw.copy().reorder_channels(channel_names)
print(eog_and_frontal_eeg.ch_names)
# %%
# Changing channel name and type
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. admonition:: Long channel names
# :class: sidebar note
#
# Due to limitations in the :file:`.fif` file format (which MNE-Python uses
# to save :class:`~mne.io.Raw` objects), channel names are limited to a
# maximum of 15 characters.
#
# You may have noticed that the EEG channel names in the sample data are
# numbered rather than labelled according to a standard nomenclature such as
# the `10-20 <ten_twenty_>`_ or `10-05 <ten_oh_five_>`_ systems, or perhaps it
# bothers you that the channel names contain spaces. It is possible to rename
# channels using the :meth:`~mne.io.Raw.rename_channels` method, which takes a
# Python dictionary to map old names to new names. You need not rename all
# channels at once; provide only the dictionary entries for the channels you
# want to rename. Here's a frivolous example:
raw.rename_channels({'EOG 061': 'blink detector'})
# %%
# This next example replaces spaces in the channel names with underscores,
# using a Python `dict comprehension`_:
print(raw.ch_names[-3:])
channel_renaming_dict = {name: name.replace(' ', '_') for name in raw.ch_names}
raw.rename_channels(channel_renaming_dict)
print(raw.ch_names[-3:])
# %%
# If for some reason the channel types in your :class:`~mne.io.Raw` object are
# inaccurate, you can change the type of any channel with the
# :meth:`~mne.io.Raw.set_channel_types` method. The method takes a
# :class:`dictionary <dict>` mapping channel names to types; allowed types are
# ``ecg, eeg, emg, eog, exci, ias, misc, resp, seeg, dbs, stim, syst, ecog,
# hbo, hbr``. A common use case for changing channel type is when using frontal
# EEG electrodes as makeshift EOG channels:
raw.set_channel_types({'EEG_001': 'eog'})
print(raw.copy().pick_types(meg=False, eog=True).ch_names)
# %%
# Selection in the time domain
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# If you want to limit the time domain of a :class:`~mne.io.Raw` object, you
# can use the :meth:`~mne.io.Raw.crop` method, which modifies the
# :class:`~mne.io.Raw` object in place (we've seen this already at the start of
# this tutorial, when we cropped the :class:`~mne.io.Raw` object to 60 seconds
# to reduce memory demands). :meth:`~mne.io.Raw.crop` takes parameters ``tmin``
# and ``tmax``, both in seconds (here we'll again use :meth:`~mne.io.Raw.copy`
# first to avoid changing the original :class:`~mne.io.Raw` object):
raw_selection = raw.copy().crop(tmin=10, tmax=12.5)
print(raw_selection)
# %%
# :meth:`~mne.io.Raw.crop` also modifies the :attr:`~mne.io.Raw.first_samp` and
# :attr:`~mne.io.Raw.times` attributes, so that the first sample of the cropped
# object now corresponds to ``time = 0``. Accordingly, if you wanted to re-crop
# ``raw_selection`` from 11 to 12.5 seconds (instead of 10 to 12.5 as above)
# then the subsequent call to :meth:`~mne.io.Raw.crop` should get ``tmin=1``
# (not ``tmin=11``), and leave ``tmax`` unspecified to keep everything from
# ``tmin`` up to the end of the object:
print(raw_selection.times.min(), raw_selection.times.max())
raw_selection.crop(tmin=1)
print(raw_selection.times.min(), raw_selection.times.max())
# %%
# Remember that sample times don't always align exactly with requested ``tmin``
# or ``tmax`` values (due to sampling), which is why the ``max`` values of the
# cropped files don't exactly match the requested ``tmax`` (see
# :ref:`time-as-index` for further details).
#
# If you need to select discontinuous spans of a :class:`~mne.io.Raw` object —
# or combine two or more separate :class:`~mne.io.Raw` objects — you can use
# the :meth:`~mne.io.Raw.append` method:
raw_selection1 = raw.copy().crop(tmin=30, tmax=30.1) # 0.1 seconds
raw_selection2 = raw.copy().crop(tmin=40, tmax=41.1) # 1.1 seconds
raw_selection3 = raw.copy().crop(tmin=50, tmax=51.3) # 1.3 seconds
raw_selection1.append([raw_selection2, raw_selection3]) # 2.5 seconds total
print(raw_selection1.times.min(), raw_selection1.times.max())
# %%
# .. warning::
#
# Be careful when concatenating :class:`~mne.io.Raw` objects from different
# recordings, especially when saving: :meth:`~mne.io.Raw.append` only
# preserves the ``info`` attribute of the initial :class:`~mne.io.Raw`
# object (the one outside the :meth:`~mne.io.Raw.append` method call).
#
#
# Extracting data from ``Raw`` objects
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# So far we've been looking at ways to modify a :class:`~mne.io.Raw` object.
# This section shows how to extract the data from a :class:`~mne.io.Raw` object
# into a :class:`NumPy array <numpy.ndarray>`, for analysis or plotting using
# functions outside of MNE-Python. To select portions of the data,
# :class:`~mne.io.Raw` objects can be indexed using square brackets. However,
# indexing :class:`~mne.io.Raw` works differently than indexing a :class:`NumPy
# array <numpy.ndarray>` in two ways:
#
# 1. Along with the requested sample value(s) MNE-Python also returns an array
# of times (in seconds) corresponding to the requested samples. The data
# array and the times array are returned together as elements of a tuple.
#
# 2. The data array will always be 2-dimensional even if you request only a
# single time sample or a single channel.
#
#
# Extracting data by index
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# To illustrate the above two points, let's select a couple seconds of data
# from the first channel:
sampling_freq = raw.info['sfreq']
start_stop_seconds = np.array([11, 13])
start_sample, stop_sample = (start_stop_seconds * sampling_freq).astype(int)
channel_index = 0
raw_selection = raw[channel_index, start_sample:stop_sample]
print(raw_selection)
# %%
# You can see that it contains 2 arrays. This combination of data and times
# makes it easy to plot selections of raw data (although note that we're
# transposing the data array so that each channel is a column instead of a row,
# to match what matplotlib expects when plotting 2-dimensional ``y`` against
# 1-dimensional ``x``):
x = raw_selection[1]
y = raw_selection[0].T
plt.plot(x, y)
# %%
# Extracting channels by name
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The :class:`~mne.io.Raw` object can also be indexed with the names of
# channels instead of their index numbers. You can pass a single string to get
# just one channel, or a list of strings to select multiple channels. As with
# integer indexing, this will return a tuple of ``(data_array, times_array)``
# that can be easily plotted. Since we're plotting 2 channels this time, we'll
# add a vertical offset to one channel so it's not plotted right on top
# of the other one:
# sphinx_gallery_thumbnail_number = 2
channel_names = ['MEG_0712', 'MEG_1022']
two_meg_chans = raw[channel_names, start_sample:stop_sample]
y_offset = np.array([5e-11, 0]) # just enough to separate the channel traces
x = two_meg_chans[1]
y = two_meg_chans[0].T + y_offset
lines = plt.plot(x, y)
plt.legend(lines, channel_names)
# %%
# Extracting channels by type
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# There are several ways to select all channels of a given type from a
# :class:`~mne.io.Raw` object. The safest method is to use
# :func:`mne.pick_types` to obtain the integer indices of the channels you
# want, then use those indices with the square-bracket indexing method shown
# above. The :func:`~mne.pick_types` function uses the :class:`~mne.Info`
# attribute of the :class:`~mne.io.Raw` object to determine channel types, and
# takes boolean or string parameters to indicate which type(s) to retain. The
# ``meg`` parameter defaults to ``True``, and all others default to ``False``,
# so to get just the EEG channels, we pass ``eeg=True`` and ``meg=False``:
eeg_channel_indices = mne.pick_types(raw.info, meg=False, eeg=True)
eeg_data, times = raw[eeg_channel_indices]
print(eeg_data.shape)
# %%
# Some of the parameters of :func:`mne.pick_types` accept string arguments as
# well as booleans. For example, the ``meg`` parameter can take values
# ``'mag'``, ``'grad'``, ``'planar1'``, or ``'planar2'`` to select only
# magnetometers, all gradiometers, or a specific type of gradiometer. See the
# docstring of :meth:`mne.pick_types` for full details.
#
#
# The ``Raw.get_data()`` method
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# If you only want the data (not the corresponding array of times),
# :class:`~mne.io.Raw` objects have a :meth:`~mne.io.Raw.get_data` method. Used
# with no parameters specified, it will extract all data from all channels, in
# a (n_channels, n_timepoints) :class:`NumPy array <numpy.ndarray>`:
data = raw.get_data()
print(data.shape)
# %%
# If you want the array of times, :meth:`~mne.io.Raw.get_data` has an optional
# ``return_times`` parameter:
data, times = raw.get_data(return_times=True)
print(data.shape)
print(times.shape)
# %%
# The :meth:`~mne.io.Raw.get_data` method can also be used to extract specific
# channel(s) and sample ranges, via its ``picks``, ``start``, and ``stop``
# parameters. The ``picks`` parameter accepts integer channel indices, channel
# names, or channel types, and preserves the order in which the channels are
# requested.
first_channel_data = raw.get_data(picks=0)
eeg_and_eog_data = raw.get_data(picks=['eeg', 'eog'])
two_meg_chans_data = raw.get_data(picks=['MEG_0712', 'MEG_1022'],
start=1000, stop=2000)
print(first_channel_data.shape)
print(eeg_and_eog_data.shape)
print(two_meg_chans_data.shape)
# %%
# Summary of ways to extract data from ``Raw`` objects
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The following table summarizes the various ways of extracting data from a
# :class:`~mne.io.Raw` object.
#
# .. cssclass:: table-bordered
# .. rst-class:: midvalign
#
# +-------------------------------------+-------------------------+
# | Python code | Result |
# | | |
# | | |
# +=====================================+=========================+
# | ``raw.get_data()`` | :class:`NumPy array |
# | | <numpy.ndarray>` |
# | | (n_chans × n_samps) |
# +-------------------------------------+-------------------------+
# | ``raw[:]`` | :class:`tuple` of (data |
# +-------------------------------------+ (n_chans × n_samps), |
# | ``raw.get_data(return_times=True)`` | times (1 × n_samps)) |
# +-------------------------------------+-------------------------+
# | ``raw[0, 1000:2000]`` | |
# +-------------------------------------+ |
# | ``raw['MEG 0113', 1000:2000]`` | |
# +-------------------------------------+ |
# | ``raw.get_data(picks=0, | :class:`tuple` of |
# | start=1000, stop=2000, | (data (1 × 1000), |
# | return_times=True)`` | times (1 × 1000)) |
# +-------------------------------------+ |
# | ``raw.get_data(picks='MEG 0113', | |
# | start=1000, stop=2000, | |
# | return_times=True)`` | |
# +-------------------------------------+-------------------------+
# | ``raw[7:9, 1000:2000]`` | |
# +-------------------------------------+ |
# | ``raw[[2, 5], 1000:2000]`` | :class:`tuple` of |
# +-------------------------------------+ (data (2 × 1000), |
# | ``raw[['EEG 030', 'EOG 061'], | times (1 × 1000)) |
# | 1000:2000]`` | |
# +-------------------------------------+-------------------------+
#
#
# Exporting and saving Raw objects
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# :class:`~mne.io.Raw` objects have a built-in :meth:`~mne.io.Raw.save` method,
# which can be used to write a partially processed :class:`~mne.io.Raw` object
# to disk as a :file:`.fif` file, such that it can be re-loaded later with its
# various attributes intact (but see :ref:`precision` for an important
# note about numerical precision when saving).
#
# There are a few other ways to export just the sensor data from a
# :class:`~mne.io.Raw` object. One is to use indexing or the
# :meth:`~mne.io.Raw.get_data` method to extract the data, and use
# :func:`numpy.save` to save the data array:
data = raw.get_data()
np.save(file='my_data.npy', arr=data)
# %%
# It is also possible to export the data to a :class:`Pandas DataFrame
# <pandas.DataFrame>` object, and use the saving methods that :mod:`Pandas
# <pandas>` affords. The :class:`~mne.io.Raw` object's
# :meth:`~mne.io.Raw.to_data_frame` method is similar to
# :meth:`~mne.io.Raw.get_data` in that it has a ``picks`` parameter for
# restricting which channels are exported, and ``start`` and ``stop``
# parameters for restricting the time domain. Note that, by default, times will
# be converted to milliseconds, rounded to the nearest millisecond, and used as
# the DataFrame index; see the ``scaling_time`` parameter in the documentation
# of :meth:`~mne.io.Raw.to_data_frame` for more details.
sampling_freq = raw.info['sfreq']
start_end_secs = np.array([10, 13])
start_sample, stop_sample = (start_end_secs * sampling_freq).astype(int)
df = raw.to_data_frame(picks=['eeg'], start=start_sample, stop=stop_sample)
# then save using df.to_csv(...), df.to_hdf(...), etc
print(df.head())
# %%
# .. note::
# When exporting data as a :class:`NumPy array <numpy.ndarray>` or
# :class:`Pandas DataFrame <pandas.DataFrame>`, be sure to properly account
# for the :ref:`unit of representation <units>` in your subsequent
# analyses.
#
#
# .. LINKS
#
# .. _`method chaining`: https://en.wikipedia.org/wiki/Method_chaining
# .. _`memory-mapped`: https://en.wikipedia.org/wiki/Memory-mapped_file
# .. _ten_twenty: https://en.wikipedia.org/wiki/10%E2%80%9320_system_(EEG)
# .. _ten_oh_five: https://doi.org/10.1016%2FS1388-2457%2800%2900527-7
# .. _`dict comprehension`:
# https://docs.python.org/3/tutorial/datastructures.html#dictionaries
| bsd-3-clause | 4127bded636dee736daba98ee4a95ed7 | 44.677193 | 79 | 0.647987 | 3.404289 | false | false | false | false |
mne-tools/mne-python | mne/io/hitachi/hitachi.py | 3 | 12389 | # Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import datetime as dt
import re
import numpy as np
from ..base import BaseRaw
from ..constants import FIFF
from ..meas_info import create_info, _merge_info
from ..nirx.nirx import _read_csv_rows_cols
from ..utils import _mult_cal_one
from ...utils import (logger, verbose, fill_doc, warn, _check_fname,
_check_option)
@fill_doc
def read_raw_hitachi(fname, preload=False, verbose=None):
    """Reader for a Hitachi fNIRS recording.

    Parameters
    ----------
    %(hitachi_fname)s
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawHitachi
        A Raw object containing Hitachi data.

    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.

    Notes
    -----
    %(hitachi_notes)s
    """
    # Thin convenience wrapper; all of the work happens in RawHitachi.__init__
    return RawHitachi(fname, preload=preload, verbose=verbose)
def _check_bad(cond, msg):
if cond:
raise RuntimeError(f'Could not parse file: {msg}')
@fill_doc
class RawHitachi(BaseRaw):
    """Raw object from a Hitachi fNIRS file.

    Parameters
    ----------
    %(hitachi_fname)s
    %(preload)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.

    Notes
    -----
    %(hitachi_notes)s
    """

    @verbose
    def __init__(self, fname, preload=False, *, verbose=None):
        # A list/tuple of filenames means simultaneous recordings from
        # multiple probes whose channels get concatenated.
        if not isinstance(fname, (list, tuple)):
            fname = [fname]
        fname = list(fname)  # our own list that we can modify
        for fi, this_fname in enumerate(fname):
            fname[fi] = _check_fname(this_fname, 'read', True, f'fname[{fi}]')
        infos = list()
        probes = list()
        last_samps = list()
        # Source/detector numbering continues across files so that channel
        # names remain unique after the per-file infos are merged
        S_offset = D_offset = 0
        ignore_names = ['Time']
        for this_fname in fname:
            info, extra, last_samp, offsets = self._get_hitachi_info(
                this_fname, S_offset, D_offset, ignore_names)
            ignore_names = list(set(ignore_names + info['ch_names']))
            S_offset += offsets[0]
            D_offset += offsets[1]
            infos.append(info)
            probes.append(extra)
            last_samps.append(last_samp)
        # combine infos
        if len(fname) > 1:
            info = _merge_info(infos)
        else:
            info = infos[0]
        if len(set(last_samps)) != 1:
            # bugfix: this message previously lacked the f-prefix, so users
            # saw the literal text "{last_samps}" instead of the values
            raise RuntimeError('All files must have the same number of '
                               f'samples, got: {last_samps}')
        last_samps = [last_samps[0]]
        raw_extras = [dict(probes=probes)]
        # One representative filename is good enough here
        # (additional filenames indicate temporal concat, not ch concat)
        filenames = [fname[0]]
        super().__init__(
            info, preload, filenames=filenames, last_samps=last_samps,
            raw_extras=raw_extras, verbose=verbose)

    # This could be a function, but for the sake of indentation, let's make it
    # a method instead
    def _get_hitachi_info(self, fname, S_offset, D_offset, ignore_names):
        """Parse one Hitachi CSV file header.

        Returns ``(info, raw_extra, last_samp, offsets)`` where ``offsets``
        is the (n_sources, n_detectors) consumed by this file, used by the
        caller to keep S/D numbering unique across probes.
        """
        logger.info('Loading %s' % fname)
        raw_extra = dict(fname=fname)
        info_extra = dict()
        subject_info = dict()
        ch_wavelengths = dict()
        fnirs_wavelengths = [None, None]
        meas_date = age = ch_names = sfreq = None
        with open(fname, 'rb') as fid:
            lines = fid.read()
        lines = lines.decode('latin-1').rstrip('\r\n')
        # Record the byte offset of each line so data rows can later be
        # seeked/read directly without re-parsing the whole file
        bounds = [0]
        end = '\n' if '\n' in lines else '\r'
        bounds.extend(a.end() for a in re.finditer(end, lines))
        bounds.append(len(lines))
        lines = lines.split(end)
        assert len(bounds) == len(lines) + 1
        line = lines[0].rstrip(',\r\n')
        _check_bad(line != 'Header', 'no header found')
        li = 0
        mode = None
        # Parse the key,value(,value...) header rows until the 'Data' marker
        for li, line in enumerate(lines[1:], 1):
            # Newer format has some blank lines
            if len(line) == 0:
                continue
            parts = line.rstrip(',\r\n').split(',')
            if len(parts) == 0:  # some header lines are blank
                continue
            kind, parts = parts[0], parts[1:]
            if len(parts) == 0:
                parts = ['']  # some fields (e.g., Comment) meaningfully blank
            if kind == 'File Version':
                logger.info(f'Reading Hitachi fNIRS file version {parts[0]}')
            elif kind == 'AnalyzeMode':
                _check_bad(
                    parts != ['Continuous'], f'not continuous data ({parts})')
            elif kind == 'Sampling Period[s]':
                sfreq = 1 / float(parts[0])
            elif kind == 'Exception':
                raise NotImplementedError(kind)
            elif kind == 'Comment':
                info_extra['description'] = parts[0]
            elif kind == 'ID':
                subject_info['his_id'] = parts[0]
            elif kind == 'Name':
                if len(parts):
                    name = parts[0].split(' ')
                    if len(name):
                        subject_info['first_name'] = name[0]
                        subject_info['last_name'] = ' '.join(name[1:])
            elif kind == 'Age':
                age = int(parts[0].rstrip('y'))
            elif kind == 'Mode':
                mode = parts[0]
            elif kind in ('HPF[Hz]', 'LPF[Hz]'):
                # filter settings may be non-numeric (e.g., "OFF"); skip those
                try:
                    freq = float(parts[0])
                except ValueError:
                    pass
                else:
                    info_extra[{'HPF[Hz]': 'highpass',
                                'LPF[Hz]': 'lowpass'}[kind]] = freq
            elif kind == 'Date':
                # 5/17/04 5:14
                try:
                    mdy, HM = parts[0].split(' ')
                    H, M = HM.split(':')
                    if len(H) == 1:
                        H = f'0{H}'
                    mdyHM = ' '.join([mdy, ':'.join([H, M])])
                    for fmt in ('%m/%d/%y %H:%M', '%Y/%m/%d %H:%M'):
                        try:
                            meas_date = dt.datetime.strptime(mdyHM, fmt)
                        except Exception:
                            pass
                        else:
                            break
                    else:
                        raise RuntimeError  # unknown format
                except Exception:
                    # fall through with meas_date=None; a placeholder date
                    # is substituted below
                    warn('Extraction of measurement date failed. '
                         'Please report this as a github issue. '
                         'The date is being set to January 1st, 2000, '
                         f'instead of {repr(parts[0])}')
            elif kind == 'Sex':
                try:
                    subject_info['sex'] = dict(
                        female=FIFF.FIFFV_SUBJ_SEX_FEMALE,
                        male=FIFF.FIFFV_SUBJ_SEX_MALE)[parts[0].lower()]
                except KeyError:
                    pass
            elif kind == 'Wave[nm]':
                fnirs_wavelengths[:] = [int(part) for part in parts]
            elif kind == 'Wave Length':
                # entries look like "CH1(695.1)": name plus actual wavelength
                ch_regex = re.compile(r'^(.*)\(([0-9\.]+)\)$')
                for ent in parts:
                    _, v = ch_regex.match(ent).groups()
                    ch_wavelengths[ent] = float(v)
            elif kind == 'Data':
                break
        fnirs_wavelengths = np.array(fnirs_wavelengths, int)
        assert len(fnirs_wavelengths) == 2
        ch_names = lines[li + 1].rstrip(',\r\n').split(',')
        # cull to correct ones
        raw_extra['keep_mask'] = ~np.in1d(ch_names, list(ignore_names))
        for ci, ch_name in enumerate(ch_names):
            if re.match('Probe[0-9]+', ch_name):
                raw_extra['keep_mask'][ci] = False
        # set types
        ch_names = [ch_name for ci, ch_name in enumerate(ch_names)
                    if raw_extra['keep_mask'][ci]]
        ch_types = ['fnirs_cw_amplitude' if ch_name.startswith('CH')
                    else 'stim'
                    for ch_name in ch_names]
        # get locations
        nirs_names = [ch_name for ch_name, ch_type in zip(ch_names, ch_types)
                      if ch_type == 'fnirs_cw_amplitude']
        n_nirs = len(nirs_names)
        assert n_nirs % 2 == 0
        names = {
            '3x3': 'ETG-100',
            '3x5': 'ETG-7000',
            '4x4': 'ETG-7000',
            '3x11': 'ETG-4000',
        }
        _check_option('Hitachi mode', mode, sorted(names))
        n_row, n_col = [int(x) for x in mode.split('x')]
        logger.info(f'Constructing pairing matrix for {names[mode]} ({mode})')
        pairs = _compute_pairs(n_row, n_col, n=1 + (mode == '3x3'))
        assert n_nirs == len(pairs) * 2
        locs = np.zeros((len(ch_names), 12))
        locs[:, :9] = np.nan
        idxs = np.where(np.array(ch_types, 'U') == 'fnirs_cw_amplitude')[0]
        for ii, idx in enumerate(idxs):
            ch_name = ch_names[idx]
            # Use the actual/accurate wavelength in loc
            acc_freq = ch_wavelengths[ch_name]
            locs[idx][9] = acc_freq
            # Rename channel based on standard naming scheme, using the
            # nominal wavelength
            sidx, didx = pairs[ii // 2]
            nom_freq = fnirs_wavelengths[np.argmin(np.abs(
                acc_freq - fnirs_wavelengths))]
            ch_names[idx] = (
                f'S{S_offset + sidx + 1}_'
                f'D{D_offset + didx + 1} '
                f'{nom_freq}'
            )
        offsets = np.array(pairs, int).max(axis=0) + 1
        # figure out bounds: byte offsets of the data rows only
        bounds = raw_extra['bounds'] = bounds[li + 2:]
        last_samp = len(bounds) - 2
        if age is not None and meas_date is not None:
            subject_info['birthday'] = (meas_date.year - age,
                                        meas_date.month,
                                        meas_date.day)
        if meas_date is None:
            meas_date = dt.datetime(2000, 1, 1, 0, 0, 0)
        meas_date = meas_date.replace(tzinfo=dt.timezone.utc)
        if subject_info:
            info_extra['subject_info'] = subject_info
        # Create mne structure
        info = create_info(ch_names, sfreq, ch_types=ch_types)
        with info._unlock():
            info.update(info_extra)
            info['meas_date'] = meas_date
        for li, loc in enumerate(locs):
            info['chs'][li]['loc'][:] = loc
        return info, raw_extra, last_samp, offsets

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a segment of data from a file."""
        this_data = list()
        # Read the requested rows from every probe file and stack channels
        for this_probe in self._raw_extras[fi]['probes']:
            this_data.append(_read_csv_rows_cols(
                this_probe['fname'],
                start, stop, this_probe['keep_mask'],
                this_probe['bounds'], sep=',',
                replace=lambda x:
                x.replace('\r', '\n')
                .replace('\n\n', '\n')
                .replace('\n', ',')
                .replace(':', '')).T)
        this_data = np.concatenate(this_data, axis=0)
        _mult_cal_one(data, this_data, idx, cals, mult)
        return data
def _compute_pairs(n_rows, n_cols, n=1):
n_tot = n_rows * n_cols
sd_idx = (np.arange(n_tot) // 2).reshape(n_rows, n_cols)
d_bool = np.empty((n_rows, n_cols), bool)
for ri in range(n_rows):
d_bool[ri] = np.arange(ri, ri + n_cols) % 2
pairs = list()
for ri in range(n_rows):
# First iterate over connections within the row
for ci in range(n_cols - 1):
pair = (sd_idx[ri, ci], sd_idx[ri, ci + 1])
if d_bool[ri, ci]: # reverse
pair = pair[::-1]
pairs.append(pair)
# Next iterate over row-row connections, if applicable
if ri >= n_rows - 1:
continue
for ci in range(n_cols):
pair = (sd_idx[ri, ci], sd_idx[ri + 1, ci])
if d_bool[ri, ci]:
pair = pair[::-1]
pairs.append(pair)
if n > 1:
assert n == 2 # only one supported for now
pairs = np.array(pairs, int)
second = pairs + pairs.max(axis=0) + 1
pairs = np.r_[pairs, second]
pairs = tuple(tuple(row) for row in pairs)
return tuple(pairs)
| bsd-3-clause | f0c6e5a5bb844787bf900c70245a0b2f | 36.542424 | 78 | 0.488821 | 3.736128 | false | false | false | false |
mne-tools/mne-python | mne/viz/topo.py | 1 | 39390 | """Functions to plot M/EEG data on topo (one axes per channel)."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
from copy import deepcopy
from functools import partial
import numpy as np
from ..io.pick import channel_type, pick_types
from ..utils import _clean_names, _check_option, Bunch, fill_doc, _to_rgb
from ..channels.layout import _merge_ch_data, _pair_grad_sensors, find_layout
from ..defaults import _handle_default
from .utils import (_check_delayed_ssp, _draw_proj_checkbox,
add_background_image, plt_show, _setup_vmin_vmax,
DraggableColorbar, _setup_ax_spines,
_check_cov, _plot_masked_image)
@fill_doc
def iter_topography(info, layout=None, on_pick=None, fig=None,
                    fig_facecolor='k', axis_facecolor='k',
                    axis_spinecolor='k', layout_scale=None, legend=False):
    """Create iterator over channel positions.

    This function returns a generator that unpacks into
    a series of matplotlib axis objects and data / channel
    indices, both corresponding to the sensor positions
    of the related layout passed or inferred from the channel info.
    Hence, this enables convenient topography plot customization.

    Parameters
    ----------
    %(info_not_none)s
    layout : instance of mne.channels.Layout | None
        The layout to use. If None, layout will be guessed.
    on_pick : callable | None
        The callback function to be invoked on clicking one
        of the axes. Is supposed to instantiate the following
        API: ``function(axis, channel_index)``.
    fig : matplotlib.figure.Figure | None
        The figure object to be considered. If None, a new
        figure will be created.
    fig_facecolor : color
        The figure face color. Defaults to black.
    axis_facecolor : color
        The axis face color. Defaults to black.
    axis_spinecolor : color
        The axis spine color. Defaults to black. In other words,
        the color of the axis' edge lines.
    layout_scale : float | None
        Scaling factor for adjusting the relative size of the layout
        on the canvas. If None, nothing will be scaled.
    legend : bool
        If True, an additional axis is created in the bottom right corner
        that can be used to, e.g., construct a legend. The index of this
        axis will be -1.

    Returns
    -------
    gen : generator
        A generator that can be unpacked into:

        ax : matplotlib.axis.Axis
            The current axis of the topo plot.
        ch_dx : int
            The related channel index.
    """
    # Public entry point; the private generator also supports the
    # unified/img/axes modes used internally by the topo plotting routines.
    return _iter_topography(
        info, layout=layout, on_pick=on_pick, fig=fig,
        fig_facecolor=fig_facecolor, axis_facecolor=axis_facecolor,
        axis_spinecolor=axis_spinecolor, layout_scale=layout_scale,
        legend=legend)
def _legend_axis(pos):
    """Add a legend axis to the bottom right.

    ``pos`` is the (n_axes, 4) array of [left, bottom, width, height]
    boxes of the data axes; the legend is placed at (max left, min bottom),
    shifted right if that corner lands inside an existing axes.
    """
    import matplotlib.pyplot as plt
    left = pos[:, 0].max()
    bottom = pos[:, 1].min()
    # if the candidate corner falls inside one of the data axes boxes,
    # shift the legend to the right of that box
    for box in pos:
        inside_h = box[0] <= left <= (box[0] + box[2])
        inside_v = box[1] <= bottom <= (box[1] + box[3])
        if inside_h and inside_v:
            left += 1.2 * box[2]
            break
    width, height = pos[-1, 2:]
    return plt.axes([left, bottom, width, height])
def _iter_topography(info, layout, on_pick, fig, fig_facecolor='k',
                     axis_facecolor='k', axis_spinecolor='k',
                     layout_scale=None, unified=False, img=False, axes=None,
                     legend=False):
    """Iterate over topography.

    Has the same parameters as iter_topography, plus:

    unified : bool
        If False (default), multiple matplotlib axes will be used.
        If True, a single axis will be constructed. The former is
        useful for custom plotting, the latter for speed.

    Yields ``(ax, ch_idx)`` pairs; in unified mode ``ax`` is a Bunch
    pointing at the shared axes with this channel's normalized box in
    ``ax.pos``.
    """
    from matplotlib import pyplot as plt, collections
    if fig is None:
        fig = plt.figure()

    def format_coord_unified(x, y, pos=None, ch_names=None):
        """Update status bar with channel name under cursor."""
        # find candidate channels (ones that are down and left from cursor)
        pdist = np.array([x, y]) - pos[:, :2]
        pind = np.where((pdist >= 0).all(axis=1))[0]
        if len(pind) > 0:
            # find the closest channel
            closest = pind[np.sum(pdist[pind, :]**2, axis=1).argmin()]
            # check whether we are inside its box
            in_box = (pdist[closest, :] < pos[closest, 2:]).all()
        else:
            in_box = False
        return (('%s (click to magnify)' % ch_names[closest]) if
                in_box else 'No channel here')

    def format_coord_multiaxis(x, y, ch_name=None):
        """Update status bar with channel name under cursor."""
        return '%s (click to magnify)' % ch_name

    fig.set_facecolor(fig_facecolor)
    if layout is None:
        layout = find_layout(info)
    if on_pick is not None:
        # forward button presses to the user callback via _plot_topo_onpick
        callback = partial(_plot_topo_onpick, show_func=on_pick)
        fig.canvas.mpl_connect('button_press_event', callback)
    pos = layout.pos.copy()
    if layout_scale:
        pos[:, :2] *= layout_scale
    ch_names = _clean_names(info['ch_names'])
    # only iterate layout entries that exist in info
    iter_ch = [(x, y) for x, y in enumerate(layout.names) if y in ch_names]
    if unified:
        # one big invisible axes hosts every channel "subplot"
        if axes is None:
            under_ax = plt.axes([0, 0, 1, 1])
            under_ax.axis('off')
        else:
            under_ax = axes
        under_ax.format_coord = partial(format_coord_unified, pos=pos,
                                        ch_names=layout.names)
        under_ax.set(xlim=[0, 1], ylim=[0, 1])
        axs = list()
    for idx, name in iter_ch:
        ch_idx = ch_names.index(name)
        if not unified:  # old, slow way
            ax = plt.axes(pos[idx])
            ax.patch.set_facecolor(axis_facecolor)
            for spine in ax.spines.values():
                spine.set_color(axis_spinecolor)
            if not legend:
                ax.set(xticklabels=[], yticklabels=[])
                for tick in ax.get_xticklines() + ax.get_yticklines():
                    tick.set_visible(False)
            # stash channel identity on the axes for the pick callback
            ax._mne_ch_name = name
            ax._mne_ch_idx = ch_idx
            ax._mne_ax_face_color = axis_facecolor
            ax.format_coord = partial(format_coord_multiaxis, ch_name=name)
            yield ax, ch_idx
        else:
            ax = Bunch(ax=under_ax, pos=pos[idx], data_lines=list(),
                       _mne_ch_name=name, _mne_ch_idx=ch_idx,
                       _mne_ax_face_color=axis_facecolor)
            axs.append(ax)
    if not unified and legend:
        # extra axis (index -1) for a user-constructed legend
        ax = _legend_axis(pos)
        yield ax, -1
    if unified:
        under_ax._mne_axs = axs
        # Create a PolyCollection for the axis backgrounds
        verts = np.transpose([pos[:, :2],
                              pos[:, :2] + pos[:, 2:] * [1, 0],
                              pos[:, :2] + pos[:, 2:],
                              pos[:, :2] + pos[:, 2:] * [0, 1],
                              ], [1, 0, 2])
        if not img:
            under_ax.add_collection(collections.PolyCollection(
                verts, facecolor=axis_facecolor, edgecolor=axis_spinecolor,
                linewidth=1.))  # Not needed for image plots.
        for ax in axs:
            yield ax, ax._mne_ch_idx
def _plot_topo(info, times, show_func, click_func=None, layout=None,
               vmin=None, vmax=None, ylim=None, colorbar=None, border='none',
               axis_facecolor='k', fig_facecolor='k', cmap='RdBu_r',
               layout_scale=None, title=None, x_label=None, y_label=None,
               font_color='w', unified=False, img=False, axes=None):
    """Plot on sensor layout.

    ``show_func`` draws one channel into its axes; ``click_func`` (defaults
    to ``show_func``) is used for the magnified single-channel view opened
    on click. Returns the matplotlib figure.
    """
    import matplotlib.pyplot as plt
    # normalize custom layouts into the unit square
    if layout.kind == 'custom':
        layout = deepcopy(layout)
        layout.pos[:, :2] -= layout.pos[:, :2].min(0)
        layout.pos[:, :2] /= layout.pos[:, :2].max(0)
    # prepare callbacks
    tmin, tmax = times[0], times[-1]
    click_func = show_func if click_func is None else click_func
    on_pick = partial(click_func, tmin=tmin, tmax=tmax, vmin=vmin,
                      vmax=vmax, ylim=ylim, x_label=x_label,
                      y_label=y_label)
    if axes is None:
        fig = plt.figure()
        axes = plt.axes([0.015, 0.025, 0.97, 0.95])
        axes.set_facecolor(fig_facecolor)
    else:
        fig = axes.figure
    if colorbar:
        # colorbar driven by a scalar mappable spanning [vmin, vmax]
        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin, vmax))
        sm.set_array(np.linspace(vmin, vmax))
        cb = fig.colorbar(sm, ax=axes, pad=0.025, fraction=0.075, shrink=0.5,
                          anchor=(-1, 0.5))
        cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
        plt.setp(cb_yticks, color=font_color)
    axes.axis('off')
    my_topo_plot = _iter_topography(info, layout=layout, on_pick=on_pick,
                                    fig=fig, layout_scale=layout_scale,
                                    axis_spinecolor=border,
                                    axis_facecolor=axis_facecolor,
                                    fig_facecolor=fig_facecolor,
                                    unified=unified, img=img, axes=axes)
    for ax, ch_idx in my_topo_plot:
        if layout.kind == 'Vectorview-all' and ylim is not None:
            # mag and grad carry separate limits; pick the right entry
            this_type = {'mag': 0, 'grad': 1}[channel_type(info, ch_idx)]
            ylim_ = [v[this_type] if _check_vlim(v) else v for v in ylim]
        else:
            ylim_ = ylim
        show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
                  vmax=vmax, ylim=ylim_)
    if title is not None:
        plt.figtext(0.03, 0.95, title, color=font_color, fontsize=15, va='top')
    return fig
def _plot_topo_onpick(event, show_func):
    """Onpick callback that shows a single channel in a new figure."""
    # make sure that the swipe gesture in OS-X doesn't open many figures
    orig_ax = event.inaxes
    import matplotlib.pyplot as plt
    try:
        if hasattr(orig_ax, '_mne_axs'):  # unified, single-axes mode
            x, y = event.xdata, event.ydata
            hit = None
            for bn in orig_ax._mne_axs:
                left, bottom, width, height = bn.pos
                if (left <= x <= left + width and
                        bottom <= y <= bottom + height):
                    hit = bn
                    break
            if hit is None:  # click landed outside every channel box
                return
            orig_ax = hit
        elif not hasattr(orig_ax, '_mne_ch_idx'):
            # neither old nor new mode
            return
        ch_idx = orig_ax._mne_ch_idx
        face_color = orig_ax._mne_ax_face_color
        fig, ax = plt.subplots(1)
        plt.title(orig_ax._mne_ch_name)
        ax.set_facecolor(face_color)
        # allow custom function to override parameters
        show_func(ax, ch_idx)
        plt_show(fig=fig)
    except Exception as err:
        # matplotlib silently ignores exceptions in event handlers,
        # so we print it here to know what went wrong
        print(err)
        raise
def _compute_ax_scalings(bn, xlim, ylim):
"""Compute scale factors for a unified plot."""
if isinstance(ylim[0], (tuple, list, np.ndarray)):
ylim = (ylim[0][0], ylim[1][0])
pos = bn.pos
bn.x_s = pos[2] / (xlim[1] - xlim[0])
bn.x_t = pos[0] - bn.x_s * xlim[0]
bn.y_s = pos[3] / (ylim[1] - ylim[0])
bn.y_t = pos[1] - bn.y_s * ylim[0]
def _check_vlim(vlim):
"""Check the vlim."""
return not np.isscalar(vlim) and vlim is not None
def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, onselect, ylim=None,
                tfr=None, freq=None, x_label=None, y_label=None,
                colorbar=False, cmap=('RdBu_r', True), yscale='auto',
                mask=None, mask_style="both", mask_cmap="Greys",
                mask_alpha=0.1, is_jointplot=False, cnorm=None):
    """Show time-frequency map as two-dimensional image.

    ``cmap`` is a ``(colormap, interactive)`` tuple; when interactive is
    True, a DraggableColorbar is attached so the color scale can be
    adjusted with the mouse. Returns the value that _plot_masked_image
    returns as ``t_end``.
    """
    from matplotlib import pyplot as plt
    from matplotlib.widgets import RectangleSelector
    _check_option('yscale', yscale, ['auto', 'linear', 'log'])
    cmap, interactive_cmap = cmap
    # reconstruct the time axis from tmin/tmax and the image width
    times = np.linspace(tmin, tmax, num=tfr[ch_idx].shape[1])
    img, t_end = _plot_masked_image(
        ax, tfr[ch_idx], times, mask, yvals=freq, cmap=cmap,
        vmin=vmin, vmax=vmax, mask_style=mask_style, mask_alpha=mask_alpha,
        mask_cmap=mask_cmap, yscale=yscale, cnorm=cnorm)
    if x_label is not None:
        ax.set_xlabel(x_label)
    if y_label is not None:
        ax.set_ylabel(y_label)
    if colorbar:
        if isinstance(colorbar, DraggableColorbar):
            cbar = colorbar.cbar  # this happens with multiaxes case
        else:
            cbar = plt.colorbar(mappable=img, ax=ax)
        if interactive_cmap:
            ax.CB = DraggableColorbar(cbar, img)
    ax.RS = RectangleSelector(ax, onselect=onselect)  # reference must be kept
    return t_end
def _imshow_tfr_unified(bn, ch_idx, tmin, tmax, vmin, vmax, onselect,
                        ylim=None, tfr=None, freq=None, vline=None,
                        x_label=None, y_label=None, colorbar=False,
                        picker=True, cmap='RdBu_r', title=None, hline=None):
    """Show multiple tfrs on topo using a single axes."""
    # map (time, frequency) data coordinates into this channel's
    # normalized box on the shared axes
    _compute_ax_scalings(bn, (tmin, tmax), (freq[0], freq[-1]))
    ax = bn.ax
    data_lines = bn.data_lines
    extent = (bn.x_t + bn.x_s * tmin, bn.x_t + bn.x_s * tmax,
              bn.y_t + bn.y_s * freq[0], bn.y_t + bn.y_s * freq[-1])
    # keep a reference to the image so it is not garbage-collected
    data_lines.append(ax.imshow(tfr[ch_idx], clip_on=True, clip_box=bn.pos,
                                extent=extent, aspect="auto", origin="lower",
                                vmin=vmin, vmax=vmax, cmap=cmap))
def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
                     times, vline=None, x_label=None, y_label=None,
                     colorbar=False, hline=None, hvline_color='w',
                     labels=None):
    """Show time series on topo split across multiple axes.

    ``data``, ``color`` and ``times`` are parallel sequences (one entry per
    condition); channel ``ch_idx`` of each dataset is drawn into ``ax``.
    Also wires up an interactive cursor line and a status-bar formatter.
    """
    import matplotlib.pyplot as plt
    picker_flag = False
    for data_, color_, times_ in zip(data, color, times):
        if not picker_flag:
            # use large tol for picker so we can click anywhere in the axes
            line = ax.plot(times_, data_[ch_idx], color=color_, picker=True)[0]
            line.set_pickradius(1e9)
            picker_flag = True
        else:
            ax.plot(times_, data_[ch_idx], color=color_)

    def _format_coord(x, y, labels, ax):
        """Create status string based on cursor coordinates."""
        # find indices for datasets near cursor (if any)
        tdiffs = [np.abs(tvec - x).min() for tvec in times]
        nearby = [k for k, tdiff in enumerate(tdiffs) if
                  tdiff < (tmax - tmin) / 100]
        # extract the unit from the axis label, e.g. "Time (s)" -> "s"
        xlabel = ax.get_xlabel()
        xunit = (xlabel[xlabel.find('(') + 1:xlabel.find(')')]
                 if '(' in xlabel and ')' in xlabel else 's')
        timestr = '%6.3f %s: ' % (x, xunit)
        if not nearby:
            return '%s Nothing here' % timestr
        labels = [''] * len(nearby) if labels is None else labels
        nearby_data = [(data[n], labels[n], times[n]) for n in nearby]
        ylabel = ax.get_ylabel()
        yunit = (ylabel[ylabel.find('(') + 1:ylabel.find(')')]
                 if '(' in ylabel and ')' in ylabel else '')
        # try to estimate whether to truncate condition labels
        slen = 9 + len(xunit) + sum([12 + len(yunit) + len(label)
                                     for label in labels])
        bar_width = (ax.figure.get_size_inches() * ax.figure.dpi)[0] / 5.5
        # show labels and y values for datasets near cursor
        trunc_labels = bar_width < slen
        s = timestr
        for data_, label, tvec in nearby_data:
            idx = np.abs(tvec - x).argmin()
            s += '%7.2f %s' % (data_[ch_idx, idx], yunit)
            if trunc_labels:
                label = (label if len(label) <= 10 else
                         '%s..%s' % (label[:6], label[-2:]))
            s += ' [%s] ' % label if label else ' '
        return s

    ax.format_coord = lambda x, y: _format_coord(x, y, labels=labels, ax=ax)

    def _cursor_vline(event):
        """Draw cursor (vertical line)."""
        ax = event.inaxes
        if not ax:
            return
        if ax._cursorline is not None:
            ax._cursorline.remove()
        ax._cursorline = ax.axvline(event.xdata, color=ax._cursorcolor)
        ax.figure.canvas.draw()

    def _rm_cursor(event):
        # remove the cursor line when the mouse leaves the axes
        ax = event.inaxes
        if ax._cursorline is not None:
            ax._cursorline.remove()
            ax._cursorline = None
        ax.figure.canvas.draw()

    ax._cursorline = None
    # choose cursor color based on perceived brightness of background
    facecol = _to_rgb(ax.get_facecolor())
    face_brightness = np.dot(facecol, [299, 587, 114])
    ax._cursorcolor = 'white' if face_brightness < 150 else 'black'
    plt.connect('motion_notify_event', _cursor_vline)
    plt.connect('axes_leave_event', _rm_cursor)
    ymin, ymax = ax.get_ylim()
    # don't pass vline or hline here (this fxn doesn't do hvline_color):
    _setup_ax_spines(ax, [], tmin, tmax, ymin, ymax, hline=False)
    ax.figure.set_facecolor('k' if hvline_color == 'w' else 'w')
    ax.spines['bottom'].set_color(hvline_color)
    ax.spines['left'].set_color(hvline_color)
    ax.tick_params(axis='x', colors=hvline_color, which='both')
    ax.tick_params(axis='y', colors=hvline_color, which='both')
    ax.title.set_color(hvline_color)
    ax.xaxis.label.set_color(hvline_color)
    ax.yaxis.label.set_color(hvline_color)
    if x_label is not None:
        ax.set_xlabel(x_label)
    if y_label is not None:
        if isinstance(y_label, list):
            ax.set_ylabel(y_label[ch_idx])
        else:
            ax.set_ylabel(y_label)
    if vline:
        plt.axvline(vline, color=hvline_color, linewidth=1.0,
                    linestyle='--')
    if hline:
        plt.axhline(hline, color=hvline_color, linewidth=1.0, zorder=10)
    if colorbar:
        plt.colorbar()
def _plot_timeseries_unified(bn, ch_idx, tmin, tmax, vmin, vmax, ylim, data,
                             color, times, vline=None, x_label=None,
                             y_label=None, colorbar=False, hline=None,
                             hvline_color='w'):
    """Show multiple time series on topo using a single axes.

    ``bn`` is the axes bunch holding the shared axes (``bn.ax``), this
    channel's normalized box (``bn.pos``) and the accumulated artists
    (``bn.data_lines``); ``data``, ``color`` and ``times`` are parallel
    sequences, one entry per condition.
    """
    import matplotlib.pyplot as plt
    # fall back to the global data range when no valid ylim was given
    if not (ylim and not any(v is None for v in ylim)):
        ylim = [min(np.min(d) for d in data), max(np.max(d) for d in data)]
    # Translation and scale parameters to take data->under_ax normalized coords
    _compute_ax_scalings(bn, (tmin, tmax), ylim)
    pos = bn.pos
    data_lines = bn.data_lines
    ax = bn.ax
    # XXX These calls could probably be made faster by using collections
    for data_, color_, times_ in zip(data, color, times):
        data_lines.append(ax.plot(
            bn.x_t + bn.x_s * times_, bn.y_t + bn.y_s * data_[ch_idx],
            linewidth=0.5, color=color_, clip_on=True, clip_box=pos)[0])
    if vline:
        # vertical marker(s), mapped into normalized coordinates
        vline = np.array(vline) * bn.x_s + bn.x_t
        ax.vlines(vline, pos[1], pos[1] + pos[3], color=hvline_color,
                  linewidth=0.5, linestyle='--')
    if hline:
        hline = np.array(hline) * bn.y_s + bn.y_t
        ax.hlines(hline, pos[0], pos[0] + pos[2], color=hvline_color,
                  linewidth=0.5)
    if x_label is not None:
        ax.text(pos[0] + pos[2] / 2., pos[1], x_label,
                horizontalalignment='center', verticalalignment='top')
    if y_label is not None:
        y_label = y_label[ch_idx] if isinstance(y_label, list) else y_label
        # bugfix: 'horizontalignment' was a typo; matplotlib rejects unknown
        # text properties, so drawing any y label raised AttributeError
        ax.text(pos[0], pos[1] + pos[3] / 2., y_label,
                horizontalalignment='right', verticalalignment='middle',
                rotation=90)
    if colorbar:
        plt.colorbar()
def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None, data=None,
                     epochs=None, sigma=None, order=None, scalings=None,
                     vline=None, x_label=None, y_label=None, colorbar=False,
                     cmap='RdBu_r', vlim_array=None):
    """Plot erfimage on sensor topography.

    Draws an epochs-by-time image for channel ``ch_idx`` of ``data``
    (shape (n_epochs, n_channels, n_times)), optionally reordered and
    smoothed across epochs.
    """
    from scipy import ndimage
    import matplotlib.pyplot as plt
    this_data = data[:, ch_idx, :]
    if vlim_array is not None:
        # per-channel color limits override the global vmin/vmax
        vmin, vmax = vlim_array[ch_idx]
    if callable(order):
        # order may be a callable computing an epoch ordering from the data
        order = order(epochs.times, this_data)
    if order is not None:
        this_data = this_data[order]
    if sigma > 0.:
        # smooth across epochs (the vertical axis of the image)
        this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
    img = ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)],
                    aspect='auto', origin='lower', vmin=vmin, vmax=vmax,
                    picker=True, cmap=cmap, interpolation='nearest')
    # NOTE(review): rebinds ax to the current axes -- presumably the same
    # one that was passed in; confirm whether this line is still needed
    ax = plt.gca()
    if x_label is not None:
        ax.set_xlabel(x_label)
    if y_label is not None:
        ax.set_ylabel(y_label)
    if colorbar:
        plt.colorbar(mappable=img)
def _erfimage_imshow_unified(bn, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
                             data=None, epochs=None, sigma=None, order=None,
                             scalings=None, vline=None, x_label=None,
                             y_label=None, colorbar=False, cmap='RdBu_r',
                             vlim_array=None):
    """Plot erfimage topography using a single axis.

    Same drawing as _erfimage_imshow, but the image is placed inside this
    channel's normalized box on the shared axes (``bn.ax``).
    """
    from scipy import ndimage
    # map (time, epoch index) data coordinates into the channel's box
    _compute_ax_scalings(bn, (tmin, tmax), (0, len(epochs.events)))
    ax = bn.ax
    data_lines = bn.data_lines
    extent = (bn.x_t + bn.x_s * tmin, bn.x_t + bn.x_s * tmax, bn.y_t,
              bn.y_t + bn.y_s * len(epochs.events))
    this_data = data[:, ch_idx, :]
    # per-channel color limits, when provided
    vmin, vmax = (None, None) if vlim_array is None else vlim_array[ch_idx]
    if callable(order):
        order = order(epochs.times, this_data)
    if order is not None:
        this_data = this_data[order]
    if sigma > 0.:
        # smooth across epochs (the vertical axis of the image)
        this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
    # keep a reference to the image so it is not garbage-collected
    data_lines.append(ax.imshow(this_data, extent=extent, aspect='auto',
                                origin='lower', vmin=vmin, vmax=vmax,
                                picker=True, cmap=cmap,
                                interpolation='nearest'))
def _plot_evoked_topo(evoked, layout=None, layout_scale=0.945,
color=None, border='none', ylim=None, scalings=None,
title=None, proj=False, vline=(0.,), hline=(0.,),
fig_facecolor='k', fig_background=None,
axis_facecolor='k', font_color='w', merge_channels=False,
legend=True, axes=None, exclude='bads', show=True,
noise_cov=None):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
Matplotlib borders style to be used for each sensor plot.
ylim : dict | None
ylim for plots (after scaling has been applied). The value
determines the upper and lower subplot limits. e.g.
ylim = dict(eeg=[-20, 20]). Valid keys are eeg, mag, grad. If None,
the ylim parameter for each channel type is determined by the minimum
and maximum peak.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,`
defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
title : str
Title of the figure.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
vline : list of floats | None
The values at which to show a vertical line.
hline : list of floats | None
The values at which to show a horizontal line.
fig_facecolor : color
The figure face color. Defaults to black.
fig_background : None | array
A background image for the figure. This must be a valid input to
`matplotlib.pyplot.imshow`. Defaults to None.
axis_facecolor : color
The face color to be used for each sensor plot. Defaults to black.
font_color : color
The color of text in the colorbar and title. Defaults to white.
merge_channels : bool
Whether to use RMS value of gradiometer pairs. Only works for Neuromag
data. Defaults to False.
legend : bool | int | string | tuple
If True, create a legend based on evoked.comment. If False, disable the
legend. Otherwise, the legend is created and the parameter value is
passed as the location parameter to the matplotlib legend call. It can
be an integer (e.g. 0 corresponds to upper right corner of the plot),
a string (e.g. 'upper right'), or a tuple (x, y coordinates of the
lower left corner of the legend in the axes coordinate system).
See matplotlib documentation for more details.
axes : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channels names are shown in italic.
Can be a string to load a covariance from disk.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded. By default, exclude is set to 'bads'.
show : bool
Show figure if True.
.. versionadded:: 0.16.0
Returns
-------
fig : instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
"""
import matplotlib.pyplot as plt
from ..cov import whiten_evoked
if not type(evoked) in (tuple, list):
evoked = [evoked]
noise_cov = _check_cov(noise_cov, evoked[0].info)
if noise_cov is not None:
evoked = [whiten_evoked(e, noise_cov) for e in evoked]
else:
evoked = [e.copy() for e in evoked]
info = evoked[0].info
ch_names = evoked[0].ch_names
scalings = _handle_default('scalings', scalings)
if not all(e.ch_names == ch_names for e in evoked):
raise ValueError('All evoked.picks must be the same')
ch_names = _clean_names(ch_names)
if merge_channels:
picks = _pair_grad_sensors(info, topomap_coords=False, exclude=exclude)
chs = list()
for pick in picks[::2]:
ch = info['chs'][pick]
ch['ch_name'] = ch['ch_name'][:-1] + 'X'
chs.append(ch)
with info._unlock(update_redundant=True, check_after=True):
info['chs'] = chs
info['bads'] = list() # Bads handled by pair_grad_sensors
new_picks = list()
for e in evoked:
data, _ = _merge_ch_data(e.data[picks], 'grad', [])
if noise_cov is None:
data *= scalings['grad']
e.data = data
new_picks.append(range(len(data)))
picks = new_picks
types_used = ['grad']
unit = _handle_default('units')['grad'] if noise_cov is None else 'NA'
y_label = 'RMS amplitude (%s)' % unit
if layout is None:
layout = find_layout(info, exclude=exclude)
if not merge_channels:
# XXX. at the moment we are committed to 1- / 2-sensor-types layouts
chs_in_layout = [ch_name for ch_name in ch_names
if ch_name in layout.names]
types_used = [channel_type(info, ch_names.index(ch))
for ch in chs_in_layout]
# Using dict conversion to remove duplicates
types_used = list(dict.fromkeys(types_used))
# remove possible reference meg channels
types_used = [types_used for types_used in types_used
if types_used != 'ref_meg']
# one check for all vendors
is_meg = len([x for x in types_used if x in ['mag', 'grad']]) > 0
is_nirs = len([x for x in types_used if x in
('hbo', 'hbr', 'fnirs_cw_amplitude', 'fnirs_od')]) > 0
if is_meg:
types_used = list(types_used)[::-1] # -> restore kwarg order
picks = [pick_types(info, meg=kk, ref_meg=False, exclude=exclude)
for kk in types_used]
elif is_nirs:
types_used = list(types_used)[::-1] # -> restore kwarg order
picks = [pick_types(info, fnirs=kk, ref_meg=False, exclude=exclude)
for kk in types_used]
else:
types_used_kwargs = {t: True for t in types_used}
picks = [pick_types(info, meg=False, exclude=exclude,
**types_used_kwargs)]
assert isinstance(picks, list) and len(types_used) == len(picks)
if noise_cov is None:
for e in evoked:
for pick, ch_type in zip(picks, types_used):
e.data[pick] *= scalings[ch_type]
if proj is True and all(e.proj is not True for e in evoked):
evoked = [e.apply_proj() for e in evoked]
elif proj == 'interactive': # let it fail early.
for e in evoked:
_check_delayed_ssp(e)
# Y labels for picked plots must be reconstructed
y_label = list()
for ch_idx in range(len(chs_in_layout)):
if noise_cov is None:
unit = _handle_default('units')[channel_type(info, ch_idx)]
else:
unit = 'NA'
y_label.append('Amplitude (%s)' % unit)
if ylim is None:
# find minima and maxima over all evoked data for each channel pick
ymaxes = np.array([max((e.data[t]).max() for e in evoked)
for t in picks])
ymins = np.array([min((e.data[t]).min() for e in evoked)
for t in picks])
ylim_ = (ymins, ymaxes)
elif isinstance(ylim, dict):
ylim_ = _handle_default('ylim', ylim)
ylim_ = [ylim_[kk] for kk in types_used]
# extra unpack to avoid bug #1700
if len(ylim_) == 1:
ylim_ = ylim_[0]
else:
ylim_ = [np.array(yl) for yl in ylim_]
# Transposing to avoid Zipping confusion
if is_meg or is_nirs:
ylim_ = list(map(list, zip(*ylim_)))
else:
raise TypeError('ylim must be None or a dict. Got %s.' % type(ylim))
data = [e.data for e in evoked]
comments = [e.comment for e in evoked]
times = [e.times for e in evoked]
show_func = partial(_plot_timeseries_unified, data=data, color=color,
times=times, vline=vline, hline=hline,
hvline_color=font_color)
click_func = partial(_plot_timeseries, data=data, color=color, times=times,
vline=vline, hline=hline, hvline_color=font_color,
labels=comments)
time_min = min([t[0] for t in times])
time_max = max([t[-1] for t in times])
fig = _plot_topo(info=info, times=[time_min, time_max],
show_func=show_func, click_func=click_func, layout=layout,
colorbar=False, ylim=ylim_, cmap=None,
layout_scale=layout_scale, border=border,
fig_facecolor=fig_facecolor, font_color=font_color,
axis_facecolor=axis_facecolor, title=title,
x_label='Time (s)', y_label=y_label, unified=True,
axes=axes)
add_background_image(fig, fig_background)
if legend is not False:
legend_loc = 0 if legend is True else legend
labels = [e.comment if e.comment else 'Unknown' for e in evoked]
handles = fig.axes[0].lines[:len(evoked)]
legend = plt.legend(
labels=labels,
handles=handles,
loc=legend_loc,
prop={'size': 10})
legend.get_frame().set_facecolor(axis_facecolor)
txts = legend.get_texts()
for txt, col in zip(txts, color):
txt.set_color(col)
if proj == 'interactive':
for e in evoked:
_check_delayed_ssp(e)
params = dict(evokeds=evoked, times=times,
plot_update_proj_callback=_plot_update_evoked_topo_proj,
projs=evoked[0].info['projs'], fig=fig)
_draw_proj_checkbox(None, params)
plt_show(show)
return fig
def _plot_update_evoked_topo_proj(params, bools):
    """Re-project the evoked data and refresh the topo sensor traces."""
    fig = params['fig']
    params['proj_bools'] = bools
    # Keep only the projectors whose checkbox is currently ticked
    active = [proj for proj, keep in zip(params['projs'], bools) if keep]
    updated = []
    for evoked in params['evokeds']:
        ev = evoked.copy()
        ev.add_proj(active, remove_existing=True)
        ev.apply_proj()
        updated.append(ev)
    # Only the plotted time courses change; ticks/limits are left untouched
    for ax in fig.axes[0]._mne_axs:
        for line, ev in zip(ax.data_lines, updated):
            line.set_ydata(ax.y_t + ax.y_s * ev.data[ax._mne_ch_idx])
    fig.canvas.draw()
def plot_topo_image_epochs(epochs, layout=None, sigma=0., vmin=None,
                           vmax=None, colorbar=None, order=None, cmap='RdBu_r',
                           layout_scale=.95, title=None, scalings=None,
                           border='none', fig_facecolor='k',
                           fig_background=None, font_color='w', show=True):
    """Plot Event Related Potential / Fields image on topographies.

    Parameters
    ----------
    epochs : instance of :class:`~mne.Epochs`
        The epochs.
    layout : instance of Layout
        System specific sensor positions.
    sigma : float
        The standard deviation of the Gaussian smoothing to apply along
        the epoch axis to apply in the image. If 0., no smoothing is applied.
    vmin : float
        The min value in the image. The unit is µV for EEG channels,
        fT for magnetometers and fT/cm for gradiometers.
    vmax : float
        The max value in the image. The unit is µV for EEG channels,
        fT for magnetometers and fT/cm for gradiometers.
    colorbar : bool | None
        Whether to display a colorbar or not. If ``None`` a colorbar will be
        shown only if all channels are of the same type. Defaults to ``None``.
    order : None | array of int | callable
        If not None, order is used to reorder the epochs on the y-axis
        of the image. If it's an array of int it should be of length
        the number of good epochs. If it's a callable the arguments
        passed are the times vector and the data as 2d array
        (data.shape[1] == len(times)).
    cmap : colormap
        Colors to be mapped to the values.
    layout_scale : float
        Scaling factor for adjusting the relative size of the layout
        on the canvas.
    title : str
        Title of the figure.
    scalings : dict | None
        The scalings of the channel types to be applied for plotting. If
        ``None``, defaults to ``dict(eeg=1e6, grad=1e13, mag=1e15)``.
    border : str
        Matplotlib borders style to be used for each sensor plot.
    fig_facecolor : color
        The figure face color. Defaults to black.
    fig_background : None | array
        A background image for the figure. This must be a valid input to
        :func:`matplotlib.pyplot.imshow`. Defaults to ``None``.
    font_color : color
        The color of tick labels in the colorbar. Defaults to white.
    show : bool
        Whether to show the figure. Defaults to ``True``.

    Returns
    -------
    fig : instance of :class:`matplotlib.figure.Figure`
        Figure distributing one image per channel across sensor topography.

    Notes
    -----
    In an interactive Python session, this plot will be interactive; clicking
    on a channel image will pop open a larger view of the image; this image
    will always have a colorbar even when the topo plot does not (because it
    shows multiple sensor types).
    """
    scalings = _handle_default('scalings', scalings)
    # make a copy because we discard non-data channels and scale the data
    epochs = epochs.copy().load_data()
    # use layout to subset channels present in epochs object
    if layout is None:
        layout = find_layout(epochs.info)
    # NOTE(review): set intersection makes the picking order arbitrary;
    # downstream lookups appear to be by channel name, so this looks
    # harmless — confirm.
    ch_names = set(layout.names) & set(epochs.ch_names)
    idxs = [epochs.ch_names.index(ch_name) for ch_name in ch_names]
    epochs = epochs.pick(idxs)
    # get lists of channel type & scale coefficient
    ch_types = epochs.get_channel_types()
    scale_coeffs = [scalings.get(ch_type, 1) for ch_type in ch_types]
    # scale the data (in-place on the local copy; broadcasts over epochs)
    epochs._data *= np.array(scale_coeffs)[:, np.newaxis]
    data = epochs.get_data()
    # get vlims for each channel type
    vlim_dict = dict()
    for ch_type in set(ch_types):
        # np.where returns a tuple, which adds a singleton axis to
        # this_data; harmless because only min/max are taken from it
        this_data = data[:, np.where(np.array(ch_types) == ch_type)]
        vlim_dict[ch_type] = _setup_vmin_vmax(this_data, vmin, vmax)
    vlim_array = np.array([vlim_dict[ch_type] for ch_type in ch_types])
    # only show colorbar if we have a single channel type
    if colorbar is None:
        colorbar = (len(set(ch_types)) == 1)
    # if colorbar=True, we know we have only 1 channel type so all entries
    # in vlim_array are the same, just take the first one
    if colorbar and vmin is None and vmax is None:
        vmin, vmax = vlim_array[0]
    # show_func draws the small per-sensor images; erf_imshow draws the
    # enlarged single-channel view on click (always with a colorbar)
    show_func = partial(_erfimage_imshow_unified, scalings=scale_coeffs,
                        order=order, data=data, epochs=epochs, sigma=sigma,
                        cmap=cmap, vlim_array=vlim_array)
    erf_imshow = partial(_erfimage_imshow, scalings=scale_coeffs, order=order,
                         data=data, epochs=epochs, sigma=sigma, cmap=cmap,
                         vlim_array=vlim_array, colorbar=True)
    fig = _plot_topo(info=epochs.info, times=epochs.times,
                     click_func=erf_imshow, show_func=show_func, layout=layout,
                     colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
                     layout_scale=layout_scale, title=title,
                     fig_facecolor=fig_facecolor, font_color=font_color,
                     border=border, x_label='Time (s)', y_label='Epoch',
                     unified=True, img=True)
    add_background_image(fig, fig_background)
    plt_show(show)
    return fig
| bsd-3-clause | a3f97a57e2ca7b81397540154dab7bef | 40.461053 | 79 | 0.578831 | 3.600037 | false | false | false | false |
mne-tools/mne-python | mne/preprocessing/nirs/tests/test_scalp_coupling_index.py | 10 | 2725 | # Authors: Robert Luke <mail@robertluke.net>
# Eric Larson <larson.eric.d@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import os.path as op
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_less
from mne.datasets.testing import data_path
from mne.io import read_raw_nirx
from mne.preprocessing.nirs import optical_density, scalp_coupling_index,\
beer_lambert_law
from mne.datasets import testing
# Paths to the NIRx NIRScout recordings in the MNE testing dataset
# (download=False: presumably just resolves the cached path without
# fetching — confirm against mne.datasets behavior).
fname_nirx_15_0 = op.join(data_path(download=False),
                          'NIRx', 'nirscout', 'nirx_15_0_recording')
fname_nirx_15_2 = op.join(data_path(download=False),
                          'NIRx', 'nirscout', 'nirx_15_2_recording')
fname_nirx_15_2_short = op.join(data_path(download=False),
                                'NIRx', 'nirscout',
                                'nirx_15_2_recording_w_short')
@testing.requires_testing_data
@pytest.mark.parametrize('fname', ([fname_nirx_15_2_short, fname_nirx_15_2,
                                    fname_nirx_15_0]))
@pytest.mark.parametrize('fmt', ('nirx', 'fif'))
def test_scalp_coupling_index(fname, fmt, tmp_path):
    """Test the scalp coupling index on NIRx recordings."""
    assert fmt in ('nirx', 'fif')
    raw = read_raw_nirx(fname)
    # SCI is only defined on optical-density data: raw intensity must fail
    with pytest.raises(RuntimeError, match='Scalp'):
        scalp_coupling_index(raw)
    raw = optical_density(raw)
    sci = scalp_coupling_index(raw)
    # Every value must lie within [-1, 1]
    assert_array_less(sci, 1.0)
    assert_array_less(sci * -1.0, 1.0)
    # Overwrite the first channels with signals of known correlation
    rng = np.random.RandomState(0)
    n_times = raw._data[0].shape[0]
    signal = rng.rand(n_times)
    raw._data[0] = signal           # 0/1: identical -> sci == 1
    raw._data[1] = signal
    raw._data[2] = signal           # 2/3: scaled copy -> scale invariance
    raw._data[3] = signal * 0.3
    raw._data[4] = signal           # 4/5: sign flipped -> sci == -1
    raw._data[5] = signal * -1.0
    raw._data[6] = signal           # 6/7: independent noise -> |sci| small
    raw._data[7] = rng.rand(n_times)
    # 8-11: constant channels (zero variance) -> sci == 0
    raw._data[8] = 0.
    raw._data[9] = 1.
    raw._data[10] = 2.
    raw._data[11] = 3.
    # Check the computed values against the constructed correlations
    sci = scalp_coupling_index(raw)
    assert_allclose(sci[0:6], [1, 1, 1, 1, -1, -1], atol=0.01)
    assert np.abs(sci[6]) < 0.5
    assert np.abs(sci[7]) < 0.5
    assert_allclose(sci[8:12], 0, atol=1e-10)
    # Haemoglobin data is rejected just like raw intensity
    raw = beer_lambert_law(raw, ppf=6)
    with pytest.raises(RuntimeError, match='Scalp'):
        scalp_coupling_index(raw)
| bsd-3-clause | 9d01d1e397dac8a9ea69a49e2cf9e17e | 34.855263 | 75 | 0.625688 | 2.945946 | false | true | false | false |
mne-tools/mne-python | mne/misc.py | 11 | 2970 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Scott Burns <sburns@nmr.mgh.harvard.edu>
#
# License: BSD-3-Clause
def parse_config(fname):
    """Parse a config file (like .ave and .cov files).

    Parameters
    ----------
    fname : str
        Config file name.

    Returns
    -------
    conditions : list of dict
        Each condition is indexed by the event type.
        A condition contains as keys::

            tmin, tmax, name, grad_reject, mag_reject,
            eeg_reject, eog_reject
    """
    reject_params = read_reject_parameters(fname)
    with open(fname, 'r') as f:
        lines = f.readlines()
    # Each condition starts with a "category {" line; its parameters live
    # on the six lines that follow.
    starts = [idx for idx, line in enumerate(lines) if "category {" in line]
    event_dict = dict()
    for start in starts:
        block = range(start + 1, start + 7)
        # First pass: locate the mandatory event id for this category.
        event = None
        for k in block:
            words = lines[k].split()
            if len(words) >= 2 and words[0] == 'event':
                event = int(words[1])
                break
        if event is None:
            raise ValueError('Could not find event id.')
        event_dict[event] = dict(**reject_params)
        # Second pass: collect the name and the timing parameters.
        for k in block:
            words = lines[k].split()
            if len(words) < 2:
                continue
            key = words[0]
            if key == 'name':
                name = ' '.join(words[1:])
                # Strip one pair of surrounding double quotes, if present.
                if name[0] == '"':
                    name = name[1:]
                if name[-1] == '"':
                    name = name[:-1]
                event_dict[event]['name'] = name
            elif key in ('tmin', 'tmax', 'basemin', 'basemax'):
                event_dict[event][key] = float(words[1])
    return event_dict
def read_reject_parameters(fname):
    """Read rejection parameters from .cov or .ave config file.

    Parameters
    ----------
    fname : str
        Filename to read.

    Returns
    -------
    params : dict
        The rejection parameters, keyed by MNE channel type
        ('grad', 'mag', 'eeg', 'eog', 'ecg').
    """
    with open(fname, 'r') as f:
        lines = f.readlines()

    # Map config-file keys to the MNE channel-type names used in the result
    name_map = dict(gradReject='grad', magReject='mag', eegReject='eeg',
                    eogReject='eog', ecgReject='ecg')
    reject = dict()
    for line in lines:
        words = line.split()
        # Skip blank lines; previously ``words[0]`` raised IndexError on them
        if words and words[0] in name_map:
            reject[name_map[words[0]]] = float(words[1])
    return reject
def read_flat_parameters(fname):
    """Read flat channel rejection parameters from .cov or .ave config file.

    Parameters
    ----------
    fname : str
        Filename to read.

    Returns
    -------
    params : dict
        The flat-channel rejection parameters, keyed by MNE channel type
        ('grad', 'mag', 'eeg', 'eog', 'ecg').
    """
    with open(fname, 'r') as f:
        lines = f.readlines()

    # Map config-file keys to the MNE channel-type names used in the result
    name_map = dict(gradFlat='grad', magFlat='mag', eegFlat='eeg',
                    eogFlat='eog', ecgFlat='ecg')
    flat = dict()
    for line in lines:
        words = line.split()
        # Skip blank lines; previously ``words[0]`` raised IndexError on them
        if words and words[0] in name_map:
            flat[name_map[words[0]]] = float(words[1])
    return flat
| bsd-3-clause | 4daa7b0d6e9ff578a720e2ec6d6127db | 28.405941 | 79 | 0.506061 | 3.740554 | false | false | false | false |
mne-tools/mne-python | examples/decoding/decoding_spoc_CMC.py | 9 | 3006 | # -*- coding: utf-8 -*-
"""
.. _ex-spoc-cmc:
====================================
Continuous Target Decoding with SPoC
====================================
Source Power Comodulation (SPoC) :footcite:`DahneEtAl2014` allows to identify
the composition of
orthogonal spatial filters that maximally correlate with a continuous target.
SPoC can be seen as an extension of the CSP for continuous variables.
Here, SPoC is applied to decode the (continuous) fluctuation of an
electromyogram from MEG beta activity using data from
`Cortico-Muscular Coherence example of FieldTrip
<http://www.fieldtriptoolbox.org/tutorial/coherence>`_
"""
# Author: Alexandre Barachant <alexandre.barachant@gmail.com>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD-3-Clause
# %%
import matplotlib.pyplot as plt
import mne
from mne import Epochs
from mne.decoding import SPoC
from mne.datasets.fieldtrip_cmc import data_path
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold, cross_val_predict
# Define parameters
fname = data_path() / 'SubjectCMC.ds'
raw = mne.io.read_raw_ctf(fname)
raw.crop(50., 200.) # crop for memory purposes
# Filter muscular activity to only keep high frequencies
emg = raw.copy().pick_channels(['EMGlft']).load_data()
emg.filter(20., None)
# Filter MEG data to focus on beta band
raw.pick_types(meg=True, ref_meg=True, eeg=False, eog=False).load_data()
raw.filter(15., 30.)
# Build epochs as sliding windows over the continuous raw file
events = mne.make_fixed_length_events(raw, id=1, duration=0.75)
# Epoch length is 1.5 second
meg_epochs = Epochs(raw, events, tmin=0., tmax=1.5, baseline=None,
detrend=1, decim=12)
emg_epochs = Epochs(emg, events, tmin=0., tmax=1.5, baseline=None)
# Prepare classification
X = meg_epochs.get_data()
y = emg_epochs.get_data().var(axis=2)[:, 0] # target is EMG power
# Classification pipeline with SPoC spatial filtering and Ridge Regression
spoc = SPoC(n_components=2, log=True, reg='oas', rank='full')
clf = make_pipeline(spoc, Ridge())
# Define a two fold cross-validation
cv = KFold(n_splits=2, shuffle=False)
# Run cross validaton
y_preds = cross_val_predict(clf, X, y, cv=cv)
# Plot the True EMG power and the EMG power predicted from MEG data
fig, ax = plt.subplots(1, 1, figsize=[10, 4])
times = raw.times[meg_epochs.events[:, 0] - raw.first_samp]
ax.plot(times, y_preds, color='b', label='Predicted EMG')
ax.plot(times, y, color='r', label='True EMG')
ax.set_xlabel('Time (s)')
ax.set_ylabel('EMG Power')
ax.set_title('SPoC MEG Predictions')
plt.legend()
mne.viz.tight_layout()
plt.show()
##############################################################################
# Plot the contributions to the detected components (i.e., the forward model)
spoc.fit(X, y)
spoc.plot_patterns(meg_epochs.info)
##############################################################################
# References
# ----------
# .. footbibliography::
| bsd-3-clause | eb45d7d343835bfb98c3d8c738b8e8bb | 31.322581 | 78 | 0.672655 | 3.15425 | false | false | false | false |
mne-tools/mne-python | mne/io/snirf/_snirf.py | 3 | 21426 | # Authors: Robert Luke <mail@robertluke.net>
#
# License: BSD-3-Clause
import re
import numpy as np
import datetime
from ..base import BaseRaw
from ..meas_info import create_info, _format_dig_points
from ..utils import _mult_cal_one
from ...annotations import Annotations
from ...utils import (logger, verbose, fill_doc, warn, _check_fname,
_import_h5py)
from ..constants import FIFF
from .._digitization import _make_dig_points
from ...transforms import _frame_to_str, apply_trans
from ..nirx.nirx import _convert_fnirs_to_head
from ..._freesurfer import get_mni_fiducials
@fill_doc
def read_raw_snirf(fname, optode_frame="unknown", preload=False, verbose=None):
    """Reader for a continuous wave SNIRF data.

    .. note:: This reader supports the .snirf file type only,
              not the .jnirs version.
              Files with either 3D or 2D locations can be read.
              However, we strongly recommend using 3D positions.
              If 2D positions are used the behaviour of MNE functions
              can not be guaranteed.

    Parameters
    ----------
    fname : str
        Path to the SNIRF data file.
    optode_frame : str
        Coordinate frame used for the optode positions. The default is unknown,
        in which case the positions are not modified. If a known coordinate
        frame is provided (head, meg, mri), then the positions are transformed
        in to the Neuromag head coordinate frame (head).
    %(preload)s
    %(verbose)s

    Returns
    -------
    raw : instance of RawSNIRF
        A Raw object containing fNIRS data.

    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.
    """
    # All parsing and validation happens in RawSNIRF.__init__
    return RawSNIRF(fname, optode_frame, preload, verbose)
def _open(fname):
    # Open a file as latin-1 text.
    # NOTE(review): this helper is not referenced anywhere else in this
    # module; it may be kept for external/test use — confirm before removing.
    return open(fname, 'r', encoding='latin-1')
@fill_doc
class RawSNIRF(BaseRaw):
    """Raw object from a continuous wave SNIRF file.

    Parameters
    ----------
    fname : str
        Path to the SNIRF data file.
    optode_frame : str
        Coordinate frame used for the optode positions. The default is unknown,
        in which case the positions are not modified. If a known coordinate
        frame is provided (head, meg, mri), then the positions are transformed
        in to the Neuromag head coordinate frame (head).
    %(preload)s
    %(verbose)s

    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.
    """

    @verbose
    def __init__(self, fname, optode_frame="unknown",
                 preload=False, verbose=None):
        # Must be here due to circular import error
        from ...preprocessing.nirs import _validate_nirs_info
        h5py = _import_h5py()
        fname = _check_fname(fname, 'read', True, 'fname')
        logger.info('Loading %s' % fname)
        with h5py.File(fname, 'r') as dat:
            if 'data2' in dat['nirs']:
                warn("File contains multiple recordings. "
                     "MNE does not support this feature. "
                     "Only the first dataset will be processed.")
            manufacturer = _get_metadata_str(dat, "ManufacturerName")
            # Gowerlabs files get head coordinates when no frame was given
            # (bitwise & on the two bools acts as a logical "and" here)
            if (optode_frame == "unknown") & (manufacturer == "Gowerlabs"):
                optode_frame = "head"
            snirf_data_type = np.array(dat.get('nirs/data1/measurementList1'
                                               '/dataType')).item()
            if snirf_data_type not in [1, 99999]:
                # 1 = Continuous Wave
                # 99999 = Processed
                raise RuntimeError('MNE only supports reading continuous'
                                   ' wave amplitude and processed haemoglobin'
                                   ' SNIRF files. Expected type'
                                   ' code 1 or 99999 but received type '
                                   f'code {snirf_data_type}')
            last_samps = dat.get('/nirs/data1/dataTimeSeries').shape[0] - 1
            sampling_rate = _extract_sampling_rate(dat)
            if sampling_rate == 0:
                warn("Unable to extract sample rate from SNIRF file.")
            # Extract wavelengths
            fnirs_wavelengths = np.array(dat.get('nirs/probe/wavelengths'))
            fnirs_wavelengths = [int(w) for w in fnirs_wavelengths]
            if len(fnirs_wavelengths) != 2:
                raise RuntimeError(f'The data contains '
                                   f'{len(fnirs_wavelengths)}'
                                   f' wavelengths: {fnirs_wavelengths}. '
                                   f'MNE only supports reading continuous'
                                   ' wave amplitude SNIRF files '
                                   'with two wavelengths.')
            # Extract channels: they live under nirs/data1/measurementList<k>;
            # sort them numerically rather than lexicographically
            def atoi(text):
                return int(text) if text.isdigit() else text

            def natural_keys(text):
                return [atoi(c) for c in re.split(r'(\d+)', text)]

            channels = np.array([name for name in dat['nirs']['data1'].keys()])
            channels_idx = np.array(['measurementList' in n for n in channels])
            channels = channels[channels_idx]
            channels = sorted(channels, key=natural_keys)
            # Source and detector labels are optional fields.
            # Use S1, S2, S3, etc if not specified.
            if 'sourceLabels_disabled' in dat['nirs/probe']:
                # This is disabled as
                # MNE-Python does not currently support custom source names.
                # Instead, sources must be integer values.
                sources = np.array(dat.get('nirs/probe/sourceLabels'))
                sources = [s.decode('UTF-8') for s in sources]
            else:
                sources = np.unique([_correct_shape(np.array(dat.get(
                    'nirs/data1/' + c + '/sourceIndex')))[0]
                    for c in channels])
                sources = [f"S{int(s)}" for s in sources]
            if 'detectorLabels_disabled' in dat['nirs/probe']:
                # This is disabled as
                # MNE-Python does not currently support custom detector names.
                # Instead, detector must be integer values.
                detectors = np.array(dat.get('nirs/probe/detectorLabels'))
                detectors = [d.decode('UTF-8') for d in detectors]
            else:
                detectors = np.unique([_correct_shape(np.array(dat.get(
                    'nirs/data1/' + c + '/detectorIndex')))[0]
                    for c in channels])
                detectors = [f"D{int(d)}" for d in detectors]
            # Extract source and detector locations
            # 3D positions are optional in SNIRF,
            # but highly recommended in MNE.
            if ('detectorPos3D' in dat['nirs/probe']) &\
                    ('sourcePos3D' in dat['nirs/probe']):
                # If 3D positions are available they are used even if 2D exists
                detPos3D = np.array(dat.get('nirs/probe/detectorPos3D'))
                srcPos3D = np.array(dat.get('nirs/probe/sourcePos3D'))
            elif ('detectorPos2D' in dat['nirs/probe']) &\
                    ('sourcePos2D' in dat['nirs/probe']):
                warn('The data only contains 2D location information for the '
                     'optode positions. '
                     'It is highly recommended that data is used '
                     'which contains 3D location information for the '
                     'optode positions. With only 2D locations it can not be '
                     'guaranteed that MNE functions will behave correctly '
                     'and produce accurate results. If it is not possible to '
                     'include 3D positions in your data, please consider '
                     'using the set_montage() function.')
                detPos2D = np.array(dat.get('nirs/probe/detectorPos2D'))
                srcPos2D = np.array(dat.get('nirs/probe/sourcePos2D'))
                # Set the third dimension to zero. See gh#9308
                detPos3D = np.append(detPos2D,
                                     np.zeros((detPos2D.shape[0], 1)), axis=1)
                srcPos3D = np.append(srcPos2D,
                                     np.zeros((srcPos2D.shape[0], 1)), axis=1)
            else:
                raise RuntimeError('No optode location information is '
                                   'provided. MNE requires at least 2D '
                                   'location information')
            assert len(sources) == srcPos3D.shape[0]
            assert len(detectors) == detPos3D.shape[0]
            # Build MNE channel names: "S<i>_D<j> <wavelength>" for CW data,
            # "S<i>_D<j> <label>" for processed data. SNIRF source/detector/
            # wavelength indices are 1-based, hence the "- 1" offsets.
            chnames = []
            ch_types = []
            for chan in channels:
                src_idx = int(_correct_shape(np.array(dat.get('nirs/data1/' +
                              chan + '/sourceIndex')))[0])
                det_idx = int(_correct_shape(np.array(dat.get('nirs/data1/' +
                              chan + '/detectorIndex')))[0])
                if snirf_data_type == 1:
                    wve_idx = int(_correct_shape(np.array(
                        dat.get('nirs/data1/' + chan +
                                '/wavelengthIndex')))[0])
                    ch_name = sources[src_idx - 1] + '_' +\
                        detectors[det_idx - 1] + ' ' +\
                        str(fnirs_wavelengths[wve_idx - 1])
                    chnames.append(ch_name)
                    ch_types.append('fnirs_cw_amplitude')
                elif snirf_data_type == 99999:
                    dt_id = _correct_shape(
                        np.array(dat.get('nirs/data1/' + chan +
                                         '/dataTypeLabel')))[0].decode('UTF-8')
                    # Convert between SNIRF processed names and MNE type names
                    dt_id = dt_id.lower().replace("dod", "fnirs_od")
                    ch_name = sources[src_idx - 1] + '_' + \
                        detectors[det_idx - 1]
                    if dt_id == "fnirs_od":
                        wve_idx = int(_correct_shape(np.array(
                            dat.get('nirs/data1/' + chan +
                                    '/wavelengthIndex')))[0])
                        suffix = ' ' + str(fnirs_wavelengths[wve_idx - 1])
                    else:
                        suffix = ' ' + dt_id.lower()
                    ch_name = ch_name + suffix
                    chnames.append(ch_name)
                    ch_types.append(dt_id)
            # Create mne structure
            info = create_info(chnames,
                               sampling_rate,
                               ch_types=ch_types)
            subject_info = {}
            names = np.array(dat.get('nirs/metaDataTags/SubjectID'))
            subject_info['first_name'] = \
                _correct_shape(names)[0].decode('UTF-8')
            # Read non standard (but allowed) custom metadata tags
            if 'lastName' in dat.get('nirs/metaDataTags/'):
                ln = dat.get('/nirs/metaDataTags/lastName')[0].decode('UTF-8')
                subject_info['last_name'] = ln
            if 'middleName' in dat.get('nirs/metaDataTags/'):
                m = dat.get('/nirs/metaDataTags/middleName')[0].decode('UTF-8')
                subject_info['middle_name'] = m
            if 'sex' in dat.get('nirs/metaDataTags/'):
                s = dat.get('/nirs/metaDataTags/sex')[0].decode('UTF-8')
                if s in {'M', 'Male', '1', 'm'}:
                    subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE
                elif s in {'F', 'Female', '2', 'f'}:
                    subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE
                elif s in {'0', 'u'}:
                    subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN
            # End non standard name reading
            # Update info
            info.update(subject_info=subject_info)
            # Convert optode positions to metres (the unit MNE expects,
            # see _get_lengthunit_scaling)
            length_unit = _get_metadata_str(dat, "LengthUnit")
            length_scaling = _get_lengthunit_scaling(length_unit)
            srcPos3D /= length_scaling
            detPos3D /= length_scaling
            if optode_frame in ["mri", "meg"]:
                # These are all in MNI or MEG coordinates, so let's transform
                # them to the Neuromag head coordinate frame
                srcPos3D, detPos3D, _, head_t = _convert_fnirs_to_head(
                    'fsaverage', optode_frame, 'head', srcPos3D, detPos3D, [])
            else:
                head_t = np.eye(4)
            if optode_frame in ["head", "mri", "meg"]:
                # Then the transformation to head was performed above
                coord_frame = FIFF.FIFFV_COORD_HEAD
            elif 'MNE_coordFrame' in dat.get('nirs/metaDataTags/'):
                coord_frame = int(dat.get('/nirs/metaDataTags/MNE_coordFrame')
                                  [0])
            else:
                coord_frame = FIFF.FIFFV_COORD_UNKNOWN
            # Fill the per-channel ``loc`` array:
            # [0:3] midpoint, [3:6] source, [6:9] detector, [9] wavelength
            for idx, chan in enumerate(channels):
                src_idx = int(_correct_shape(np.array(dat.get('nirs/data1/' +
                              chan + '/sourceIndex')))[0])
                det_idx = int(_correct_shape(np.array(dat.get('nirs/data1/' +
                              chan + '/detectorIndex')))[0])
                info['chs'][idx]['loc'][3:6] = srcPos3D[src_idx - 1, :]
                info['chs'][idx]['loc'][6:9] = detPos3D[det_idx - 1, :]
                # Store channel as mid point
                midpoint = (info['chs'][idx]['loc'][3:6] +
                            info['chs'][idx]['loc'][6:9]) / 2
                info['chs'][idx]['loc'][0:3] = midpoint
                info['chs'][idx]['coord_frame'] = coord_frame
                if (snirf_data_type in [1]) or \
                        ((snirf_data_type == 99999) and
                         (ch_types[idx] == "fnirs_od")):
                    wve_idx = int(_correct_shape(np.array(dat.get(
                        'nirs/data1/' + chan + '/wavelengthIndex')))[0])
                    info['chs'][idx]['loc'][9] = fnirs_wavelengths[wve_idx - 1]
            if 'landmarkPos3D' in dat.get('nirs/probe/'):
                # Use the digitized landmarks stored in the file; fiducials
                # are recognised by name, everything else becomes an EEG-kind
                # extra point
                diglocs = np.array(dat.get('/nirs/probe/landmarkPos3D'))
                diglocs /= length_scaling
                digname = np.array(dat.get('/nirs/probe/landmarkLabels'))
                nasion, lpa, rpa, hpi = None, None, None, None
                extra_ps = dict()
                for idx, dign in enumerate(digname):
                    dign = dign.lower()
                    if dign in [b'lpa', b'al']:
                        lpa = diglocs[idx, :3]
                    elif dign in [b'nasion']:
                        nasion = diglocs[idx, :3]
                    elif dign in [b'rpa', b'ar']:
                        rpa = diglocs[idx, :3]
                    else:
                        extra_ps[f'EEG{len(extra_ps) + 1:03d}'] = \
                            diglocs[idx, :3]
                add_missing_fiducials = (
                    coord_frame == FIFF.FIFFV_COORD_HEAD and
                    lpa is None and rpa is None and nasion is None
                )
                dig = _make_dig_points(
                    nasion=nasion, lpa=lpa, rpa=rpa, hpi=hpi,
                    dig_ch_pos=extra_ps,
                    coord_frame=_frame_to_str[coord_frame],
                    add_missing_fiducials=add_missing_fiducials)
            else:
                # No landmarks in the file: synthesize a digitization from
                # fsaverage fiducials plus the channel midpoints
                ch_locs = [info['chs'][idx]['loc'][0:3]
                           for idx in range(len(channels))]
                # Set up digitization
                dig = get_mni_fiducials('fsaverage', verbose=False)
                for fid in dig:
                    fid['r'] = apply_trans(head_t, fid['r'])
                    fid['coord_frame'] = FIFF.FIFFV_COORD_HEAD
                for ii, ch_loc in enumerate(ch_locs, 1):
                    dig.append(dict(
                        kind=FIFF.FIFFV_POINT_EEG,  # misnomer prob okay
                        r=ch_loc,
                        ident=ii,
                        coord_frame=FIFF.FIFFV_COORD_HEAD,
                    ))
                dig = _format_dig_points(dig)
            del head_t
            with info._unlock():
                info['dig'] = dig
            str_date = _correct_shape(np.array((dat.get(
                '/nirs/metaDataTags/MeasurementDate'))))[0].decode('UTF-8')
            str_time = _correct_shape(np.array((dat.get(
                '/nirs/metaDataTags/MeasurementTime'))))[0].decode('UTF-8')
            str_datetime = str_date + str_time
            # Several formats have been observed so we try each in turn
            for dt_code in ['%Y-%m-%d%H:%M:%SZ',
                            '%Y-%m-%d%H:%M:%S']:
                try:
                    meas_date = datetime.datetime.strptime(
                        str_datetime, dt_code)
                except ValueError:
                    pass
                else:
                    break
            else:
                warn("Extraction of measurement date from SNIRF file failed. "
                     "The date is being set to January 1st, 2000, "
                     f"instead of {str_datetime}")
                meas_date = datetime.datetime(2000, 1, 1, 0, 0, 0)
            meas_date = meas_date.replace(tzinfo=datetime.timezone.utc)
            with info._unlock():
                info['meas_date'] = meas_date
            if 'DateOfBirth' in dat.get('nirs/metaDataTags/'):
                str_birth = np.array((dat.get('/nirs/metaDataTags/'
                                              'DateOfBirth')))[0].decode()
                birth_matched = re.fullmatch(r'(\d+)-(\d+)-(\d+)', str_birth)
                if birth_matched is not None:
                    birthday = (int(birth_matched.groups()[0]),
                                int(birth_matched.groups()[1]),
                                int(birth_matched.groups()[2]))
                    with info._unlock():
                        info["subject_info"]['birthday'] = birthday
            super(RawSNIRF, self).__init__(info, preload, filenames=[fname],
                                           last_samps=[last_samps],
                                           verbose=verbose)
            # Extract annotations: each nirs/stim* group contributes events;
            # column 0 of its data holds the onsets, duration is fixed at 1 s
            annot = Annotations([], [], [])
            for key in dat['nirs']:
                if 'stim' in key:
                    data = np.atleast_2d(np.array(
                        dat.get('/nirs/' + key + '/data')))
                    if data.size > 0:
                        desc = _correct_shape(np.array(dat.get(
                            '/nirs/' + key + '/name')))[0]
                        annot.append(data[:, 0], 1.0, desc.decode('UTF-8'))
            self.set_annotations(annot, emit_warning=False)
            # Validate that the fNIRS info is correctly formatted
            _validate_nirs_info(self.info)

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        """Read a segment of data from a file."""
        import h5py
        with h5py.File(self._filenames[0], 'r') as dat:
            # h5py only reads the requested sample range from disk; transpose
            # to MNE's (n_channels, n_samples) layout
            one = dat['/nirs/data1/dataTimeSeries'][start:stop].T
            _mult_cal_one(data, one, idx, cals, mult)
# Helper function for when the numpy array has shape (), i.e. just one element.
def _correct_shape(arr):
if arr.shape == ():
arr = arr[np.newaxis]
return arr
def _get_timeunit_scaling(time_unit):
"""MNE expects time in seconds, return required scaling."""
scalings = {'ms': 1000, 's': 1, 'unknown': 1}
if time_unit in scalings:
return scalings[time_unit]
else:
raise RuntimeError(f'The time unit {time_unit} is not supported by '
'MNE. Please report this error as a GitHub '
'issue to inform the developers.')
def _get_lengthunit_scaling(length_unit):
"""MNE expects distance in m, return required scaling."""
scalings = {'m': 1, 'cm': 100, 'mm': 1000}
if length_unit in scalings:
return scalings[length_unit]
else:
raise RuntimeError(f'The length unit {length_unit} is not supported '
'by MNE. Please report this error as a GitHub '
'issue to inform the developers.')
def _extract_sampling_rate(dat):
    """Extract the sample rate from the time field.

    Parameters
    ----------
    dat : h5py.File
        Open SNIRF file handle.

    Returns
    -------
    sampling_rate : float
        Sampling rate in Hz, already corrected for the file's TimeUnit.
        Zero if the rate could not be determined (non-uniform sampling).
    """
    time_data = np.array(dat.get('nirs/data1/time'))
    sampling_rate = 0
    if len(time_data) == 2:
        # specified as onset, samplerate
        sampling_rate = 1. / (time_data[1] - time_data[0])
    else:
        # specified as time points; round to absorb float jitter before
        # testing for uniform sampling
        fs_diff = np.around(np.diff(time_data), decimals=4)
        uniq_diff = np.unique(fs_diff)
        if len(uniq_diff) == 1:
            # Uniformly sampled data. BUGFIX: use .item() so a scalar float
            # is returned rather than a length-1 ndarray (``1. / np.unique``
            # previously leaked an array into the returned sampling rate).
            sampling_rate = 1. / uniq_diff.item()
        else:
            warn("MNE does not currently support reading "
                 "SNIRF files with non-uniform sampled data.")
    time_unit = _get_metadata_str(dat, "TimeUnit")
    time_unit_scaling = _get_timeunit_scaling(time_unit)
    sampling_rate *= time_unit_scaling
    return sampling_rate
def _get_metadata_str(dat, field):
    """Return metadata tag ``field`` decoded to str, or None if absent."""
    tags = np.array(dat.get('nirs/metaDataTags'))
    if field not in tags:
        return None
    raw_value = np.array(dat.get(f'/nirs/metaDataTags/{field}'))
    return str(_correct_shape(raw_value)[0], 'utf-8')
| bsd-3-clause | a0516e79e20bf4410663ad8f9401647a | 42.815951 | 79 | 0.5028 | 4.023662 | false | false | false | false |
mne-tools/mne-python | examples/time_frequency/time_frequency_erds.py | 5 | 8095 | # -*- coding: utf-8 -*-
"""
.. _ex-tfr-erds:
===============================
Compute and visualize ERDS maps
===============================
This example calculates and displays ERDS maps of event-related EEG data.
ERDS (sometimes also written as ERD/ERS) is short for event-related
desynchronization (ERD) and event-related synchronization (ERS)
:footcite:`PfurtschellerLopesdaSilva1999`. Conceptually, ERD corresponds to a
decrease in power in a specific frequency band relative to a baseline.
Similarly, ERS corresponds to an increase in power. An ERDS map is a
time/frequency representation of ERD/ERS over a range of frequencies
:footcite:`GraimannEtAl2002`. ERDS maps are also known as ERSP (event-related
spectral perturbation) :footcite:`Makeig1993`.
In this example, we use an EEG BCI data set containing two different motor
imagery tasks (imagined hand and feet movement). Our goal is to generate ERDS
maps for each of the two tasks.
First, we load the data and create epochs of 5s length. The data set contains
multiple channels, but we will only consider C3, Cz, and C4. We compute maps
containing frequencies ranging from 2 to 35Hz. We map ERD to red color and ERS
to blue color, which is customary in many ERDS publications. Finally, we
perform cluster-based permutation tests to estimate significant ERDS values
(corrected for multiple comparisons within channels).
"""
# Authors: Clemens Brunner <clemens.brunner@gmail.com>
# Felix Klotzsche <klotzsche@cbs.mpg.de>
#
# License: BSD-3-Clause
# %%
# As usual, we import everything we need.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import TwoSlopeNorm
import pandas as pd
import seaborn as sns
import mne
from mne.datasets import eegbci
from mne.io import concatenate_raws, read_raw_edf
from mne.time_frequency import tfr_multitaper
from mne.stats import permutation_cluster_1samp_test as pcluster_test
# %%
# First, we load and preprocess the data. We use runs 6, 10, and 14 from
# subject 1 (these runs contain hand and feet motor imagery).
fnames = eegbci.load_data(subject=1, runs=(6, 10, 14))
raw = concatenate_raws([read_raw_edf(f, preload=True) for f in fnames])
raw.rename_channels(lambda x: x.strip('.')) # remove dots from channel names
events, _ = mne.events_from_annotations(raw, event_id=dict(T1=2, T2=3))
# %%
# Now we can create 5-second epochs around events of interest.
# (Epochs are padded by 0.5 s on each side to avoid edge artifacts
# in the time-frequency decomposition; they are cropped again below.)
tmin, tmax = -1, 4
event_ids = dict(hands=2, feet=3) # map event IDs to tasks
epochs = mne.Epochs(raw, events, event_ids, tmin - 0.5, tmax + 0.5,
                    picks=('C3', 'Cz', 'C4'), baseline=None, preload=True)
# %%
# .. _cnorm-example:
#
# Here we set suitable values for computing ERDS maps. Note especially the
# ``cnorm`` variable, which sets up an *asymmetric* colormap where the middle
# color is mapped to zero, even though zero is not the middle *value* of the
# colormap range. This does two things: it ensures that zero values will be
# plotted in white (given that below we select the ``RdBu`` colormap), and it
# makes synchronization and desynchronization look equally prominent in the
# plots, even though their extreme values are of different magnitudes.
freqs = np.arange(2, 36) # frequencies from 2-35Hz
vmin, vmax = -1, 1.5 # set min and max ERDS values in plot
baseline = (-1, 0) # baseline interval (in s)
cnorm = TwoSlopeNorm(vmin=vmin, vcenter=0, vmax=vmax) # min, center & max ERDS
kwargs = dict(n_permutations=100, step_down_p=0.05, seed=1,
              buffer_size=None, out_type='mask') # for cluster test
# %%
# Finally, we perform time/frequency decomposition over all epochs.
tfr = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs, use_fft=True,
                     return_itc=False, average=False, decim=2)
tfr.crop(tmin, tmax).apply_baseline(baseline, mode="percent")
for event in event_ids:
    # select desired epochs for visualization
    tfr_ev = tfr[event]
    fig, axes = plt.subplots(1, 4, figsize=(12, 4),
                             gridspec_kw={"width_ratios": [10, 10, 10, 1]})
    for ch, ax in enumerate(axes[:-1]): # for each channel
        # positive clusters
        _, c1, p1, _ = pcluster_test(tfr_ev.data[:, ch], tail=1, **kwargs)
        # negative clusters
        _, c2, p2, _ = pcluster_test(tfr_ev.data[:, ch], tail=-1, **kwargs)
        # note that we keep clusters with p <= 0.05 from the combined clusters
        # of two independent tests; in this example, we do not correct for
        # these two comparisons
        c = np.stack(c1 + c2, axis=2) # combined clusters
        p = np.concatenate((p1, p2)) # combined p-values
        mask = c[..., p <= 0.05].any(axis=-1)
        # plot TFR (ERDS map with masking)
        tfr_ev.average().plot([ch], cmap="RdBu", cnorm=cnorm, axes=ax,
                              colorbar=False, show=False, mask=mask,
                              mask_style="mask")
        ax.set_title(epochs.ch_names[ch], fontsize=10)
        ax.axvline(0, linewidth=1, color="black", linestyle=":") # event
        if ch != 0:
            ax.set_ylabel("")
            ax.set_yticklabels("")
    fig.colorbar(axes[0].images[-1], cax=axes[-1]).ax.set_yscale("linear")
    fig.suptitle(f"ERDS ({event})")
    plt.show()
# %%
# Similar to `~mne.Epochs` objects, we can also export data from
# `~mne.time_frequency.EpochsTFR` and `~mne.time_frequency.AverageTFR` objects
# to a :class:`Pandas DataFrame <pandas.DataFrame>`. By default, the `time`
# column of the exported data frame is in milliseconds. Here, to be consistent
# with the time-frequency plots, we want to keep it in seconds, which we can
# achieve by setting ``time_format=None``:
df = tfr.to_data_frame(time_format=None)
df.head()
# %%
# This allows us to use additional plotting functions like
# :func:`seaborn.lineplot` to plot confidence bands:
df = tfr.to_data_frame(time_format=None, long_format=True)
# Map to frequency bands:
freq_bounds = {'_': 0,
               'delta': 3,
               'theta': 7,
               'alpha': 13,
               'beta': 35,
               'gamma': 140}
df['band'] = pd.cut(df['freq'], list(freq_bounds.values()),
                    labels=list(freq_bounds)[1:])
# Filter to retain only relevant frequency bands:
freq_bands_of_interest = ['delta', 'theta', 'alpha', 'beta']
df = df[df.band.isin(freq_bands_of_interest)]
df['band'] = df['band'].cat.remove_unused_categories()
# Order channels for plotting:
df['channel'] = df['channel'].cat.reorder_categories(('C3', 'Cz', 'C4'),
                                                     ordered=True)
g = sns.FacetGrid(df, row='band', col='channel', margin_titles=True)
g.map(sns.lineplot, 'time', 'value', 'condition', n_boot=10)
axline_kw = dict(color='black', linestyle='dashed', linewidth=0.5, alpha=0.5)
g.map(plt.axhline, y=0, **axline_kw)
g.map(plt.axvline, x=0, **axline_kw)
g.set(ylim=(None, 1.5))
g.set_axis_labels("Time (s)", "ERDS (%)")
g.set_titles(col_template="{col_name}", row_template="{row_name}")
g.add_legend(ncol=2, loc='lower center')
g.fig.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.08)
# %%
# Having the data as a DataFrame also facilitates subsetting,
# grouping, and other transforms.
# Here, we use seaborn to plot the average ERDS in the motor imagery interval
# as a function of frequency band and imagery condition:
# (``time > 1`` restricts the average to the imagery period after cue onset)
df_mean = (df.query('time > 1')
           .groupby(['condition', 'epoch', 'band', 'channel'])[['value']]
           .mean()
           .reset_index())
g = sns.FacetGrid(df_mean, col='condition', col_order=['hands', 'feet'],
                  margin_titles=True)
g = (g.map(sns.violinplot, 'channel', 'value', 'band', n_boot=10,
           palette='deep', order=['C3', 'Cz', 'C4'],
           hue_order=freq_bands_of_interest,
           linewidth=0.5).add_legend(ncol=4, loc='lower center'))
g.map(plt.axhline, **axline_kw)
g.set_axis_labels("", "ERDS (%)")
g.set_titles(col_template="{col_name}", row_template="{row_name}")
g.fig.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.3)
# %%
# References
# ==========
# .. footbibliography::
| bsd-3-clause | 5ae2aa4b2e1076ba1d8b48eb61265f50 | 39.883838 | 79 | 0.659049 | 3.181997 | false | false | false | false |
mne-tools/mne-python | mne/channels/interpolation.py | 8 | 8836 | # Authors: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD-3-Clause
import numpy as np
from numpy.polynomial.legendre import legval
from ..utils import logger, warn, verbose
from ..io.meas_info import _simplify_info
from ..io.pick import pick_types, pick_channels, pick_info
from ..surface import _normalize_vectors
from ..forward import _map_meg_or_eeg_channels
from ..utils import _check_option, _validate_type
def _calc_h(cosang, stiffness=4, n_legendre_terms=50):
"""Calculate spherical spline h function between points on a sphere.
Parameters
----------
cosang : array-like | float
cosine of angles between pairs of points on a spherical surface. This
is equivalent to the dot product of unit vectors.
stiffness : float
stiffnes of the spline. Also referred to as ``m``.
n_legendre_terms : int
number of Legendre terms to evaluate.
"""
factors = [(2 * n + 1) /
(n ** (stiffness - 1) * (n + 1) ** (stiffness - 1) * 4 * np.pi)
for n in range(1, n_legendre_terms + 1)]
return legval(cosang, [0] + factors)
def _calc_g(cosang, stiffness=4, n_legendre_terms=50):
"""Calculate spherical spline g function between points on a sphere.
Parameters
----------
cosang : array-like of float, shape(n_channels, n_channels)
cosine of angles between pairs of points on a spherical surface. This
is equivalent to the dot product of unit vectors.
stiffness : float
stiffness of the spline.
n_legendre_terms : int
number of Legendre terms to evaluate.
Returns
-------
G : np.ndrarray of float, shape(n_channels, n_channels)
The G matrix.
"""
factors = [(2 * n + 1) / (n ** stiffness * (n + 1) ** stiffness *
4 * np.pi)
for n in range(1, n_legendre_terms + 1)]
return legval(cosang, [0] + factors)
def _make_interpolation_matrix(pos_from, pos_to, alpha=1e-5):
    """Compute a spherical-spline interpolation matrix.

    Maps signals recorded at ``pos_from`` to the locations ``pos_to``
    following Perrin, Pernier, Bertrand & Echallier (1989), Spherical
    splines for scalp potential and current density mapping,
    Electroencephalogr Clin Neurophysiol 72(2):184-7.

    Parameters
    ----------
    pos_from : np.ndarray of float, shape(n_good_sensors, 3)
        The positions to interpolate from.
    pos_to : np.ndarray of float, shape(n_bad_sensors, 3)
        The positions to interpolate to.
    alpha : float
        Regularization parameter. Defaults to 1e-5.

    Returns
    -------
    interpolation : np.ndarray of float, shape(len(pos_to), len(pos_from))
        The interpolation matrix that maps good signals to the location
        of bad signals.
    """
    from scipy import linalg
    # work on copies because _normalize_vectors operates in place
    src = pos_from.copy()
    dest = pos_to.copy()
    n_src = src.shape[0]
    n_dest = dest.shape[0]
    # project sensor positions onto the unit sphere
    _normalize_vectors(src)
    _normalize_vectors(dest)
    # dot products of unit vectors give cosines of inter-sensor angles
    G_from = _calc_g(src @ src.T)
    G_to_from = _calc_g(dest @ src.T)
    assert G_from.shape == (n_src, n_src)
    assert G_to_from.shape == (n_dest, n_src)
    # regularize by loading the diagonal
    if alpha is not None:
        G_from.flat[::len(G_from) + 1] += alpha
    ones_col = np.ones((n_src, 1))
    C = np.vstack([np.hstack([G_from, ones_col]),
                   np.hstack([ones_col.T, [[0]]])])
    C_inv = linalg.pinv(C)
    interpolation = np.hstack([G_to_from,
                               np.ones((n_dest, 1))]) @ C_inv[:, :-1]
    assert interpolation.shape == (n_dest, n_src)
    return interpolation
def _do_interp_dots(inst, interpolation, goods_idx, bads_idx):
    """Project good-channel data through the mapping onto bad channels."""
    from ..epochs import BaseEpochs
    from ..evoked import Evoked
    from ..io.base import BaseRaw
    _validate_type(inst, (BaseRaw, BaseEpochs, Evoked), 'inst')
    good_data = inst._data[..., goods_idx, :]
    inst._data[..., bads_idx, :] = np.matmul(interpolation, good_data)
@verbose
def _interpolate_bads_eeg(inst, origin, exclude=None, verbose=None):
    """Interpolate bad EEG channels in place using spherical splines.

    ``inst`` is a Raw, Epochs, or Evoked instance (modified in place);
    ``origin`` is the sphere origin used for the spherical fit; channels
    listed in ``exclude`` are left out of the picks entirely.
    """
    if exclude is None:
        exclude = list()
    bads_idx = np.zeros(len(inst.ch_names), dtype=bool)
    goods_idx = np.zeros(len(inst.ch_names), dtype=bool)
    picks = pick_types(inst.info, meg=False, eeg=True, exclude=exclude)
    inst.info._check_consistency()
    bads_idx[picks] = [inst.ch_names[ch] in inst.info['bads'] for ch in picks]
    # nothing to do if there are no EEG channels or no bad EEG channels
    if len(picks) == 0 or bads_idx.sum() == 0:
        return
    goods_idx[picks] = True
    goods_idx[bads_idx] = False
    pos = inst._get_channel_positions(picks)
    # Make sure only EEG are used
    bads_idx_pos = bads_idx[picks]
    goods_idx_pos = goods_idx[picks]
    # test spherical fit: mean normalized distance should be close to 1
    distance = np.linalg.norm(pos - origin, axis=-1)
    distance = np.mean(distance / np.mean(distance))
    if np.abs(1. - distance) > 0.1:
        warn('Your spherical fit is poor, interpolation results are '
             'likely to be inaccurate.')
    # shift coordinates so the sphere origin is at (0, 0, 0)
    pos_good = pos[goods_idx_pos] - origin
    pos_bad = pos[bads_idx_pos] - origin
    logger.info('Computing interpolation matrix from {} sensor '
                'positions'.format(len(pos_good)))
    interpolation = _make_interpolation_matrix(pos_good, pos_bad)
    logger.info('Interpolating {} sensors'.format(len(pos_bad)))
    _do_interp_dots(inst, interpolation, goods_idx, bads_idx)
def _interpolate_bads_meg(inst, mode='accurate', origin=(0., 0., 0.04),
                          verbose=None, ref_meg=False):
    """Interpolate bad MEG channels only (thin wrapper around the M/EEG one)."""
    return _interpolate_bads_meeg(inst, mode=mode, origin=origin, eeg=False,
                                  ref_meg=ref_meg, verbose=verbose)
@verbose
def _interpolate_bads_meeg(inst, mode='accurate', origin=(0., 0., 0.04),
                           meg=True, eeg=True, ref_meg=False,
                           exclude=(), verbose=None):
    """Interpolate bad MEG and/or EEG channels in place via field mapping.

    For each enabled channel type, builds a forward-field mapping from the
    good channels to the target channels and overwrites the bad channels'
    data. ``mode`` and ``origin`` are forwarded to
    ``_map_meg_or_eeg_channels``.
    """
    bools = dict(meg=meg, eeg=eeg)
    info = _simplify_info(inst.info)
    for ch_type, do in bools.items():
        if not do:
            continue
        kw = dict(meg=False, eeg=False)
        kw[ch_type] = True
        picks_type = pick_types(info, ref_meg=ref_meg, exclude=exclude, **kw)
        picks_good = pick_types(info, ref_meg=ref_meg, exclude='bads', **kw)
        use_ch_names = [inst.info['ch_names'][p] for p in picks_type]
        bads_type = [ch for ch in inst.info['bads'] if ch in use_ch_names]
        # skip channel types with no channels or no bad channels
        if len(bads_type) == 0 or len(picks_type) == 0:
            continue
        # select the bad channels to be interpolated
        picks_bad = pick_channels(inst.info['ch_names'], bads_type,
                                  exclude=[])
        # for EEG the mapping is computed to all channels of the type and
        # then restricted to the bad ones; for MEG it targets bads directly
        if ch_type == 'eeg':
            picks_to = picks_type
            bad_sel = np.in1d(picks_type, picks_bad)
        else:
            picks_to = picks_bad
            bad_sel = slice(None)
        info_from = pick_info(inst.info, picks_good)
        info_to = pick_info(inst.info, picks_to)
        mapping = _map_meg_or_eeg_channels(
            info_from, info_to, mode=mode, origin=origin)
        mapping = mapping[bad_sel]
        _do_interp_dots(inst, mapping, picks_good, picks_bad)
@verbose
def _interpolate_bads_nirs(inst, method='nearest', exclude=(), verbose=None):
    """Replace bad fNIRS channels with the nearest good channel's data.

    Modifies ``inst`` in place; bad channels not listed in ``exclude`` are
    removed from ``inst.info['bads']`` afterwards.
    """
    from scipy.spatial.distance import pdist, squareform
    from mne.preprocessing.nirs import _validate_nirs_info
    # nothing to do if there are no fNIRS channels at all
    if len(pick_types(inst.info, fnirs=True, exclude=())) == 0:
        return
    # Returns pick of all nirs and ensures channels are correctly ordered
    picks_nirs = _validate_nirs_info(inst.info)
    nirs_ch_names = [inst.info['ch_names'][p] for p in picks_nirs]
    nirs_ch_names = [ch for ch in nirs_ch_names if ch not in exclude]
    bads_nirs = [ch for ch in inst.info['bads'] if ch in nirs_ch_names]
    if len(bads_nirs) == 0:
        return
    picks_bad = pick_channels(inst.info['ch_names'], bads_nirs, exclude=[])
    bads_mask = [p in picks_bad for p in picks_nirs]
    chs = [inst.info['chs'][i] for i in picks_nirs]
    locs3d = np.array([ch['loc'][:3] for ch in chs])
    _check_option('fnirs_method', method, ['nearest'])
    if method == 'nearest':
        # pairwise Euclidean distances between all fNIRS channel locations
        dist = pdist(locs3d)
        dist = squareform(dist)
        for bad in picks_bad:
            dists_to_bad = dist[bad]
            # Ignore distances to self
            dists_to_bad[dists_to_bad == 0] = np.inf
            # Ignore distances to other bad channels
            dists_to_bad[bads_mask] = np.inf
            # Find closest remaining channels for same frequency
            # NOTE(review): the ``+ (bad % 2)`` offset assumes channels come
            # in consecutive wavelength pairs and that argmin lands on the
            # first channel of the nearest pair — confirm against the
            # ordering guaranteed by _validate_nirs_info
            closest_idx = np.argmin(dists_to_bad) + (bad % 2)
            inst._data[bad] = inst._data[closest_idx]
        # interpolated channels are no longer marked bad (unless excluded)
        inst.info['bads'] = [ch for ch in inst.info['bads'] if ch in exclude]
    return inst
| bsd-3-clause | 7ca348fed1000ea58ee92a228ef157d7 | 35.065306 | 78 | 0.612834 | 3.304413 | false | false | false | false |
mne-tools/mne-python | mne/viz/_mpl_figure.py | 1 | 101757 | # -*- coding: utf-8 -*-
"""Figure classes for MNE-Python's 2D plots.
Class Hierarchy
---------------
MNEFigParams Container object, attached to MNEFigure by default. Sets
close_key='escape' plus whatever other key-value pairs are
passed to its constructor.
matplotlib.figure.Figure
└ MNEFigure
├ MNEBrowseFigure Interactive figure for scrollable data.
│ Generated by:
│ - raw.plot()
│ - epochs.plot()
│ - ica.plot_sources(raw)
│ - ica.plot_sources(epochs)
│
├ MNEAnnotationFigure GUI for adding annotations to Raw
│
├ MNESelectionFigure GUI for spatial channel selection. raw.plot()
│ and epochs.plot() will generate one of these
│ alongside an MNEBrowseFigure when
│ group_by == 'selection' or 'position'
│
└ MNELineFigure Interactive figure for non-scrollable data.
Generated by:
- raw.plot_psd()
- evoked.plot() TODO Not yet implemented
- evoked.plot_white() TODO Not yet implemented
- evoked.plot_joint() TODO Not yet implemented
"""
# Authors: Daniel McCloy <dan@mccloy.info>
#
# License: Simplified BSD
import datetime
import platform
import warnings
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import get_backend
from matplotlib.figure import Figure
from .. import channel_indices_by_type, pick_types
from ..fixes import _close_event
from ..annotations import _sync_onset
from ..io.pick import (_DATA_CH_TYPES_ORDER_DEFAULT, _DATA_CH_TYPES_SPLIT,
_FNIRS_CH_TYPES_SPLIT, _VALID_CHANNEL_TYPES)
from ..utils import Bunch, _click_ch_name, logger
from . import plot_sensors
from ._figure import BrowserBase
from .utils import (DraggableLine, _events_off, _fake_click, _fake_keypress,
_merge_annotations, _prop_kw, _set_window_title,
_validate_if_list_of_axes, plt_show, _fake_scroll)
# Backend identifier reported to MNE's browser-backend selection machinery.
name = 'matplotlib'
plt.ion()
BACKEND = get_backend()
# This ↑↑↑↑↑↑↑↑↑↑↑↑↑ does weird things:
# https://github.com/matplotlib/matplotlib/issues/23298
# but wrapping it in ion() context makes it go away (can't actually use
# `with plt.ion()` as context manager, though, for compat reasons).
# Moving this bit to a separate function in ../../fixes.py doesn't work.
plt.ioff()
# CONSTANTS (inches)
ANNOTATION_FIG_PAD = 0.1
ANNOTATION_FIG_MIN_H = 2.9 # fixed part, not including radio buttons/labels
ANNOTATION_FIG_W = 5.0
ANNOTATION_FIG_CHECKBOX_COLUMN_W = 0.5
class MNEFigure(Figure):
    """Base class for 2D figures & dialogs; wraps matplotlib.figure.Figure."""
    def __init__(self, **kwargs):
        from matplotlib import rcParams
        # figsize is the only kwarg we pass to matplotlib Figure()
        figsize = kwargs.pop('figsize', None)
        super().__init__(figsize=figsize)
        # things we'll almost always want
        defaults = dict(fgcolor=rcParams['axes.edgecolor'],
                        bgcolor=rcParams['axes.facecolor'])
        for key, value in defaults.items():
            if key not in kwargs:
                kwargs[key] = value
        # add param object if not already added (e.g. by BrowserBase)
        if not hasattr(self, 'mne'):
            from mne.viz._figure import BrowserParams
            self.mne = BrowserParams(**kwargs)
        else:
            # only fill in params the existing object doesn't already have
            for key in [k for k in kwargs if not hasattr(self.mne, k)]:
                setattr(self.mne, key, kwargs[key])
    def _close(self, event):
        """Handle close events."""
        logger.debug(f'Closing {self!r}')
        # remove references from parent fig to child fig
        is_child = getattr(self.mne, 'parent_fig', None) is not None
        is_named = getattr(self.mne, 'fig_name', None) is not None
        if is_child:
            self.mne.parent_fig.mne.child_figs.remove(self)
        if is_named:
            setattr(self.mne.parent_fig.mne, self.mne.fig_name, None)
    def _keypress(self, event):
        """Handle keypress events: close key and fullscreen toggle."""
        if event.key == self.mne.close_key:
            plt.close(self)
        elif event.key == 'f11': # full screen
            self.canvas.manager.full_screen_toggle()
    def _buttonpress(self, event):
        """Handle buttonpress events."""
        # no-op here; subclasses override as needed
        pass
    def _pick(self, event):
        """Handle matplotlib pick events."""
        # no-op here; subclasses override as needed
        pass
    def _resize(self, event):
        """Handle window resize events."""
        # no-op here; subclasses override as needed
        pass
    def _add_default_callbacks(self, **kwargs):
        """Remove some matplotlib default callbacks and add MNE-Python ones."""
        # Remove matplotlib default keypress catchers
        default_callbacks = list(
            self.canvas.callbacks.callbacks.get('key_press_event', {}))
        for callback in default_callbacks:
            self.canvas.callbacks.disconnect(callback)
        # add our event callbacks (extra/overriding ones come in via kwargs)
        callbacks = dict(resize_event=self._resize,
                         key_press_event=self._keypress,
                         button_press_event=self._buttonpress,
                         close_event=self._close,
                         pick_event=self._pick)
        callbacks.update(kwargs)
        callback_ids = dict()
        for event, callback in callbacks.items():
            callback_ids[event] = self.canvas.mpl_connect(event, callback)
        # store callback references so they aren't garbage-collected
        self.mne._callback_ids = callback_ids
    def _get_dpi_ratio(self):
        """Get DPI ratio (to handle hi-DPI screens)."""
        dpi_ratio = 1.
        # attribute name varies by backend / matplotlib version; default to 1
        for key in ('_dpi_ratio', '_device_scale'):
            dpi_ratio = getattr(self.canvas, key, dpi_ratio)
        return dpi_ratio
    def _get_size_px(self):
        """Get figure size in pixels."""
        dpi_ratio = self._get_dpi_ratio()
        return self.get_size_inches() * self.dpi / dpi_ratio
    def _inch_to_rel(self, dim_inches, horiz=True):
        """Convert inches to figure-relative distances."""
        fig_w, fig_h = self.get_size_inches()
        w_or_h = fig_w if horiz else fig_h
        return dim_inches / w_or_h
class MNEAnnotationFigure(MNEFigure):
    """Interactive dialog figure for annotations."""
    def _close(self, event):
        """Handle close events (via keypress or window [x])."""
        parent = self.mne.parent_fig
        # disable span selector
        parent.mne.ax_main.selector.active = False
        # clear hover line
        parent._remove_annotation_hover_line()
        # disconnect hover callback
        callback_id = parent.mne._callback_ids['motion_notify_event']
        parent.canvas.callbacks.disconnect(callback_id)
        # do all the other cleanup activities
        super()._close(event)
    def _keypress(self, event):
        """Handle keypress events: edit the annotation label text."""
        text = self.label.get_text()
        key = event.key
        if key == self.mne.close_key:
            plt.close(self)
        elif key == 'backspace':
            text = text[:-1]
        elif key == 'enter':
            # commit the label to the parent browse figure
            self.mne.parent_fig._add_annotation_label(event)
            return
        elif len(key) > 1 or key == ';': # ignore modifier keys
            return
        else:
            text = text + key
        self.label.set_text(text)
        self.canvas.draw()
    def _radiopress(self, event):
        """Handle Radiobutton clicks for Annotation label selection."""
        # update which button looks active
        buttons = self.mne.radio_ax.buttons
        labels = [label.get_text() for label in buttons.labels]
        idx = labels.index(buttons.value_selected)
        self._set_active_button(idx)
        # update click-drag rectangle color
        color = buttons.circles[idx].get_edgecolor()
        selector = self.mne.parent_fig.mne.ax_main.selector
        # https://github.com/matplotlib/matplotlib/issues/20618
        # https://github.com/matplotlib/matplotlib/pull/20693
        try: # > 3.4.2
            selector.set_props(color=color, facecolor=color)
        except AttributeError:
            # fallback for older matplotlib without set_props
            with warnings.catch_warnings(record=True):
                warnings.simplefilter('ignore', DeprecationWarning)
                selector.rect.set_color(color)
                selector.rectprops.update(dict(facecolor=color))
    def _click_override(self, event):
        """Override MPL radiobutton click detector to use transData."""
        ax = self.mne.radio_ax
        buttons = ax.buttons
        if (buttons.ignore(event) or event.button != 1 or event.inaxes != ax):
            return
        pclicked = ax.transData.inverted().transform((event.x, event.y))
        distances = {}
        # a button counts as hit if its label bbox contains the click or the
        # click falls within the button circle
        for i, (p, t) in enumerate(zip(buttons.circles, buttons.labels)):
            if (t.get_window_extent().contains(event.x, event.y)
                    or np.linalg.norm(pclicked - p.center) < p.radius):
                distances[i] = np.linalg.norm(pclicked - p.center)
        if len(distances) > 0:
            closest = min(distances, key=distances.get)
            buttons.set_active(closest)
    def _set_active_button(self, idx):
        """Set active button in annotation dialog figure."""
        buttons = self.mne.radio_ax.buttons
        with _events_off(buttons):
            buttons.set_active(idx)
        for circle in buttons.circles:
            circle.set_facecolor(self.mne.parent_fig.mne.bgcolor)
        # active circle gets filled in, partially transparent
        color = list(buttons.circles[idx].get_edgecolor())
        color[-1] = 0.5
        buttons.circles[idx].set_facecolor(color)
        self.canvas.draw()
class MNESelectionFigure(MNEFigure):
    """Interactive dialog figure for channel selections."""
    def _close(self, event):
        """Handle close events."""
        self.mne.parent_fig.mne.child_figs.remove(self)
        self.mne.fig_selection = None
        # selection fig & main fig tightly integrated; closing one closes both
        plt.close(self.mne.parent_fig)
    def _keypress(self, event):
        """Handle keypress events: forward navigation keys to the parent."""
        if event.key in ('up', 'down', 'b'):
            self.mne.parent_fig._keypress(event)
        else: # check for close key
            super()._keypress(event)
    def _radiopress(self, event):
        """Handle RadioButton clicks for channel selection groups."""
        selections_dict = self.mne.parent_fig.mne.ch_selections
        buttons = self.mne.radio_ax.buttons
        labels = [label.get_text() for label in buttons.labels]
        this_label = buttons.value_selected
        parent = self.mne.parent_fig
        # 'Custom' is only selectable once a lasso selection exists
        if this_label == 'Custom' and not len(selections_dict['Custom']):
            with _events_off(buttons):
                buttons.set_active(self.mne.old_selection)
            return
        # clicking a selection cancels butterfly mode
        if parent.mne.butterfly:
            parent._toggle_butterfly()
        with _events_off(buttons):
            buttons.set_active(labels.index(this_label))
        parent._update_selection()
    def _set_custom_selection(self):
        """Set custom selection by lasso selector."""
        chs = self.lasso.selection
        parent = self.mne.parent_fig
        buttons = self.mne.radio_ax.buttons
        if not len(chs):
            return
        labels = [label.get_text() for label in buttons.labels]
        inds = np.in1d(parent.mne.ch_names, chs)
        parent.mne.ch_selections['Custom'] = inds.nonzero()[0]
        buttons.set_active(labels.index('Custom'))
    def _style_radio_buttons_butterfly(self):
        """Handle RadioButton state for keyboard interactions."""
        # Show all radio buttons as selected when in butterfly mode
        parent = self.mne.parent_fig
        buttons = self.mne.radio_ax.buttons
        color = (buttons.activecolor if parent.mne.butterfly else
                 parent.mne.bgcolor)
        for circle in buttons.circles:
            circle.set_facecolor(color)
        # when leaving butterfly mode, make most-recently-used selection active
        if not parent.mne.butterfly:
            with _events_off(buttons):
                buttons.set_active(self.mne.old_selection)
        # update the sensors too
        parent._update_highlighted_sensors()
class MNEBrowseFigure(BrowserBase, MNEFigure):
"""Interactive figure with scrollbars, for data browsing."""
def __init__(self, inst, figsize, ica=None,
xlabel='Time (s)', **kwargs):
from matplotlib.colors import to_rgba_array
from matplotlib.patches import Rectangle
from matplotlib.ticker import (FixedFormatter, FixedLocator,
FuncFormatter, NullFormatter)
from matplotlib.transforms import blended_transform_factory
from matplotlib.widgets import Button
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from mpl_toolkits.axes_grid1.axes_size import Fixed
self.backend_name = 'matplotlib'
kwargs.update({'inst': inst,
'figsize': figsize,
'ica': ica,
'xlabel': xlabel})
BrowserBase.__init__(self, **kwargs)
MNEFigure.__init__(self, **kwargs)
# MAIN AXES: default sizes (inches)
# XXX simpler with constrained_layout? (when it's no longer "beta")
l_margin = 1.
r_margin = 0.1
b_margin = 0.45
t_margin = 0.25
scroll_width = 0.25
hscroll_dist = 0.25
vscroll_dist = 0.1
help_width = scroll_width * 2
# MAIN AXES: default margins (figure-relative coordinates)
left = self._inch_to_rel(l_margin - vscroll_dist - help_width)
right = 1 - self._inch_to_rel(r_margin)
bottom = self._inch_to_rel(b_margin, horiz=False)
top = 1 - self._inch_to_rel(t_margin, horiz=False)
width = right - left
height = top - bottom
position = [left, bottom, width, height]
# Main axes must be a subplot for subplots_adjust to work (so user can
# adjust margins). That's why we don't use the Divider class directly.
ax_main = self.add_subplot(1, 1, 1, position=position)
self.subplotpars.update(left=left, bottom=bottom, top=top, right=right)
div = make_axes_locatable(ax_main)
# this only gets shown in zen mode
self.mne.zen_xlabel = ax_main.set_xlabel(xlabel)
self.mne.zen_xlabel.set_visible(not self.mne.scrollbars_visible)
# make sure background color of the axis is set
if 'bgcolor' in kwargs:
ax_main.set_facecolor(kwargs['bgcolor'])
# SCROLLBARS
ax_hscroll = div.append_axes(position='bottom',
size=Fixed(scroll_width),
pad=Fixed(hscroll_dist))
ax_vscroll = div.append_axes(position='right',
size=Fixed(scroll_width),
pad=Fixed(vscroll_dist))
ax_hscroll.get_yaxis().set_visible(False)
ax_hscroll.set_xlabel(xlabel)
ax_vscroll.set_axis_off()
# HORIZONTAL SCROLLBAR PATCHES (FOR MARKING BAD EPOCHS)
if self.mne.is_epochs:
epoch_nums = self.mne.inst.selection
for ix, _ in enumerate(epoch_nums):
start = self.mne.boundary_times[ix]
width = np.diff(self.mne.boundary_times[:2])[0]
ax_hscroll.add_patch(
Rectangle((start, 0), width, 1, color='none',
zorder=self.mne.zorder['patch']))
# both axes, major ticks: gridlines
for _ax in (ax_main, ax_hscroll):
_ax.xaxis.set_major_locator(
FixedLocator(self.mne.boundary_times[1:-1]))
_ax.xaxis.set_major_formatter(NullFormatter())
grid_kwargs = dict(color=self.mne.fgcolor, axis='x',
zorder=self.mne.zorder['grid'])
ax_main.grid(linewidth=2, linestyle='dashed', **grid_kwargs)
ax_hscroll.grid(alpha=0.5, linewidth=0.5, linestyle='solid',
**grid_kwargs)
# main axes, minor ticks: ticklabel (epoch number) for every epoch
ax_main.xaxis.set_minor_locator(FixedLocator(self.mne.midpoints))
ax_main.xaxis.set_minor_formatter(FixedFormatter(epoch_nums))
# hscroll axes, minor ticks: up to 20 ticklabels (epoch numbers)
ax_hscroll.xaxis.set_minor_locator(
FixedLocator(self.mne.midpoints, nbins=20))
ax_hscroll.xaxis.set_minor_formatter(
FuncFormatter(lambda x, pos: self._get_epoch_num_from_time(x)))
# hide some ticks
ax_main.tick_params(axis='x', which='major', bottom=False)
ax_hscroll.tick_params(axis='x', which='both', bottom=False)
else:
# RAW / ICA X-AXIS TICK & LABEL FORMATTING
ax_main.xaxis.set_major_formatter(
FuncFormatter(partial(self._xtick_formatter,
ax_type='main')))
ax_hscroll.xaxis.set_major_formatter(
FuncFormatter(partial(self._xtick_formatter,
ax_type='hscroll')))
if self.mne.time_format != 'float':
for _ax in (ax_main, ax_hscroll):
_ax.set_xlabel('Time (HH:MM:SS)')
# VERTICAL SCROLLBAR PATCHES (COLORED BY CHANNEL TYPE)
ch_order = self.mne.ch_order
for ix, pick in enumerate(ch_order):
this_color = (self.mne.ch_color_bad
if self.mne.ch_names[pick] in self.mne.info['bads']
else self.mne.ch_color_dict)
if isinstance(this_color, dict):
this_color = this_color[self.mne.ch_types[pick]]
ax_vscroll.add_patch(
Rectangle((0, ix), 1, 1, color=this_color,
zorder=self.mne.zorder['patch']))
ax_vscroll.set_ylim(len(ch_order), 0)
ax_vscroll.set_visible(not self.mne.butterfly)
# SCROLLBAR VISIBLE SELECTION PATCHES
sel_kwargs = dict(alpha=0.3, linewidth=4, clip_on=False,
edgecolor=self.mne.fgcolor)
vsel_patch = Rectangle((0, 0), 1, self.mne.n_channels,
facecolor=self.mne.bgcolor, **sel_kwargs)
ax_vscroll.add_patch(vsel_patch)
hsel_facecolor = np.average(
np.vstack((to_rgba_array(self.mne.fgcolor),
to_rgba_array(self.mne.bgcolor))),
axis=0, weights=(3, 1)) # 75% foreground, 25% background
hsel_patch = Rectangle((self.mne.t_start, 0), self.mne.duration, 1,
facecolor=hsel_facecolor, **sel_kwargs)
ax_hscroll.add_patch(hsel_patch)
ax_hscroll.set_xlim(self.mne.first_time, self.mne.first_time +
self.mne.n_times / self.mne.info['sfreq'])
# VLINE
vline_color = (0., 0.75, 0.)
vline_kwargs = dict(visible=False, zorder=self.mne.zorder['vline'])
if self.mne.is_epochs:
x = np.arange(self.mne.n_epochs)
vline = ax_main.vlines(
x, 0, 1, colors=vline_color, **vline_kwargs)
vline.set_transform(blended_transform_factory(ax_main.transData,
ax_main.transAxes))
vline_hscroll = None
else:
vline = ax_main.axvline(0, color=vline_color, **vline_kwargs)
vline_hscroll = ax_hscroll.axvline(0, color=vline_color,
**vline_kwargs)
vline_text = ax_main.annotate(
'', xy=(0, 0), xycoords='axes fraction', xytext=(-2, 0),
textcoords='offset points', fontsize=10, ha='right', va='center',
color=vline_color, **vline_kwargs)
# HELP BUTTON: initialize in the wrong spot...
ax_help = div.append_axes(position='left',
size=Fixed(help_width),
pad=Fixed(vscroll_dist))
# HELP BUTTON: ...move it down by changing its locator
loc = div.new_locator(nx=0, ny=0)
ax_help.set_axes_locator(loc)
# HELP BUTTON: make it a proper button
with _patched_canvas(ax_help.figure):
self.mne.button_help = Button(ax_help, 'Help')
# PROJ BUTTON
ax_proj = None
if len(self.mne.projs) and not self.mne.inst.proj:
proj_button_pos = [
1 - self._inch_to_rel(r_margin + scroll_width), # left
self._inch_to_rel(b_margin, horiz=False), # bottom
self._inch_to_rel(scroll_width), # width
self._inch_to_rel(scroll_width, horiz=False) # height
]
loc = div.new_locator(nx=4, ny=0)
ax_proj = self.add_axes(proj_button_pos)
ax_proj.set_axes_locator(loc)
with _patched_canvas(ax_help.figure):
self.mne.button_proj = Button(ax_proj, 'Prj')
# INIT TRACES
self.mne.trace_kwargs = dict(antialiased=True, linewidth=0.5)
self.mne.traces = ax_main.plot(
np.full((1, self.mne.n_channels), np.nan), **self.mne.trace_kwargs)
# SAVE UI ELEMENT HANDLES
vars(self.mne).update(
ax_main=ax_main, ax_help=ax_help, ax_proj=ax_proj,
ax_hscroll=ax_hscroll, ax_vscroll=ax_vscroll,
vsel_patch=vsel_patch, hsel_patch=hsel_patch, vline=vline,
vline_hscroll=vline_hscroll, vline_text=vline_text)
def _get_size(self):
return self.get_size_inches()
def _resize(self, event):
"""Handle resize event for mne_browse-style plots (Raw/Epochs/ICA)."""
old_width, old_height = self.mne.fig_size_px
new_width, new_height = self._get_size_px()
new_margins = _calc_new_margins(
self, old_width, old_height, new_width, new_height)
self.subplots_adjust(**new_margins)
# zen mode bookkeeping
self.mne.zen_w *= old_width / new_width
self.mne.zen_h *= old_height / new_height
self.mne.fig_size_px = (new_width, new_height)
self.canvas.draw_idle()
    def _hover(self, event):
        """Handle motion event when annotating.

        When draggable annotations are enabled, shows a vertical guide line
        on the nearest edge of the annotation span under the pointer, which
        can then be dragged to resize the span.
        """
        # ignore drags (a button is down), off-data motion, and motion
        # outside the main axes
        if (event.button is not None or event.xdata is None or
                event.inaxes != self.mne.ax_main):
            return
        if not self.mne.draggable_annotations:
            self._remove_annotation_hover_line()
            return
        from matplotlib.patheffects import Normal, Stroke
        for coll in self.mne.annotations:
            if coll.contains(event)[0]:
                path = coll.get_paths()
                assert len(path) == 1
                path = path[0]
                color = coll.get_edgecolors()[0]
                ylim = self.mne.ax_main.get_ylim()
                # are we on the left or right edge?
                _l = path.vertices[:, 0].min()
                _r = path.vertices[:, 0].max()
                x = _l if abs(event.xdata - _l) < abs(event.xdata - _r) else _r
                # vertices of the span polygon that sit on the chosen edge
                mask = path.vertices[:, 0] == x
                def drag_callback(x0):
                    # move the edge vertices along with the dragged line
                    path.vertices[mask, 0] = x0
                # create or update the DraggableLine
                hover_line = self.mne.annotation_hover_line
                if hover_line is None:
                    line = self.mne.ax_main.plot([x, x], ylim, color=color,
                                                 linewidth=2, pickradius=5.)[0]
                    hover_line = DraggableLine(
                        line, self._modify_annotation, drag_callback)
                else:
                    hover_line.set_x(x)
                    hover_line.drag_callback = drag_callback
                # style the line (thicker halo when pointer is on the line)
                line = hover_line.line
                patheff = [Stroke(linewidth=4, foreground=color, alpha=0.5),
                           Normal()]
                line.set_path_effects(patheff if line.contains(event)[0] else
                                      patheff[1:])
                # deactivate the span selector while hovering an edge, so a
                # drag modifies the existing span instead of creating one
                self.mne.ax_main.selector.active = False
                self.mne.annotation_hover_line = hover_line
                self.canvas.draw_idle()
                return
        self._remove_annotation_hover_line()
    def _keypress(self, event):
        """Handle keypress events.

        Dispatches keyboard navigation (arrow keys, Home/End, Page Up/Down),
        trace scaling (+/-/=), and the single-letter mode toggles (help,
        annotations, butterfly, DC removal, projectors, etc.). Unhandled
        keys fall through to the parent class handler.
        """
        key = event.key
        n_channels = self.mne.n_channels
        # total plottable time differs between continuous and epoched data
        if self.mne.is_epochs:
            last_time = self.mne.n_times / self.mne.info['sfreq']
        else:
            last_time = self.mne.inst.times[-1]
        # scroll up/down
        if key in ('down', 'up', 'shift+down', 'shift+up'):
            key = key.split('+')[-1]
            direction = -1 if key == 'up' else 1
            # butterfly case
            if self.mne.butterfly:
                return
            # group_by case
            elif self.mne.fig_selection is not None:
                buttons = self.mne.fig_selection.mne.radio_ax.buttons
                labels = [label.get_text() for label in buttons.labels]
                current_label = buttons.value_selected
                current_idx = labels.index(current_label)
                selections_dict = self.mne.ch_selections
                # "Custom" (lasso) selection is always last; only scroll
                # into it when it is non-empty
                penult = current_idx < (len(labels) - 1)
                pre_penult = current_idx < (len(labels) - 2)
                has_custom = selections_dict.get('Custom', None) is not None
                def_custom = len(selections_dict.get('Custom', list()))
                up_ok = key == 'up' and current_idx > 0
                down_ok = key == 'down' and (
                    pre_penult or
                    (penult and not has_custom) or
                    (penult and has_custom and def_custom))
                if up_ok or down_ok:
                    buttons.set_active(current_idx + direction)
            # normal case
            else:
                ceiling = len(self.mne.ch_order) - n_channels
                ch_start = self.mne.ch_start + direction * n_channels
                self.mne.ch_start = np.clip(ch_start, 0, ceiling)
                self._update_picks()
                self._update_vscroll()
                self._redraw()
        # scroll left/right
        elif key in ('right', 'left', 'shift+right', 'shift+left'):
            old_t_start = self.mne.t_start
            direction = 1 if key.endswith('right') else -1
            # shift scrolls a full window; otherwise one epoch / ¼ window
            if self.mne.is_epochs:
                denom = 1 if key.startswith('shift') else self.mne.n_epochs
            else:
                denom = 1 if key.startswith('shift') else 4
            t_max = last_time - self.mne.duration
            t_start = self.mne.t_start + direction * self.mne.duration / denom
            self.mne.t_start = np.clip(t_start, self.mne.first_time, t_max)
            if self.mne.t_start != old_t_start:
                self._update_hscroll()
                self._redraw(annotations=True)
        # scale traces
        elif key in ('=', '+', '-'):
            scaler = 1 / 1.1 if key == '-' else 1.1
            self.mne.scale_factor *= scaler
            self._redraw(update_data=False)
        # change number of visible channels
        elif (key in ('pageup', 'pagedown') and
                self.mne.fig_selection is None and
                not self.mne.butterfly):
            new_n_ch = n_channels + (1 if key == 'pageup' else -1)
            self.mne.n_channels = np.clip(new_n_ch, 1, len(self.mne.ch_order))
            # add new chs from above if we're at the bottom of the scrollbar
            ch_end = self.mne.ch_start + self.mne.n_channels
            if ch_end > len(self.mne.ch_order) and self.mne.ch_start > 0:
                self.mne.ch_start -= 1
                self._update_vscroll()
            # redraw only if changed
            if self.mne.n_channels != n_channels:
                self._update_picks()
                self._update_trace_offsets()
                self._redraw(annotations=True)
        # change duration
        elif key in ('home', 'end'):
            old_dur = self.mne.duration
            dur_delta = 1 if key == 'end' else -1
            if self.mne.is_epochs:
                # prevent from showing zero epochs, or more epochs than we have
                self.mne.n_epochs = np.clip(self.mne.n_epochs + dur_delta,
                                            1, len(self.mne.inst))
                # use the length of one epoch as duration change
                min_dur = len(self.mne.inst.times) / self.mne.info['sfreq']
                new_dur = self.mne.duration + dur_delta * min_dur
            else:
                # never show fewer than 3 samples
                min_dur = 3 * np.diff(self.mne.inst.times[:2])[0]
                # use multiplicative dur_delta
                dur_delta = 5 / 4 if dur_delta > 0 else 4 / 5
                new_dur = self.mne.duration * dur_delta
            self.mne.duration = np.clip(new_dur, min_dur, last_time)
            if self.mne.duration != old_dur:
                # clamp window so it never extends past the end of the data
                if self.mne.t_start + self.mne.duration > last_time:
                    self.mne.t_start = last_time - self.mne.duration
                self._update_hscroll()
                self._redraw(annotations=True)
        elif key == '?':  # help window
            self._toggle_help_fig(event)
        elif key == 'a':  # annotation mode
            self._toggle_annotation_fig()
        elif key == 'b' and self.mne.instance_type != 'ica':  # butterfly mode
            self._toggle_butterfly()
        elif key == 'd':  # DC shift
            self.mne.remove_dc = not self.mne.remove_dc
            self._redraw()
        elif key == 'h':  # histogram
            self._toggle_epoch_histogram()
        elif key == 'j' and len(self.mne.projs):  # SSP window
            self._toggle_proj_fig()
        elif key == 'J' and len(self.mne.projs):
            self._toggle_proj_checkbox(event, toggle_all=True)
        elif key == 'p':  # toggle draggable annotations
            self._toggle_draggable_annotations(event)
            # keep the dialog's checkbox in sync without re-triggering it
            if self.mne.fig_annotation is not None:
                checkbox = self.mne.fig_annotation.mne.drag_checkbox
                with _events_off(checkbox):
                    checkbox.set_active(0)
        elif key == 's':  # scalebars
            self._toggle_scalebars(event)
        elif key == 'w':  # toggle noise cov whitening
            self._toggle_whitening()
        elif key == 'z':  # zen mode: hide scrollbars and buttons
            self._toggle_scrollbars()
            self._redraw(update_data=False)
        elif key == 't':
            self._toggle_time_format()
        else:  # check for close key / fullscreen toggle
            super()._keypress(event)
    def _buttonpress(self, event):
        """Handle mouse clicks.

        Left clicks toggle bad channels/epochs, place the vertical guide,
        or operate the scrollbars/buttons depending on which axes was hit;
        right clicks delete annotations (in annotation mode) or clear the
        vertical guide.
        """
        butterfly = self.mne.butterfly
        annotating = self.mne.fig_annotation is not None
        ax_main = self.mne.ax_main
        inst = self.mne.inst
        # ignore middle clicks, scroll wheel events, and clicks outside axes
        if event.button not in (1, 3) or event.inaxes is None:
            return
        elif event.button == 1:  # left-click (primary)
            # click in main axes
            if (event.inaxes == ax_main and not annotating):
                if self.mne.instance_type == 'epochs' or not butterfly:
                    for line in self.mne.traces + self.mne.epoch_traces:
                        if line.contains(event)[0]:
                            if self.mne.instance_type == 'epochs':
                                self._toggle_bad_epoch(event)
                            else:
                                idx = self.mne.traces.index(line)
                                self._toggle_bad_channel(idx)
                            return
                self._show_vline(event.xdata)  # butterfly / not on data trace
                self._redraw(update_data=False, annotations=False)
                return
            # click in vertical scrollbar
            elif event.inaxes == self.mne.ax_vscroll:
                if self.mne.fig_selection is not None:
                    self._change_selection_vscroll(event)
                elif self._check_update_vscroll_clicked(event):
                    self._redraw()
            # click in horizontal scrollbar
            elif event.inaxes == self.mne.ax_hscroll:
                if self._check_update_hscroll_clicked(event):
                    self._redraw(annotations=True)
            # click on proj button
            elif event.inaxes == self.mne.ax_proj:
                self._toggle_proj_fig(event)
            # click on help button
            elif event.inaxes == self.mne.ax_help:
                self._toggle_help_fig(event)
        else:  # right-click (secondary)
            if annotating:
                if any(c.contains(event)[0] for c in ax_main.collections):
                    xdata = event.xdata - self.mne.first_time
                    start = _sync_onset(inst, inst.annotations.onset)
                    end = start + inst.annotations.duration
                    ann_idx = np.where((xdata > start) & (xdata < end))[0]
                    # delete in reverse order so remaining indices stay valid
                    for idx in sorted(ann_idx)[::-1]:
                        # only remove visible annotation spans
                        descr = inst.annotations[idx]['description']
                        if self.mne.visible_annotations[descr]:
                            inst.annotations.delete(idx)
                self._remove_annotation_hover_line()
                self._draw_annotations()
                self.canvas.draw_idle()
            elif event.inaxes == ax_main:
                self._toggle_vline(False)
def _pick(self, event):
"""Handle matplotlib pick events."""
from matplotlib.text import Text
if self.mne.butterfly:
return
# clicked on channel name
if isinstance(event.artist, Text):
ch_name = event.artist.get_text()
ind = self.mne.ch_names[self.mne.picks].tolist().index(ch_name)
if event.mouseevent.button == 1: # left click
self._toggle_bad_channel(ind)
elif event.mouseevent.button == 3: # right click
self._create_ch_context_fig(ind)
def _create_ch_context_fig(self, idx):
fig = super()._create_ch_context_fig(idx)
plt_show(fig=fig)
def _new_child_figure(self, fig_name, **kwargs):
"""Instantiate a new MNE dialog figure (with event listeners)."""
fig = _figure(toolbar=False, parent_fig=self, fig_name=fig_name,
**kwargs)
fig._add_default_callbacks()
self.mne.child_figs.append(fig)
if isinstance(fig_name, str):
setattr(self.mne, fig_name, fig)
return fig
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# HELP DIALOG
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    def _create_help_fig(self):
        """Create help dialog window.

        Renders the key/description pairs from ``_get_help_text`` as two
        right-/left-aligned text columns meeting at x=0.42.
        """
        # drop entries whose value is None (not applicable to this instance)
        text = {key: val for key, val in self._get_help_text().items()
                if val is not None}
        keys = ''
        vals = ''
        for key, val in text.items():
            # keys starting with '_' are section headers; they get a blank
            # line above and the underscore stripped
            newsection = '\n' if key.startswith('_') else ''
            key = key[1:] if key.startswith('_') else key
            newlines = '\n' * len(val.split('\n'))  # handle multiline values
            keys += f'{newsection}{key} {newlines}'
            vals += f'{newsection}{val}\n'
        # calc figure size
        n_lines = len(keys.split('\n'))
        longest_key = max(len(k) for k in text.keys())
        longest_val = max(max(len(w) for w in v.split('\n')) if '\n' in v else
                          len(v) for v in text.values())
        width = (longest_key + longest_val) / 12
        height = (n_lines) / 5
        # create figure and axes
        fig = self._new_child_figure(figsize=(width, height),
                                     fig_name='fig_help',
                                     window_title='Help')
        ax = fig.add_axes((0.01, 0.01, 0.98, 0.98))
        ax.set_axis_off()
        kwargs = dict(va='top', linespacing=1.5, usetex=False)
        ax.text(0.42, 1, keys, ma='right', ha='right', **kwargs)
        ax.text(0.42, 1, vals, ma='left', ha='left', **kwargs)
def _toggle_help_fig(self, event):
"""Show/hide the help dialog window."""
if self.mne.fig_help is None:
self._create_help_fig()
plt_show(fig=self.mne.fig_help)
else:
plt.close(self.mne.fig_help)
    def _get_help_text(self):
        """Generate help dialog text; `None`-valued entries removed later.

        Returns an OrderedDict mapping key names (keys starting with '_'
        become section headers) to descriptions, adapted to the platform
        (macOS vs. others) and instance type (raw/epochs/ica).
        """
        inst = self.mne.instance_type
        is_raw = inst == 'raw'
        is_epo = inst == 'epochs'
        is_ica = inst == 'ica'
        has_proj = bool(len(self.mne.projs))
        # adapt keys to different platforms
        is_mac = platform.system() == 'Darwin'
        dur_keys = ('fn + ←', 'fn + →') if is_mac else ('Home', 'End')
        ch_keys = ('fn + ↑', 'fn + ↓') if is_mac else ('Page up', 'Page down')
        # adapt descriptions to different instance types
        ch_cmp = 'component' if is_ica else 'channel'
        ch_epo = 'epoch' if is_epo else 'channel'
        ica_bad = 'Mark/unmark component for exclusion'
        dur_vals = ([f'Show {n} epochs' for n in ('fewer', 'more')]
                    if self.mne.is_epochs else
                    [f'Show {d} time window' for d in ('shorter', 'longer')])
        ch_vals = [f'{inc_dec} number of visible {ch_cmp}s' for inc_dec in
                   ('Increase', 'Decrease')]
        lclick_data = ica_bad if is_ica else f'Mark/unmark bad {ch_epo}'
        lclick_name = (ica_bad if is_ica else 'Mark/unmark bad channel')
        rclick_name = dict(ica='Show diagnostics for component',
                           epochs='Show imageplot for channel',
                           raw='Show channel location')[inst]
        # TODO not yet implemented
        # ldrag = ('Show spectrum plot for selected time span;\nor (in '
        #          'annotation mode) add annotation') if inst== 'raw' else None
        ldrag = 'add annotation (in annotation mode)' if is_raw else None
        noise_cov = (None if self.mne.noise_cov is None else
                     'Toggle signal whitening')
        scrl = '1 epoch' if self.mne.is_epochs else '¼ window'
        # below, value " " is a hack to make "\n".split(value) have length 1
        help_text = OrderedDict([
            ('_NAVIGATION', ' '),
            ('→', f'Scroll {scrl} right (scroll full window with Shift + →)'),
            ('←', f'Scroll {scrl} left (scroll full window with Shift + ←)'),
            (dur_keys[0], dur_vals[0]),
            (dur_keys[1], dur_vals[1]),
            ('↑', f'Scroll up ({ch_cmp}s)'),
            ('↓', f'Scroll down ({ch_cmp}s)'),
            (ch_keys[0], ch_vals[0]),
            (ch_keys[1], ch_vals[1]),
            ('_SIGNAL TRANSFORMATIONS', ' '),
            ('+ or =', 'Increase signal scaling'),
            ('-', 'Decrease signal scaling'),
            ('b', 'Toggle butterfly mode' if not is_ica else None),
            ('d', 'Toggle DC removal' if is_raw else None),
            ('w', noise_cov),
            ('_USER INTERFACE', ' '),
            ('a', 'Toggle annotation mode' if is_raw else None),
            ('h', 'Toggle peak-to-peak histogram' if is_epo else None),
            ('j', 'Toggle SSP projector window' if has_proj else None),
            ('shift+j', 'Toggle all SSPs'),
            ('p', 'Toggle draggable annotations' if is_raw else None),
            ('s', 'Toggle scalebars' if not is_ica else None),
            ('z', 'Toggle scrollbars'),
            ('t', 'Toggle time format' if not is_epo else None),
            ('F11', 'Toggle fullscreen' if not is_mac else None),
            ('?', 'Open this help window'),
            ('esc', 'Close focused figure or dialog window'),
            ('_MOUSE INTERACTION', ' '),
            (f'Left-click {ch_cmp} name', lclick_name),
            (f'Left-click {ch_cmp} data', lclick_data),
            ('Left-click-and-drag on plot', ldrag),
            ('Left-click on plot background', 'Place vertical guide'),
            ('Right-click on plot background', 'Clear vertical guide'),
            ('Right-click on channel name', rclick_name)
        ])
        return help_text
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# ANNOTATIONS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    def _create_annotation_fig(self):
        """Create the annotation dialog window.

        Builds the label radio buttons, show/hide checkboxes, instructions,
        text-entry field, "Add new label" button, and "draggable" checkbox,
        then activates the SpanSelector on the main plot axes.
        """
        from matplotlib.widgets import Button, CheckButtons, SpanSelector
        from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
        from mpl_toolkits.axes_grid1.axes_size import Fixed
        # make figure
        labels = np.array(sorted(set(self.mne.inst.annotations.description)))
        radio_button_h = self._compute_annotation_figsize(len(labels))
        figsize = (ANNOTATION_FIG_W, ANNOTATION_FIG_MIN_H + radio_button_h)
        fig = self._new_child_figure(figsize=figsize,
                                     FigureClass=MNEAnnotationFigure,
                                     fig_name='fig_annotation',
                                     window_title='Annotations')
        # make main axes
        left = fig._inch_to_rel(ANNOTATION_FIG_PAD)
        bottom = fig._inch_to_rel(ANNOTATION_FIG_PAD, horiz=False)
        width = 1 - 2 * left
        height = 1 - 2 * bottom
        fig.mne.radio_ax = fig.add_axes((left, bottom, width, height),
                                        frame_on=False, aspect='equal')
        div = make_axes_locatable(fig.mne.radio_ax)
        # append show/hide checkboxes at right
        fig.mne.show_hide_ax = div.append_axes(
            position='right', size=Fixed(ANNOTATION_FIG_CHECKBOX_COLUMN_W),
            pad=Fixed(ANNOTATION_FIG_PAD), aspect='equal',
            sharey=fig.mne.radio_ax)
        # populate w/ radio buttons & labels
        self._update_annotation_fig()
        # append instructions at top
        instructions_ax = div.append_axes(position='top', size=Fixed(1),
                                          pad=Fixed(5 * ANNOTATION_FIG_PAD))
        # XXX when we support a newer matplotlib (something >3.0) the
        # instructions can have inline bold formatting:
        # instructions = '\n'.join(
        #     [r'$\mathbf{Left‐click~&~drag~on~plot:}$ create/modify annotation',  # noqa E501
        #      r'$\mathbf{Right‐click~on~plot~annotation:}$ delete annotation',
        #      r'$\mathbf{Type~in~annotation~window:}$ modify new label name',
        #      r'$\mathbf{Enter~(or~click~button):}$ add new label to list',
        #      r'$\mathbf{Esc:}$ exit annotation mode & close this window'])
        instructions = '\n'.join(
            ['Left click & drag on plot: create/modify annotation',
             'Right click on annotation highlight: delete annotation',
             'Type in this window: modify new label name',
             'Enter (or click button): add new label to list',
             'Esc: exit annotation mode & close this dialog window'])
        instructions_ax.text(0, 1, instructions, va='top', ha='left',
                             linespacing=1.7,
                             usetex=False)  # force use of MPL mathtext parser
        instructions_ax.set_axis_off()
        # append text entry axes at bottom
        text_entry_ax = div.append_axes(position='bottom',
                                        size=Fixed(3 * ANNOTATION_FIG_PAD),
                                        pad=Fixed(ANNOTATION_FIG_PAD))
        text_entry_ax.text(0.4, 0.5, 'New label:', va='center', ha='right',
                           weight='bold')
        fig.label = text_entry_ax.text(0.5, 0.5, 'BAD_', va='center',
                                       ha='left')
        text_entry_ax.set_axis_off()
        # append button at bottom
        button_ax = div.append_axes(position='bottom',
                                    size=Fixed(3 * ANNOTATION_FIG_PAD),
                                    pad=Fixed(ANNOTATION_FIG_PAD))
        fig.button = Button(button_ax, 'Add new label')
        fig.button.on_clicked(self._add_annotation_label)
        plt_show(fig=fig)
        # add "draggable" checkbox
        drag_ax_height = 3 * ANNOTATION_FIG_PAD
        drag_ax = div.append_axes('bottom', size=Fixed(drag_ax_height),
                                  pad=Fixed(ANNOTATION_FIG_PAD),
                                  aspect='equal')
        checkbox = CheckButtons(drag_ax, labels=('Draggable edges?',),
                                actives=(self.mne.draggable_annotations,))
        checkbox.on_clicked(self._toggle_draggable_annotations)
        fig.mne.drag_checkbox = checkbox
        # reposition & resize axes
        width_in, height_in = fig.get_size_inches()
        width_ax = fig._inch_to_rel(width_in
                                    - ANNOTATION_FIG_CHECKBOX_COLUMN_W
                                    - 3 * ANNOTATION_FIG_PAD)
        aspect = width_ax / fig._inch_to_rel(drag_ax_height)
        drag_ax.set_xlim(0, aspect)
        drag_ax.set_axis_off()
        # reposition & resize checkbox & label
        rect = checkbox.rectangles[0]
        _pad, _size = (0.2, 0.6)
        rect.set_bounds(_pad, _pad, _size, _size)
        lines = checkbox.lines[0]
        # redraw the "X" inside the resized checkbox rectangle
        for line, direction in zip(lines, (1, -1)):
            line.set_xdata((_pad, _pad + _size)[::direction])
            line.set_ydata((_pad, _pad + _size))
        text = checkbox.labels[0]
        text.set(position=(3 * _pad + _size, 0.45), va='center')
        for artist in lines + (rect, text):
            artist.set_transform(drag_ax.transData)
        # setup interactivity in plot window
        col = ('#ff0000' if len(fig.mne.radio_ax.buttons.circles) < 1 else
               fig.mne.radio_ax.buttons.circles[0].get_edgecolor())
        # TODO: we would like useblit=True here, but it behaves oddly when the
        # first span is dragged (subsequent spans seem to work OK)
        rect_kw = _prop_kw('rect', dict(alpha=0.5, facecolor=col))
        selector = SpanSelector(self.mne.ax_main, self._select_annotation_span,
                                'horizontal', minspan=0.1, useblit=False,
                                **rect_kw)
        self.mne.ax_main.selector = selector
        # hover handler shows the draggable-edge guide line
        self.mne._callback_ids['motion_notify_event'] = \
            self.canvas.mpl_connect('motion_notify_event', self._hover)
def _toggle_visible_annotations(self, event):
"""Enable/disable display of annotations on a per-label basis."""
checkboxes = self.mne.show_hide_annotation_checkboxes
labels = [t.get_text() for t in checkboxes.labels]
actives = checkboxes.get_status()
self.mne.visible_annotations = dict(zip(labels, actives))
self._redraw(update_data=False, annotations=True)
def _toggle_draggable_annotations(self, event):
"""Enable/disable draggable annotation edges."""
self.mne.draggable_annotations = not self.mne.draggable_annotations
    def _update_annotation_fig(self):
        """Draw or redraw the radio buttons and annotation labels.

        Rebuilds the label radio buttons and the show/hide checkboxes from
        the current set of annotation labels, resizing the dialog to fit.
        """
        from matplotlib.widgets import CheckButtons, RadioButtons
        # define shorthand variables
        fig = self.mne.fig_annotation
        ax = fig.mne.radio_ax
        labels = self._get_annotation_labels()
        # compute new figsize
        radio_button_h = self._compute_annotation_figsize(len(labels))
        fig.set_size_inches(ANNOTATION_FIG_W,
                            ANNOTATION_FIG_MIN_H + radio_button_h,
                            forward=True)
        # populate center axes with labels & radio buttons
        ax.clear()
        title = 'Existing labels:' if len(labels) else 'No existing labels'
        ax.set_title(title, size=None, loc='left')
        ax.buttons = RadioButtons(ax, labels)
        # adjust xlim to keep equal aspect & full width (keep circles round)
        aspect = (ANNOTATION_FIG_W - ANNOTATION_FIG_CHECKBOX_COLUMN_W
                  - 3 * ANNOTATION_FIG_PAD) / radio_button_h
        ax.set_xlim((0, aspect))
        # style the buttons & adjust spacing
        radius = 0.15
        circles = ax.buttons.circles
        for circle, label in zip(circles, ax.buttons.labels):
            circle.set_transform(ax.transData)
            center = ax.transData.inverted().transform(
                ax.transAxes.transform((0.1, 0)))
            circle.set_center((center[0], circle.center[1]))
            # edge color encodes the annotation label's plot color
            circle.set_edgecolor(
                self.mne.annotation_segment_colors[label.get_text()])
            circle.set_linewidth(4)
            circle.set_radius(radius / len(labels))
        # style the selected button
        if len(labels):
            fig._set_active_button(0)
        # add event listeners
        ax.buttons.disconnect_events()  # clear MPL default listeners
        ax.buttons.on_clicked(fig._radiopress)
        ax.buttons.connect_event('button_press_event', fig._click_override)
        # now do the show/hide checkboxes
        show_hide_ax = fig.mne.show_hide_ax
        show_hide_ax.clear()
        show_hide_ax.set_axis_off()
        aspect = ANNOTATION_FIG_CHECKBOX_COLUMN_W / radio_button_h
        show_hide_ax.set(xlim=(0, aspect), ylim=(0, 1))
        # ensure new labels have checkbox values
        check_values = {label: False for label in labels}
        check_values.update(self.mne.visible_annotations)  # existing checks
        actives = [check_values[label] for label in labels]
        # regenerate checkboxes
        checkboxes = CheckButtons(ax=fig.mne.show_hide_ax,
                                  labels=labels,
                                  actives=actives)
        checkboxes.on_clicked(self._toggle_visible_annotations)
        # add title, hide labels
        show_hide_ax.set_title('show/\nhide ', size=None, loc='right')
        for label in checkboxes.labels:
            label.set_visible(False)
        # fix aspect and right-align
        if len(labels) == 1:
            bounds = (0.05, 0.375, 0.25, 0.25)  # undo MPL special case
            checkboxes.rectangles[0].set_bounds(bounds)
            for line, step in zip(checkboxes.lines[0], (1, -1)):
                line.set_xdata((bounds[0], bounds[0] + bounds[2]))
                line.set_ydata((bounds[1], bounds[1] + bounds[3])[::step])
        for rect in checkboxes.rectangles:
            rect.set_transform(show_hide_ax.transData)
            bbox = rect.get_bbox()
            # negative width mirrors the box so it hugs the right edge
            bounds = (aspect, bbox.ymin, -bbox.width, bbox.height)
            rect.set_bounds(bounds)
            rect.set_clip_on(False)
        for line in np.array(checkboxes.lines).ravel():
            line.set_transform(show_hide_ax.transData)
            line.set_xdata(aspect + 0.05 - np.array(line.get_xdata()))
        # store state
        self.mne.visible_annotations = check_values
        self.mne.show_hide_annotation_checkboxes = checkboxes
def _toggle_annotation_fig(self):
"""Show/hide the annotation dialog window."""
if self.mne.fig_annotation is None and not self.mne.is_epochs:
self._create_annotation_fig()
else:
plt.close(self.mne.fig_annotation)
def _compute_annotation_figsize(self, n_labels):
"""Adapt size of Annotation UI to accommodate the number of buttons.
self._create_annotation_fig() implements the following:
Fixed part of height:
0.1 top margin
1.0 instructions
0.5 padding below instructions
--- (variable-height axis for label list, returned by this method)
0.1 padding above text entry
0.3 text entry
0.1 padding above button
0.3 button
0.1 padding above checkbox
0.3 checkbox
0.1 bottom margin
------------------------------------------
2.9 total fixed height
"""
return max(ANNOTATION_FIG_PAD, 0.7 * n_labels)
def _add_annotation_label(self, event):
"""Add new annotation description."""
text = self.mne.fig_annotation.label.get_text()
self.mne.new_annotation_labels.append(text)
self._setup_annotation_colors()
self._update_annotation_fig()
# automatically activate new label's radio button
idx = [label.get_text() for label in
self.mne.fig_annotation.mne.radio_ax.buttons.labels].index(text)
self.mne.fig_annotation._set_active_button(idx)
# simulate a click on the radiobutton → update the span selector color
self.mne.fig_annotation._radiopress(event=None)
# reset the text entry box's text
self.mne.fig_annotation.label.set_text('BAD_')
def _select_annotation_span(self, vmin, vmax):
"""Handle annotation span selector."""
onset = _sync_onset(self.mne.inst, vmin, True) - self.mne.first_time
duration = vmax - vmin
buttons = self.mne.fig_annotation.mne.radio_ax.buttons
labels = [label.get_text() for label in buttons.labels]
if buttons.value_selected is not None:
active_idx = labels.index(buttons.value_selected)
_merge_annotations(onset, onset + duration, labels[active_idx],
self.mne.inst.annotations)
# if adding a span with an annotation label that is hidden, show it
if not self.mne.visible_annotations[buttons.value_selected]:
self.mne.show_hide_annotation_checkboxes.set_active(active_idx)
self._redraw(update_data=False, annotations=True)
else:
logger.warning('No annotation-label exists! '
'Add one by typing the name and clicking '
'on "Add new label" in the annotation-dialog.')
def _remove_annotation_hover_line(self):
"""Remove annotation line from the plot and reactivate selector."""
if self.mne.annotation_hover_line is not None:
self.mne.annotation_hover_line.remove()
self.mne.annotation_hover_line = None
self.mne.ax_main.selector.active = True
self.canvas.draw()
    def _modify_annotation(self, old_x, new_x):
        """Modify annotation.

        Called when a draggable annotation edge is released: moves the edge
        that was at plot-coordinate ``old_x`` to ``new_x``, updating the
        corresponding entry in the instance's Annotations.
        """
        # locate which segment (row) and which edge (column: 0=start, 1=end)
        # matched the dragged x position
        segment = np.array(np.where(self.mne.annotation_segments == old_x))
        if segment.shape[1] == 0:
            return
        raw = self.mne.inst
        annotations = raw.annotations
        first_time = self.mne.first_time
        idx = [segment[0][0], segment[1][0]]
        # recover the annotation whose onset matches this segment's start
        onset = _sync_onset(raw, self.mne.annotation_segments[idx[0]][0], True)
        ann_idx = np.where(annotations.onset == onset - first_time)[0]
        if idx[1] == 0:  # start of annotation
            onset = _sync_onset(raw, new_x, True) - first_time
            duration = annotations.duration[ann_idx] + old_x - new_x
        else:  # end of annotation
            onset = annotations.onset[ann_idx]
            duration = _sync_onset(raw, new_x, True) - onset - first_time
        # dragging one edge past the other flips the span
        if duration < 0:
            onset += duration
            duration *= -1.
        _merge_annotations(onset, onset + duration,
                           annotations.description[ann_idx],
                           annotations, ann_idx)
        self._draw_annotations()
        self._remove_annotation_hover_line()
        self.canvas.draw_idle()
def _clear_annotations(self):
"""Clear all annotations from the figure."""
for annot in list(self.mne.annotations):
annot.remove()
self.mne.annotations.remove(annot)
for annot in list(self.mne.hscroll_annotations):
annot.remove()
self.mne.hscroll_annotations.remove(annot)
for text in list(self.mne.annotation_texts):
text.remove()
self.mne.annotation_texts.remove(text)
    def _draw_annotations(self):
        """Draw (or redraw) the annotation spans.

        Clears existing annotation artists, recomputes the segments, then
        draws each span on the horizontal scrollbar and (if currently in
        view and not hidden) on the main axes with its description label.
        """
        self._clear_annotations()
        self._update_annotation_segments()
        segments = self.mne.annotation_segments
        times = self.mne.times
        ax = self.mne.ax_main
        ylim = ax.get_ylim()
        for idx, (start, end) in enumerate(segments):
            descr = self.mne.inst.annotations.description[idx]
            segment_color = self.mne.annotation_segment_colors[descr]
            kwargs = dict(color=segment_color, alpha=0.3,
                          zorder=self.mne.zorder['ann'])
            if self.mne.visible_annotations[descr]:
                # draw all segments on ax_hscroll
                annot = self.mne.ax_hscroll.fill_betweenx((0, 1), start, end,
                                                          **kwargs)
                self.mne.hscroll_annotations.append(annot)
                # draw only visible segments on ax_main
                visible_segment = np.clip([start, end], times[0], times[-1])
                # clipping collapses fully out-of-view segments to zero width
                if np.diff(visible_segment) > 0:
                    annot = ax.fill_betweenx(ylim, *visible_segment, **kwargs)
                    self.mne.annotations.append(annot)
                    xy = (visible_segment.mean(), ylim[1])
                    text = ax.annotate(descr, xy, xytext=(0, 9),
                                       textcoords='offset points', ha='center',
                                       va='baseline', color=segment_color)
                    self.mne.annotation_texts.append(text)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# CHANNEL SELECTION GUI
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    def _create_selection_fig(self):
        """Create channel selection dialog window.

        Builds a sensor-location plot (with lasso selection), one radio
        button per channel group, and usage instructions.
        """
        from matplotlib.colors import to_rgb
        from matplotlib.gridspec import GridSpec
        from matplotlib.widgets import RadioButtons
        # make figure
        fig = self._new_child_figure(figsize=(3, 7),
                                     FigureClass=MNESelectionFigure,
                                     fig_name='fig_selection',
                                     window_title='Channel selection')
        # XXX when matplotlib 3.3 is min version, replace this with
        # XXX gs = fig.add_gridspec(15, 1)
        gs = GridSpec(nrows=15, ncols=1)
        # add sensor plot at top
        fig.mne.sensor_ax = fig.add_subplot(gs[:5])
        plot_sensors(self.mne.info, kind='select', ch_type='all', title='',
                     axes=fig.mne.sensor_ax, ch_groups=self.mne.group_by,
                     show=False)
        fig.subplots_adjust(bottom=0.01, top=0.99, left=0.01, right=0.99)
        # style the sensors so the selection is easier to distinguish
        fig.lasso.linewidth_selected = 2
        self._update_highlighted_sensors()
        # add radio button axes
        radio_ax = fig.add_subplot(gs[5:-3], frame_on=False, aspect='equal')
        fig.mne.radio_ax = radio_ax
        selections_dict = self.mne.ch_selections
        selections_dict.update(Custom=np.array([], dtype=int))  # for lasso
        labels = list(selections_dict)
        # make & style the radio buttons
        activecolor = to_rgb(self.mne.fgcolor) + (0.5,)
        radio_ax.buttons = RadioButtons(radio_ax, labels,
                                        activecolor=activecolor)
        fig.mne.old_selection = 0
        # shrink circle radius so all buttons fit the axes
        for circle in radio_ax.buttons.circles:
            circle.set_radius(0.25 / len(labels))
            circle.set_linewidth(2)
            circle.set_edgecolor(self.mne.fgcolor)
        fig._style_radio_buttons_butterfly()
        # add instructions at bottom
        instructions = (
            'To use a custom selection, first click-drag on the sensor plot '
            'to "lasso" the sensors you want to select, or hold Ctrl while '
            'clicking individual sensors. Holding Ctrl while click-dragging '
            'allows a lasso selection adding to (rather than replacing) the '
            'existing selection.')
        instructions_ax = fig.add_subplot(gs[-3:], frame_on=False)
        instructions_ax.text(0.04, 0.08, instructions, va='bottom', ha='left',
                             ma='left', wrap=True)
        instructions_ax.set_axis_off()
        # add event listeners
        radio_ax.buttons.on_clicked(fig._radiopress)
        fig.lasso.callbacks.append(fig._set_custom_selection)
def _change_selection_vscroll(self, event):
"""Handle clicks on vertical scrollbar when using selections."""
buttons = self.mne.fig_selection.mne.radio_ax.buttons
labels = [label.get_text() for label in buttons.labels]
offset = 0
selections_dict = self.mne.ch_selections
for idx, label in enumerate(labels):
offset += len(selections_dict[label])
if event.ydata < offset:
with _events_off(buttons):
buttons.set_active(idx)
self.mne.fig_selection._radiopress(event)
return
    def _update_selection(self):
        """Update visible channels based on selection dialog interaction."""
        selections_dict = self.mne.ch_selections
        fig = self.mne.fig_selection
        buttons = fig.mne.radio_ax.buttons
        label = buttons.value_selected
        labels = [_label.get_text() for _label in buttons.labels]
        # remember which group is active for later restyling/navigation
        self.mne.fig_selection.mne.old_selection = labels.index(label)
        self.mne.picks = selections_dict[label]
        self.mne.n_channels = len(self.mne.picks)
        self._update_highlighted_sensors()
        # if "Vertex" is defined, some channels appear twice, so if
        # "Vertex" is selected, ch_start should be the *first* match;
        # otherwise it should be the *last* match (since "Vertex" is
        # always the first selection group, if it exists).
        index = 0 if label == 'Vertex' else -1
        ch_order = np.concatenate(list(selections_dict.values()))
        ch_start = np.where(ch_order == self.mne.picks[0])[0][index]
        self.mne.ch_start = ch_start
        self._update_trace_offsets()
        self._update_vscroll()
        self._redraw(annotations=True)
def _update_highlighted_sensors(self):
"""Update the sensor plot to show what is selected."""
inds = np.in1d(self.mne.fig_selection.lasso.ch_names,
self.mne.ch_names[self.mne.picks]).nonzero()[0]
self.mne.fig_selection.lasso.select_many(inds)
def _update_bad_sensors(self, pick, mark_bad):
"""Update the sensor plot to reflect (un)marked bad channels."""
# replicate plotting order from plot_sensors(), to get index right
sensor_picks = list()
ch_indices = channel_indices_by_type(self.mne.info)
for this_type in _DATA_CH_TYPES_SPLIT:
if this_type in self.mne.ch_types:
sensor_picks.extend(ch_indices[this_type])
sensor_idx = np.in1d(sensor_picks, pick).nonzero()[0]
# change the sensor color
fig = self.mne.fig_selection
fig.lasso.ec[sensor_idx, 0] = float(mark_bad) # change R of RGBA array
fig.lasso.collection.set_edgecolors(fig.lasso.ec)
fig.canvas.draw_idle()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# PROJECTORS & BAD CHANNELS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _create_proj_fig(self):
    """Create the projectors dialog window."""
    from matplotlib.widgets import Button, CheckButtons

    projs = self.mne.projs
    labels = [p['desc'] for p in projs]
    # annotate projectors that have already been applied to the data
    for ix, active in enumerate(self.mne.projs_active):
        if active:
            labels[ix] += ' (already applied)'
    # make figure; width scales with the longest label, height with count
    width = max([4.5, max([len(label) for label in labels]) / 8 + 0.5])
    height = (len(projs) + 1) / 6 + 1.5
    fig = self._new_child_figure(figsize=(width, height),
                                 fig_name='fig_proj',
                                 window_title='SSP projection vectors')
    # pass through some proj fig keypresses to the parent
    fig.canvas.mpl_connect(
        'key_press_event',
        lambda ev: self._keypress(ev) if ev.key in 'jJ' else None)
    # make axes; `offset` reserves room at the bottom for the button
    offset = (1 / 6 / height)
    position = (0, offset, 1, 0.8 - offset)
    ax = fig.add_axes(position, frame_on=False, aspect='equal')
    # make title
    first_line = ('Projectors already applied to the data are dimmed.\n'
                  if any(self.mne.projs_active) else '')
    second_line = 'Projectors marked with "X" are active on the plot.'
    ax.set_title(f'{first_line}{second_line}')
    # draw checkboxes
    checkboxes = CheckButtons(ax, labels=labels, actives=self.mne.projs_on)
    # gray-out already applied projectors
    for label, rect, lines in zip(checkboxes.labels,
                                  checkboxes.rectangles,
                                  checkboxes.lines):
        if label.get_text().endswith('(already applied)'):
            label.set_color('0.5')
            rect.set_edgecolor('0.7')
            [x.set_color('0.7') for x in lines]
        rect.set_linewidth(1)
    # add "toggle all" button
    ax_all = fig.add_axes((0.25, 0.01, 0.5, offset), frame_on=True)
    fig.mne.proj_all = Button(ax_all, 'Toggle all')
    # add event listeners
    checkboxes.on_clicked(self._toggle_proj_checkbox)
    fig.mne.proj_all.on_clicked(partial(self._toggle_proj_checkbox,
                                        toggle_all=True))
    # save params
    fig.mne.proj_checkboxes = checkboxes
    # show figure
    self.mne.fig_proj.canvas.draw()
    plt_show(fig=self.mne.fig_proj, warn=False)
def _toggle_proj_fig(self, event=None):
    """Show or hide the SSP projectors dialog window."""
    # dialog already open → close it; otherwise create it
    if self.mne.fig_proj is not None:
        plt.close(self.mne.fig_proj)
    else:
        self._create_proj_fig()
def _toggle_proj_checkbox(self, event, toggle_all=False):
    """Perform operations when proj boxes clicked."""
    on = self.mne.projs_on          # current on/off state per projector
    applied = self.mne.projs_active  # projectors already applied to data
    fig = self.mne.fig_proj
    # "toggle all": everything becomes the opposite of all(on);
    # otherwise take the checkbox widget's reported state
    new_state = (np.full_like(on, not all(on)) if toggle_all else
                 np.array(fig.mne.proj_checkboxes.get_status()))
    # update Xs when toggling all
    if fig is not None:
        if toggle_all:
            with _events_off(fig.mne.proj_checkboxes):
                for ix in np.where(on != new_state)[0]:
                    fig.mne.proj_checkboxes.set_active(ix)
        # don't allow disabling already-applied projs
        with _events_off(fig.mne.proj_checkboxes):
            for ix in np.where(applied)[0]:
                if not new_state[ix]:
                    fig.mne.proj_checkboxes.set_active(ix)
    # applied projectors always stay "on" in the state array
    new_state[applied] = True
    # update the data if necessary
    if not np.array_equal(on, new_state):
        self.mne.projs_on = new_state
        self._update_projector()
        self._redraw()
def _toggle_epoch_histogram(self):
    """Show or hide peak-to-peak histogram of channel amplitudes."""
    # only meaningful for Epochs-based browse figures
    if self.mne.instance_type != 'epochs':
        return
    if self.mne.fig_histogram is not None:
        plt.close(self.mne.fig_histogram)
    else:
        self._create_epoch_histogram()
def _toggle_bad_channel(self, idx):
    """Mark/unmark bad channels; `idx` is index of *visible* channels."""
    # parent class updates info['bads'] and returns the new trace color,
    # the absolute channel index, and whether it is now marked bad
    color, pick, marked_bad = super()._toggle_bad_channel(idx)
    # update sensor color (if in selection mode)
    if self.mne.fig_selection is not None:
        self._update_bad_sensors(pick, marked_bad)
    # update vscroll color (a channel may appear more than once in ch_order)
    vscroll_idx = (self.mne.ch_order == pick).nonzero()[0]
    for _idx in vscroll_idx:
        self.mne.ax_vscroll.patches[_idx].set_color(color)
    # redraw
    self._redraw()
def _toggle_bad_epoch(self, event):
    """Mark/unmark bad epochs."""
    # parent class toggles the epoch's bad status from the click x-position
    epoch_ix, color = super()._toggle_bad_epoch(event.xdata)
    # recolor the corresponding patch in the horizontal scrollbar
    self.mne.ax_hscroll.patches[epoch_ix].set_color(color)
    self._redraw(update_data=False)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# SCROLLBARS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _update_zen_mode_offsets(self):
    """Compute difference between main axes edges and scrollbar edges."""
    self.mne.fig_size_px = self._get_size_px()
    main_pos = self.mne.ax_main.get_position()
    # horizontal gap taken up by the vertical scrollbar
    self.mne.zen_w = self.mne.ax_vscroll.get_position().xmax - main_pos.xmax
    # vertical gap taken up by the horizontal scrollbar
    self.mne.zen_h = main_pos.ymin - self.mne.ax_hscroll.get_position().ymin
def _toggle_scrollbars(self):
    """Show or hide scrollbars (A.K.A. zen mode)."""
    self._update_zen_mode_offsets()
    # grow/shrink main axes to take up space from (or make room for)
    # scrollbars. We can't use ax.set_position() because axes are
    # locatable, so we use subplots_adjust
    should_show = not self.mne.scrollbars_visible
    margins = {side: getattr(self.subplotpars, side)
               for side in ('left', 'bottom', 'right', 'top')}
    # if should_show, bottom margin moves up; right margin moves left
    margins['bottom'] += (1 if should_show else -1) * self.mne.zen_h
    margins['right'] += (-1 if should_show else 1) * self.mne.zen_w
    self.subplots_adjust(**margins)
    # handle x-axis label (only shown in zen mode, when hscroll is hidden)
    self.mne.zen_xlabel.set_visible(not should_show)
    # show/hide other UI elements
    for elem in ('ax_hscroll', 'ax_vscroll', 'ax_proj', 'ax_help'):
        # vscroll is hidden in butterfly mode regardless of zen state
        if elem == 'ax_vscroll' and self.mne.butterfly:
            continue
        # sometimes we don't have a proj button (ax_proj)
        if getattr(self.mne, elem, None) is not None:
            getattr(self.mne, elem).set_visible(should_show)
    self.mne.scrollbars_visible = should_show
def _update_vscroll(self):
    """Update the vertical scrollbar (channel) selection indicator."""
    # position/size the indicator to span the visible channel range
    self.mne.vsel_patch.set_xy((0, self.mne.ch_start))
    self.mne.vsel_patch.set_height(self.mne.n_channels)
    self._update_yaxis_labels()
def _update_hscroll(self):
    """Update the horizontal scrollbar (time) selection indicator."""
    patch = self.mne.hsel_patch
    # indicator spans [t_start, t_start + duration] along the time axis
    patch.set_xy((self.mne.t_start, 0))
    patch.set_width(self.mne.duration)
def _check_update_hscroll_clicked(self, event):
    """Handle clicks on horizontal scrollbar."""
    # center the viewport on the click position
    time = event.xdata - self.mne.duration / 2
    max_time = (self.mne.n_times / self.mne.info['sfreq'] +
                self.mne.first_time - self.mne.duration)
    time = np.clip(time, self.mne.first_time, max_time)
    # for epochs, snap to the nearest epoch boundary
    if self.mne.is_epochs:
        ix = np.searchsorted(self.mne.boundary_times[1:], time)
        time = self.mne.boundary_times[ix]
    if self.mne.t_start != time:
        self.mne.t_start = time
        self._update_hscroll()
        return True  # viewport moved
    return False
def _check_update_vscroll_clicked(self, event):
    """Update vscroll patch on click, return True if location changed."""
    # center visible channels on the click, clipped to valid range
    new_ch_start = np.clip(
        int(round(event.ydata - self.mne.n_channels / 2)),
        0, len(self.mne.ch_order) - self.mne.n_channels)
    if self.mne.ch_start != new_ch_start:
        self.mne.ch_start = new_ch_start
        self._update_picks()
        self._update_vscroll()
        return True
    return False
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# SCALEBARS & AXIS LABELS
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _show_scalebars(self):
    """Add channel scale bars."""
    for offset, pick in zip(self.mne.trace_offsets, self.mne.picks):
        this_name = self.mne.ch_names[pick]
        this_type = self.mne.ch_types[pick]
        # draw at most one bar per channel type; skip stim channels,
        # channels without known scalings/units, bads, and whitened chs
        if (this_type not in self.mne.scalebars and
                this_type != 'stim' and
                this_type in self.mne.scalings and
                this_type in getattr(self.mne, 'units', {}) and
                this_type in getattr(self.mne, 'unit_scalings', {}) and
                this_name not in self.mne.info['bads'] and
                this_name not in self.mne.whitened_ch_names):
            # vertical bar at the left edge of the current viewport
            x = (self.mne.times[0] + self.mne.first_time,) * 2
            denom = 4 if self.mne.butterfly else 2
            y = tuple(np.array([-1, 1]) / denom + offset)
            self._draw_one_scalebar(x, y, this_type)
    if self.mne.is_epochs:
        # horizontal time scalebar spanning half an epoch
        x = (self.mne.times[0], self.mne.times[0] +
             self.mne.boundary_times[1] / 2)
        y_value = self.mne.n_channels - 0.5
        y = (y_value, y_value)
        self._draw_one_scalebar(x, y, 'time')
def _hide_scalebars(self):
    """Remove channel scale bars (and their text labels) from the plot."""
    # drop both the bar artists and their associated text artists
    for artist_dict in (self.mne.scalebars, self.mne.scalebar_texts):
        for artist in artist_dict.values():
            artist.remove()
    # reset the bookkeeping dicts
    self.mne.scalebars = dict()
    self.mne.scalebar_texts = dict()
def _toggle_scalebars(self, event):
    """Show/hide the scalebars."""
    currently_visible = self.mne.scalebars_visible
    if currently_visible:
        self._hide_scalebars()
    else:
        # make sure picks are current before drawing
        self._update_picks()
        self._show_scalebars()
    # flip the visibility flag and refresh the canvas
    self.mne.scalebars_visible = not currently_visible
    self._redraw(update_data=False)
def _draw_one_scalebar(self, x, y, ch_type):
    """Draw a scalebar."""
    from .utils import _simplify_float
    color = '#AA3377'  # purple
    kwargs = dict(color=color, zorder=self.mne.zorder['scalebar'])
    if ch_type == 'time':
        # horizontal time bar: label with its duration in seconds
        label = f'{self.mne.boundary_times[1]/2:.2f} sec'
        text = self.mne.ax_main.text(x[0] + .015, y[1] - .05, label,
                                     va='bottom', ha='left',
                                     size='xx-small', **kwargs)
    else:
        # vertical amplitude bar: label with the data value the bar spans,
        # undoing the display scaling (halved bar in butterfly mode)
        scaler = 1 if self.mne.butterfly else 2
        inv_norm = (scaler *
                    self.mne.scalings[ch_type] *
                    self.mne.unit_scalings[ch_type] /
                    self.mne.scale_factor)
        label = f'{_simplify_float(inv_norm)} {self.mne.units[ch_type]} '
        text = self.mne.ax_main.text(x[1], y[1], label, va='baseline',
                                     ha='right', size='xx-small', **kwargs)
    bar = self.mne.ax_main.plot(x, y, lw=4, **kwargs)[0]
    # store artists so _hide_scalebars() can remove them later
    self.mne.scalebars[ch_type] = bar
    self.mne.scalebar_texts[ch_type] = text
def _update_yaxis_labels(self):
    """Change the y-axis labels."""
    if self.mne.butterfly and self.mne.fig_selection is not None:
        # butterfly + selection mode: label rows by selection group
        exclude = ('Vertex', 'Custom')
        ticklabels = list(self.mne.ch_selections)
        keep_mask = np.in1d(ticklabels, exclude, invert=True)
        ticklabels = [t.replace('Left-', 'L-').replace('Right-', 'R-')
                      for t in ticklabels]  # avoid having to rotate labels
        ticklabels = np.array(ticklabels)[keep_mask]
    elif self.mne.butterfly:
        # butterfly mode: label rows by channel type, in canonical order
        _, ixs, _ = np.intersect1d(_DATA_CH_TYPES_ORDER_DEFAULT,
                                   self.mne.ch_types, return_indices=True)
        ixs.sort()
        ticklabels = np.array(_DATA_CH_TYPES_ORDER_DEFAULT)[ixs]
    else:
        # normal mode: label each row with its channel name
        ticklabels = self.mne.ch_names[self.mne.picks]
    texts = self.mne.ax_main.set_yticklabels(ticklabels, picker=True)
    # italicize names of whitened channels
    for text in texts:
        sty = ('italic' if text.get_text() in self.mne.whitened_ch_names
               else 'normal')
        text.set_style(sty)
def _xtick_formatter(self, x, pos=None, ax_type='main'):
    """Change the x-axis labels."""
    # choose decimal precision from the tick spacing of the main axes
    tickdiff = np.diff(self.mne.ax_main.get_xticks())[0]
    digits = np.ceil(-np.log10(tickdiff) + 1).astype(int)
    # always show millisecond precision for vline text
    if ax_type == 'vline':
        digits = 3
    if self.mne.time_format == 'float':
        # round to integers when possible ('9.0' → '9')
        if int(x) == x:
            digits = None
        if ax_type == 'vline':
            return f'{round(x, digits)} s'
        return str(round(x, digits))
    # format as timestamp relative to the measurement date
    meas_date = self.mne.inst.info['meas_date']
    first_time = datetime.timedelta(seconds=self.mne.inst.first_time)
    xtime = datetime.timedelta(seconds=x)
    xdatetime = meas_date + first_time + xtime
    xdtstr = xdatetime.strftime('%H:%M:%S')
    # append fractional seconds (except on the hscroll axis)
    if digits and ax_type != 'hscroll' and int(xdatetime.microsecond):
        xdtstr += f'{round(xdatetime.microsecond * 1e-6, digits)}'[1:]
    return xdtstr
def _toggle_time_format(self):
    """Switch the x-axis between seconds and clock-time formatting."""
    if self.mne.time_format == 'float':
        self.mne.time_format = 'clock'
        x_axis_label = 'Time (HH:MM:SS)'
    else:
        self.mne.time_format = 'float'
        x_axis_label = 'Time (s)'
    # Change x-axis label on both the main axes and the hscroll axes
    for axes in (self.mne.ax_main, self.mne.ax_hscroll):
        axes.set_xlabel(x_axis_label)
    self._redraw(update_data=False, annotations=False)
    # Update vline-text if displayed
    vline = self.mne.vline
    if vline is not None and vline.get_visible():
        self._show_vline(vline.get_xdata())
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# DATA TRACES
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _toggle_butterfly(self):
    """Enter or leave butterfly mode."""
    # vscroll is useful only outside butterfly mode (shown when leaving)
    self.mne.ax_vscroll.set_visible(self.mne.butterfly)
    self.mne.butterfly = not self.mne.butterfly
    # halve trace scaling when entering, restore when leaving
    self.mne.scale_factor *= 0.5 if self.mne.butterfly else 2.
    self._update_picks()
    self._update_trace_offsets()
    self._redraw(annotations=True)
    if self.mne.fig_selection is not None:
        self.mne.fig_selection._style_radio_buttons_butterfly()
def _update_trace_offsets(self):
    """Compute viewport height and adjust offsets."""
    # simultaneous selection and butterfly modes: one offset (row) per
    # selection group, assigned by which group each pick belongs to
    if self.mne.butterfly and self.mne.ch_selections is not None:
        self._update_picks()
        selections_dict = self._make_butterfly_selections_dict()
        n_offsets = len(selections_dict)
        sel_order = list(selections_dict)
        offsets = np.array([])
        for pick in self.mne.picks:
            for sel in sel_order:
                if pick in selections_dict[sel]:
                    offsets = np.append(offsets, sel_order.index(sel))
    # butterfly only: one offset per channel type, in canonical order
    elif self.mne.butterfly:
        unique_ch_types = set(self.mne.ch_types)
        n_offsets = len(unique_ch_types)
        ch_type_order = [_type for _type in _DATA_CH_TYPES_ORDER_DEFAULT
                         if _type in unique_ch_types]
        offsets = np.array([ch_type_order.index(ch_type)
                            for ch_type in self.mne.ch_types])
    # normal mode: one offset per visible channel
    else:
        n_offsets = self.mne.n_channels
        offsets = np.arange(n_offsets, dtype=float)
    # update ylim, ticks, vertline, and scrollbar patch
    ylim = (n_offsets - 0.5, -0.5)  # inverted y axis → new chs at bottom
    self.mne.ax_main.set_ylim(ylim)
    self.mne.ax_main.set_yticks(np.unique(offsets))
    self.mne.vsel_patch.set_height(self.mne.n_channels)
    # store new offsets, update axis labels
    self.mne.trace_offsets = offsets
    self._update_yaxis_labels()
def _draw_traces(self):
    """Draw (or redraw) the channel data."""
    from matplotlib.colors import to_rgba_array
    from matplotlib.patches import Rectangle
    # clear scalebars (redrawn at the end if still wanted)
    if self.mne.scalebars_visible:
        self._hide_scalebars()
    # get info about currently visible channels
    picks = self.mne.picks
    ch_names = self.mne.ch_names[picks]
    ch_types = self.mne.ch_types[picks]
    # in butterfly mode (without selections) offsets are indexed per pick;
    # otherwise trace_offsets already matches the visible picks
    offset_ixs = (picks
                  if self.mne.butterfly and self.mne.ch_selections is None
                  else slice(None))
    offsets = self.mne.trace_offsets[offset_ixs]
    bad_bool = np.in1d(ch_names, self.mne.info['bads'])
    # colors: per-type colors, with bads overridden by the bad color
    good_ch_colors = [self.mne.ch_color_dict[_type] for _type in ch_types]
    ch_colors = to_rgba_array(
        [self.mne.ch_color_bad if _bad else _color
         for _bad, _color in zip(bad_bool, good_ch_colors)])
    self.mne.ch_colors = np.array(good_ch_colors)  # use for unmarking bads
    labels = self.mne.ax_main.yaxis.get_ticklabels()
    if self.mne.butterfly:
        for label in labels:
            label.set_color(self.mne.fgcolor)
    else:
        for label, color in zip(labels, ch_colors):
            label.set_color(color)
    # decim: only data channels get decimated; others keep every sample
    decim = np.ones_like(picks)
    data_picks_mask = np.in1d(picks, self.mne.picks_data)
    decim[data_picks_mask] = self.mne.decim
    # decim can vary by channel type, so compute different `times` vectors
    decim_times = {decim_value:
                   self.mne.times[::decim_value] + self.mne.first_time
                   for decim_value in set(decim)}
    # add more traces if needed
    n_picks = len(picks)
    if n_picks > len(self.mne.traces):
        n_new_chs = n_picks - len(self.mne.traces)
        new_traces = self.mne.ax_main.plot(np.full((1, n_new_chs), np.nan),
                                           **self.mne.trace_kwargs)
        self.mne.traces.extend(new_traces)
    # remove extra traces if needed
    extra_traces = self.mne.traces[n_picks:]
    for trace in extra_traces:
        trace.remove()
    self.mne.traces = self.mne.traces[:n_picks]
    # check for bad epochs
    time_range = (self.mne.times + self.mne.first_time)[[0, -1]]
    if self.mne.instance_type == 'epochs':
        # indices of epochs currently in view, and their selection numbers
        epoch_ix = np.searchsorted(self.mne.boundary_times, time_range)
        epoch_ix = np.arange(epoch_ix[0], epoch_ix[1])
        epoch_nums = self.mne.inst.selection[epoch_ix[0]:epoch_ix[-1] + 1]
        visible_bad_epoch_ix, = np.in1d(
            epoch_nums, self.mne.bad_epochs).nonzero()
        while len(self.mne.epoch_traces):
            self.mne.epoch_traces.pop(-1).remove()
        # handle custom epoch colors (for autoreject integration)
        if self.mne.epoch_colors is None:
            # shape: n_traces × RGBA → n_traces × n_epochs × RGBA
            custom_colors = np.tile(ch_colors[:, None, :],
                                    (1, self.mne.n_epochs, 1))
        else:
            custom_colors = np.empty((len(self.mne.picks),
                                      self.mne.n_epochs, 4))
            for ii, _epoch_ix in enumerate(epoch_ix):
                this_colors = self.mne.epoch_colors[_epoch_ix]
                custom_colors[:, ii] = to_rgba_array([this_colors[_ch]
                                                      for _ch in picks])
        # override custom color on bad epochs (bad channels keep the
        # bad-channel color; good channels get the bad-epoch color)
        for _ix in visible_bad_epoch_ix:
            _cols = np.array([self.mne.epoch_color_bad,
                              self.mne.ch_color_bad],
                             dtype=object)[bad_bool.astype(int)]
            custom_colors[:, _ix] = to_rgba_array(_cols)
    # update traces
    ylim = self.mne.ax_main.get_ylim()
    for ii, line in enumerate(self.mne.traces):
        this_name = ch_names[ii]
        this_type = ch_types[ii]
        this_offset = offsets[ii]
        this_times = decim_times[decim[ii]]
        # data is inverted and scaled, then shifted to the trace's row
        this_data = this_offset - self.mne.data[ii] * self.mne.scale_factor
        this_data = this_data[..., ::decim[ii]]
        # clip
        if self.mne.clipping == 'clamp':
            this_data = np.clip(this_data, -0.5, 0.5)
        elif self.mne.clipping is not None:
            # clip via a rectangular clip path around the trace's row
            clip = self.mne.clipping * (0.2 if self.mne.butterfly else 1)
            bottom = max(this_offset - clip, ylim[1])
            height = min(2 * clip, ylim[0] - bottom)
            rect = Rectangle(xy=np.array([time_range[0], bottom]),
                             width=time_range[1] - time_range[0],
                             height=height,
                             transform=self.mne.ax_main.transData)
            line.set_clip_path(rect)
        # prep z order
        is_bad_ch = this_name in self.mne.info['bads']
        this_z = self.mne.zorder['bads' if is_bad_ch else 'data']
        if self.mne.butterfly and not is_bad_ch:
            this_z = self.mne.zorder.get(this_type, this_z)
        # plot each trace multiple times to get the desired epoch coloring.
        # use masked arrays to plot discontinuous epochs that have the same
        # color in a single plot() call.
        if self.mne.instance_type == 'epochs':
            this_colors = custom_colors[ii]
            for cix, color in enumerate(np.unique(this_colors, axis=0)):
                bool_ixs = (this_colors == color).all(axis=1)
                mask = np.zeros_like(this_times, dtype=bool)
                _starts = self.mne.boundary_times[epoch_ix][bool_ixs]
                _stops = self.mne.boundary_times[epoch_ix + 1][bool_ixs]
                for _start, _stop in zip(_starts, _stops):
                    _mask = np.logical_and(_start < this_times,
                                           this_times <= _stop)
                    mask = mask | _mask
                _times = np.ma.masked_array(this_times, mask=~mask)
                # always use the existing traces first
                if cix == 0:
                    line.set_xdata(_times)
                    line.set_ydata(this_data)
                    line.set_color(color)
                    line.set_zorder(this_z)
                else:  # make new traces as needed
                    _trace = self.mne.ax_main.plot(
                        _times, this_data, color=color, zorder=this_z,
                        **self.mne.trace_kwargs)
                    self.mne.epoch_traces.extend(_trace)
        else:
            line.set_xdata(this_times)
            line.set_ydata(this_data)
            line.set_color(ch_colors[ii])
            line.set_zorder(this_z)
    # update xlim
    self.mne.ax_main.set_xlim(*time_range)
    # draw scalebars maybe
    if self.mne.scalebars_visible:
        self._show_scalebars()
    # redraw event lines
    if self.mne.event_times is not None:
        self._draw_event_lines()
def _redraw(self, update_data=True, annotations=False):
    """Redraw (convenience method for frequently grouped actions)."""
    super()._redraw(update_data, annotations)
    if self.mne.vline_visible and self.mne.is_epochs:
        # prevent flickering: recompute vline positions before the draw
        _ = self._recompute_epochs_vlines(None)
        self.canvas.draw_idle()
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# EVENT LINES AND MARKER LINES
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def _draw_event_lines(self):
    """Draw the event lines and their labels."""
    from matplotlib.collections import LineCollection
    from matplotlib.colors import to_rgba_array
    if self.mne.event_nums is not None:
        # restrict to events within the currently visible time range
        mask = np.logical_and(self.mne.event_times >= self.mne.times[0],
                              self.mne.event_times <= self.mne.times[-1])
        this_event_times = self.mne.event_times[mask]
        this_event_nums = self.mne.event_nums[mask]
        n_visible_events = len(this_event_times)
        colors = to_rgba_array([self.mne.event_color_dict[n]
                                for n in this_event_nums])
        # create event lines: one full-height vertical segment per event
        ylim = self.mne.ax_main.get_ylim()
        xs = np.repeat(this_event_times, 2)
        ys = np.tile(ylim, n_visible_events)
        segs = np.vstack([xs, ys]).T.reshape(n_visible_events, 2, 2)
        event_lines = LineCollection(segs, linewidths=0.5, colors=colors,
                                     zorder=self.mne.zorder['events'])
        self.mne.ax_main.add_collection(event_lines)
        self.mne.event_lines = event_lines
        # create event labels (after removing any stale ones)
        while len(self.mne.event_texts):
            self.mne.event_texts.pop().remove()
        for _t, _n, _c in zip(this_event_times, this_event_nums, colors):
            # prefer the event-id name; fall back to the numeric code
            label = self.mne.event_id_rev.get(_n, _n)
            this_text = self.mne.ax_main.annotate(
                label, (_t, ylim[1]), ha='center', va='baseline',
                color=self.mne.fgcolor, xytext=(0, 2),
                textcoords='offset points', fontsize=8)
            self.mne.event_texts.append(this_text)
def _recompute_epochs_vlines(self, xdata):
    """Recompute vline x-coords for epochs plots (after scrolling, etc)."""
    # special case: changed view duration w/ "home" or "end" key
    # (no click event, hence no xdata) → reuse the first segment's x
    if xdata is None:
        xdata = np.array(self.mne.vline.get_segments())[0, 0, 0]
    # compute the (continuous) times for the lines on each epoch
    epoch_dur = np.diff(self.mne.boundary_times[:2])[0]
    rel_time = xdata % epoch_dur  # position within a single epoch
    abs_time = self.mne.times[0]
    xs = np.arange(self.mne.n_epochs) * epoch_dur + abs_time + rel_time
    segs = np.array(self.mne.vline.get_segments())
    # recreate segs from scratch in case view duration changed
    # (i.e., handle case when n_segments != n_epochs)
    segs = np.tile([[0.], [1.]], (len(xs), 1, 2))  # y values
    segs[..., 0] = np.tile(xs[:, None], 2)  # x values
    self.mne.vline.set_segments(segs)
    return rel_time
def _show_vline(self, xdata):
    """Show the vertical line(s)."""
    if self.mne.is_epochs:
        # convert xdata to be epoch-relative (for the text)
        rel_time = self._recompute_epochs_vlines(xdata)
        xdata = rel_time + self.mne.inst.times[0]
    else:
        # continuous data: a single vline on both main and hscroll axes
        self.mne.vline.set_xdata(xdata)
        self.mne.vline_hscroll.set_xdata(xdata)
    # truncate the formatted time string to keep the label short
    text = self._xtick_formatter(xdata, ax_type='vline')[:12]
    self.mne.vline_text.set_text(text)
    self._toggle_vline(True)
def _toggle_vline(self, visible):
    """Show or hide the vertical line(s)."""
    # hscroll vline may be absent (e.g. epochs), hence the None check
    for artist in (self.mne.vline, self.mne.vline_hscroll,
                   self.mne.vline_text):
        if artist is not None:
            artist.set_visible(visible)
            self.draw_artist(artist)
    self.mne.vline_visible = visible
    self.canvas.draw_idle()
# workaround: plt.close() doesn't spawn close_event on Agg backend
# (check MPL github issue #18609; scheduled to be fixed by MPL 3.6)
def _close_event(self, fig=None):
    """Force calling of the MPL figure close event."""
    # default to this figure when no target is given
    target = fig or self
    _close_event(target)
def _fake_keypress(self, key, fig=None):
    """Simulate a keypress on this figure (or on `fig`); used in tests."""
    fig = fig or self
    _fake_keypress(fig, key)
def _fake_click(self, point, add_points=None, fig=None, ax=None,
                xform='ax', button=1, kind='press'):
    """Fake a click at a relative point within axes."""
    fig = fig or self
    ax = ax or self.mne.ax_main
    if kind == 'drag' and add_points is not None:
        # emulate a drag as press → motion through each point → release
        _fake_click(fig=fig, ax=ax, point=point, xform=xform,
                    button=button, kind='press')
        for apoint in add_points:
            _fake_click(fig=fig, ax=ax, point=apoint, xform=xform,
                        button=button, kind='motion')
        _fake_click(fig=fig, ax=ax, point=add_points[-1], xform=xform,
                    button=button, kind='release')
    else:
        _fake_click(fig=fig, ax=ax, point=point, xform=xform,
                    button=button, kind=kind)
def _fake_scroll(self, x, y, step, fig=None):
    """Simulate a scroll event at (x, y); used in tests."""
    fig = fig or self
    _fake_scroll(fig, x, y, step)
def _click_ch_name(self, ch_index, button):
    """Simulate a click on a channel-name y-axis label; used in tests."""
    _click_ch_name(self, ch_index, button)
def _resize_by_factor(self, factor=None):
    """Resize the figure window by a scale factor.

    Parameters
    ----------
    factor : float | tuple of float
        Scale factor for the current canvas size. A tuple gives separate
        (width, height) factors; a scalar scales both dimensions equally.
    """
    size = self.canvas.manager.canvas.get_width_height()
    if isinstance(factor, tuple):
        # bugfix: old code called int(w * fw, h * fh), i.e. int() with a
        # second ("base") argument, which raises TypeError for floats;
        # scale each dimension separately instead
        size = [int(dim * fac) for dim, fac in zip(size, factor)]
    else:
        size = [int(x * factor) for x in size]
    self.canvas.manager.resize(*size)
def _get_ticklabels(self, orientation):
    """Return the tick label strings of the main axes.

    Parameters
    ----------
    orientation : 'x' | 'y'
        Which axis to read the labels from.

    Returns
    -------
    label_texts : list of str
        The text of each tick label.
    """
    if orientation == 'x':
        # epochs plots put epoch numbers on minor x-ticks
        labels = self.mne.ax_main.get_xticklabels(minor=self.mne.is_epochs)
    elif orientation == 'y':
        labels = self.mne.ax_main.get_yticklabels()
    else:
        # previously an invalid value fell through to UnboundLocalError
        raise ValueError(
            f"orientation must be 'x' or 'y', got {orientation!r}")
    label_texts = [lb.get_text() for lb in labels]
    return label_texts
def _get_scale_bar_texts(self):
    """Return the stripped text of the scalebar labels on the main axes."""
    all_texts = [t.get_text().strip() for t in self.mne.ax_main.texts]
    # First text is empty because of vline — drop it
    return tuple(all_texts[1:])
class MNELineFigure(MNEFigure):
    """Interactive figure for non-scrolling line plots."""

    def __init__(self, inst, n_axes, figsize, **kwargs):
        super().__init__(figsize=figsize, inst=inst, **kwargs)
        # AXES: default margins (inches)
        l_margin = 0.8
        r_margin = 0.2
        b_margin = 0.65
        t_margin = 0.35
        # AXES: default margins (figure-relative coordinates)
        left = self._inch_to_rel(l_margin)
        right = 1 - self._inch_to_rel(r_margin)
        bottom = self._inch_to_rel(b_margin, horiz=False)
        top = 1 - self._inch_to_rel(t_margin, horiz=False)
        # AXES: make subplots (all sharing the x-axis of the first)
        axes = [self.add_subplot(n_axes, 1, 1)]
        for ix in range(1, n_axes):
            axes.append(self.add_subplot(n_axes, 1, ix + 1, sharex=axes[0]))
        self.subplotpars.update(left=left, bottom=bottom, top=top,
                                right=right, hspace=0.4)
        # save useful things
        self.mne.ax_list = axes

    def _resize(self, event):
        """Handle resize event by keeping fixed-size margins."""
        old_width, old_height = self.mne.fig_size_px
        new_width, new_height = self._get_size_px()
        new_margins = _calc_new_margins(
            self, old_width, old_height, new_width, new_height)
        self.subplots_adjust(**new_margins)
        # remember the new size for the next resize event
        self.mne.fig_size_px = (new_width, new_height)
def _close_all():
    """Close all figures (only used in our tests)."""
    plt.close('all')
def _get_n_figs():
    """Return the number of currently open matplotlib figures."""
    return len(plt.get_fignums())
def _figure(toolbar=True, FigureClass=MNEFigure, **kwargs):
    """Instantiate a new figure."""
    from matplotlib import rc_context
    title = kwargs.pop('window_title', None)  # extract title before init
    # suppress the MPL toolbar via rcParams when not wanted
    rc = dict() if toolbar else dict(toolbar='none')
    with rc_context(rc=rc):
        fig = plt.figure(FigureClass=FigureClass, **kwargs)
    # BACKEND defined globally at the top of this file
    fig.mne.backend = BACKEND
    if title is not None:
        _set_window_title(fig, title)
    # add event callbacks
    fig._add_default_callbacks()
    return fig
def _line_figure(inst, axes=None, picks=None, **kwargs):
    """Instantiate a new line figure."""
    from matplotlib.axes import Axes
    # if picks is None, only show data channels
    allowed_ch_types = (_DATA_CH_TYPES_SPLIT if picks is None else
                        _VALID_CHANNEL_TYPES)
    # figure out expected number of axes (one per allowed ch type present)
    ch_types = np.array(inst.get_channel_types())
    if picks is not None:
        ch_types = ch_types[picks]
    n_axes = len(np.intersect1d(ch_types, allowed_ch_types))
    # handle user-provided axes
    if axes is not None:
        if isinstance(axes, Axes):
            axes = [axes]
        _validate_if_list_of_axes(axes, n_axes)
        fig = axes[0].get_figure()
    else:
        figsize = kwargs.pop('figsize', (10, 2.5 * n_axes + 1))
        fig = _figure(inst=inst, toolbar=True, FigureClass=MNELineFigure,
                      figsize=figsize, n_axes=n_axes, **kwargs)
        fig.mne.fig_size_px = fig._get_size_px()  # can't do in __init__
        axes = fig.mne.ax_list
    return fig, axes
def _split_picks_by_type(inst, picks, units, scalings, titles):
    """Separate picks, units, etc, for plotting on separate subplots."""
    picks_list = list()
    units_list = list()
    scalings_list = list()
    titles_list = list()
    # if picks is None, only show data channels
    allowed_ch_types = (_DATA_CH_TYPES_SPLIT if picks is None else
                        _VALID_CHANNEL_TYPES)
    for ch_type in allowed_ch_types:
        pick_kwargs = dict(meg=False, ref_meg=False, exclude=[])
        # mag/grad and fNIRS subtypes are selected via dedicated kwargs
        if ch_type in ('mag', 'grad'):
            pick_kwargs['meg'] = ch_type
        elif ch_type in _FNIRS_CH_TYPES_SPLIT:
            pick_kwargs['fnirs'] = ch_type
        else:
            pick_kwargs[ch_type] = True
        these_picks = pick_types(inst.info, **pick_kwargs)
        # keep only channels the caller asked for
        these_picks = np.intersect1d(these_picks, picks)
        if len(these_picks) > 0:
            picks_list.append(these_picks)
            units_list.append(units[ch_type])
            scalings_list.append(scalings[ch_type])
            titles_list.append(titles[ch_type])
    if len(picks_list) == 0:
        raise RuntimeError('No data channels found')
    return picks_list, units_list, scalings_list, titles_list
def _calc_new_margins(fig, old_width, old_height, new_width, new_height):
    """Compute new figure-relative values to maintain fixed-size margins."""
    width_ratio = old_width / new_width
    height_ratio = old_height / new_height
    new_margins = dict()
    for side in ('left', 'right', 'bottom', 'top'):
        # horizontal sides scale with width, vertical sides with height
        ratio = width_ratio if side in ('left', 'right') else height_ratio
        rel_dim = getattr(fig.subplotpars, side)
        if side in ('right', 'top'):
            # 'right'/'top' measure from the opposite edge, so scale the
            # complement
            new_margins[side] = 1 - ratio * (1 - rel_dim)
        else:
            new_margins[side] = ratio * rel_dim
    # gh-8304: don't allow resizing too small — if margins would cross,
    # return None (implicitly) so the caller skips the adjustment
    if (new_margins['bottom'] < new_margins['top'] and
            new_margins['left'] < new_margins['right']):
        return new_margins
@contextmanager
def _patched_canvas(fig):
    """Temporarily give `fig` a stub canvas if it has none.

    Some operations need ``fig.canvas.mpl_connect`` to exist; on old MPL
    the Agg backend can leave ``fig.canvas`` as None.
    """
    old_canvas = fig.canvas
    if fig.canvas is None:  # XXX old MPL (at least 3.0.3) does this for Agg
        fig.canvas = Bunch(mpl_connect=lambda event, callback: None)
    try:
        yield
    finally:
        # always restore the original canvas, even on error
        fig.canvas = old_canvas
def _init_browser(**kwargs):
    """Instantiate a new MNE browse-style figure."""
    from mne.io import BaseRaw
    fig = _figure(toolbar=False, FigureClass=MNEBrowseFigure, **kwargs)
    # splash is ignored (maybe we could do it for mpl if we get_backend() and
    # check if it's Qt... but seems overkill)
    # initialize zen mode
    # (can't do in __init__ due to get_position() calls)
    fig.canvas.draw()
    fig._update_zen_mode_offsets()
    fig._resize(None)  # needed for MPL >=3.4
    # if scrollbars are supposed to start hidden,
    # set to True and then toggle
    if not fig.mne.scrollbars_visible:
        fig.mne.scrollbars_visible = True
        fig._toggle_scrollbars()
    # Initialize parts of the plot
    is_ica = fig.mne.instance_type == 'ica'
    if not is_ica:
        # make channel selection dialog,
        # if requested (doesn't work well in init)
        if fig.mne.group_by in ('selection', 'position'):
            fig._create_selection_fig()
        # start with projectors dialog open, if requested
        if getattr(fig.mne, 'show_options', False):
            fig._toggle_proj_fig()
    # update data, and plot
    fig._update_trace_offsets()
    fig._redraw(update_data=True, annotations=False)
    # annotations only exist for Raw instances
    if isinstance(fig.mne.inst, BaseRaw):
        fig._setup_annotation_colors()
        fig._draw_annotations()
    return fig
"""A module which implements the time-frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
# Clement Moutard <clement.moutard@polytechnique.org>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License : BSD-3-Clause
from copy import deepcopy
from functools import partial
import numpy as np
from .multitaper import dpss_windows
from ..baseline import rescale, _check_baseline
from ..filter import next_fast_len
from ..parallel import parallel_func
from ..utils import (logger, verbose, _time_mask, _freq_mask, check_fname,
sizeof_fmt, GetEpochsMixin, TimeMixin,
_prepare_read_metadata, fill_doc, _prepare_write_metadata,
_check_event_id, _gen_events, SizeMixin, _is_numeric,
_check_option, _validate_type, _check_combine,
_check_pandas_installed, _check_pandas_index_arguments,
_check_time_format, _convert_times, _build_data_frame,
warn, _import_h5io_funcs)
from ..channels.channels import UpdateChannelsMixin
from ..channels.layout import _merge_ch_data, _pair_grad_sensors
from ..defaults import (_INTERPOLATION_DEFAULT, _EXTRAPOLATE_DEFAULT,
_BORDER_DEFAULT)
from ..io.pick import (pick_info, _picks_to_idx, channel_type, _pick_inst,
_get_channel_types)
from ..io.meas_info import Info, ContainsMixin
from ..viz.utils import (figure_nobar, plt_show, _setup_cmap,
_connection_line, _prepare_joint_axes,
_setup_vmin_vmax, _set_title_multiple_electrodes)
def morlet(sfreq, freqs, n_cycles=7.0, sigma=None, zero_mean=False):
    """Compute Morlet wavelets for the given frequency range.

    Parameters
    ----------
    sfreq : float
        The sampling Frequency.
    freqs : array
        Frequency range of interest (1 x Frequencies).
    n_cycles : float | array of float, default 7.0
        Number of cycles. Fixed number or one per frequency.
    sigma : float, default None
        It controls the width of the wavelet ie its temporal
        resolution. If sigma is None the temporal resolution
        is adapted with the frequency like for all wavelet transform.
        The higher the frequency the shorter is the wavelet.
        If sigma is fixed the temporal resolution is fixed
        like for the short time Fourier transform and the number
        of oscillations increases with the frequency.
    zero_mean : bool, default False
        Make sure the wavelet has a mean of zero.

    Returns
    -------
    Ws : list of array
        The wavelets time series.
    """
    freqs = np.array(freqs)
    if np.any(freqs <= 0):
        raise ValueError("all frequencies in 'freqs' must be "
                         "greater than 0.")
    n_cycles = np.atleast_1d(n_cycles)
    if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")
    # pair each frequency with its cycle count (scalar n_cycles broadcasts)
    cycles_per_freq = np.broadcast_to(n_cycles, freqs.shape)
    Ws = list()
    for f, this_cycles in zip(freqs, cycles_per_freq):
        # temporal std dev: frequency-adaptive, or fixed when sigma is given
        if sigma is None:
            sigma_t = this_cycles / (2.0 * np.pi * f)
        else:
            sigma_t = this_cycles / (2.0 * np.pi * sigma)
        # symmetric time support out to 5 standard deviations
        half_t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
        t = np.r_[-half_t[::-1], half_t[1:]]
        oscillation = np.exp(2.0 * 1j * np.pi * f * t)
        gaussian_envelope = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
        if zero_mean:
            # subtract the DC component of the analytic oscillation
            # (this scaling factor is proportional to Tallon-Baudry 98:
            # (sigma_t*sqrt(pi))^(-1/2))
            real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
            oscillation = oscillation - real_offset
        W = oscillation * gaussian_envelope
        # normalize so that ||W|| == sqrt(2)
        W /= np.sqrt(0.5) * np.linalg.norm(W.ravel())
        Ws.append(W)
    return Ws
def _make_dpss(sfreq, freqs, n_cycles=7., time_bandwidth=4.0, zero_mean=False):
    """Compute DPSS tapers for the given frequency range.
    Parameters
    ----------
    sfreq : float
        The sampling frequency.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    n_cycles : float | ndarray, shape (n_freqs,), default 7.
        The number of cycles globally or for each frequency.
    time_bandwidth : float, default 4.0
        Time x Bandwidth product.
        The number of good tapers (low-bias) is chosen automatically based on
        this to equal floor(time_bandwidth - 1).
        Default is 4.0, giving 3 good tapers.
    zero_mean : bool | None, default False
        Make sure the wavelet has a mean of zero.
    Returns
    -------
    Ws : list of list of array
        The wavelets time series, indexed as ``Ws[taper][freq]``.
    """
    freqs = np.array(freqs)
    if np.any(freqs <= 0):
        raise ValueError("all frequencies in 'freqs' must be "
                         "greater than 0.")
    if time_bandwidth < 2.0:
        raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
    n_taps = int(np.floor(time_bandwidth - 1))
    n_cycles = np.atleast_1d(n_cycles)
    if n_cycles.size != 1 and n_cycles.size != len(freqs):
        raise ValueError("n_cycles should be fixed or defined for "
                         "each frequency.")
    # One list of wavelets per taper, filled frequency by frequency.
    Ws = [list() for _ in range(n_taps)]
    for k, f in enumerate(freqs):
        if len(n_cycles) != 1:
            this_n_cycles = n_cycles[k]
        else:
            this_n_cycles = n_cycles[0]
        t_win = this_n_cycles / float(f)
        t = np.arange(0., t_win, 1.0 / sfreq)
        # Making sure wavelets are centered before tapering
        oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
        # The DPSS tapers depend only on the window length (and thus the
        # frequency), not on the taper index, so compute them once per
        # frequency instead of once per (taper, frequency) pair as before.
        tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
                                    n_taps, sym=False)
        for m in range(n_taps):
            Wk = oscillation * tapers[m]
            if zero_mean:  # to make it zero mean
                real_offset = Wk.mean()
                Wk -= real_offset
            Wk /= np.sqrt(0.5) * np.linalg.norm(Wk.ravel())
            Ws[m].append(Wk)
    return Ws
# Low level convolution
def _get_nfft(wavelets, X, use_fft=True, check=True):
n_times = X.shape[-1]
max_size = max(w.size for w in wavelets)
if max_size > n_times:
msg = (f'At least one of the wavelets ({max_size}) is longer than the '
f'signal ({n_times}). Consider using a longer signal or '
'shorter wavelets.')
if check:
if use_fft:
warn(msg, UserWarning)
else:
raise ValueError(msg)
nfft = n_times + max_size - 1
nfft = next_fast_len(nfft) # 2 ** int(np.ceil(np.log2(nfft)))
return nfft
def _cwt_gen(X, Ws, *, fsize=0, mode="same", decim=1, use_fft=True):
    """Compute cwt with fft based convolutions or temporal convolutions.
    Generator: yields one (n_freqs, n_times_out) array per signal in X.
    Parameters
    ----------
    X : array of shape (n_signals, n_times)
        The data.
    Ws : list of array
        Wavelets time series.
    fsize : int
        FFT length.
    mode : {'full', 'valid', 'same'}
        See numpy.convolve.
    decim : int | slice, default 1
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].
        .. note:: Decimation may create aliasing artifacts.
    use_fft : bool, default True
        Use the FFT for convolutions or not.
    Returns
    -------
    out : array, shape (n_signals, n_freqs, n_time_decim)
        The time-frequency transform of the signals.
    """
    from scipy.fft import fft, ifft
    _check_option('mode', mode, ['same', 'valid', 'full'])
    decim = _check_decim(decim)
    X = np.asarray(X)
    # Precompute wavelets for given frequency range to save time
    _, n_times = X.shape
    n_times_out = X[:, decim].shape[1]
    n_freqs = len(Ws)
    # precompute FFTs of Ws
    if use_fft:
        fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
        for i, W in enumerate(Ws):
            fft_Ws[i] = fft(W, fsize)
    # Make generator looping across signals
    # NOTE: the same ``tfr`` buffer is overwritten and yielded for every
    # signal, so consumers must copy (or reduce) it before advancing.
    tfr = np.zeros((n_freqs, n_times_out), dtype=np.complex128)
    for x in X:
        if use_fft:
            fft_x = fft(x, fsize)
        # Loop across wavelets
        for ii, W in enumerate(Ws):
            if use_fft:
                # Frequency-domain product; keep only the linear (full)
                # convolution part of the circular result.
                ret = ifft(fft_x * fft_Ws[ii])[:n_times + W.size - 1]
            else:
                # Work around multarray.correlate->OpenBLAS bug on ppc64le
                # ret = np.correlate(x, W, mode=mode)
                ret = (
                    np.convolve(x, W.real, mode=mode) +
                    1j * np.convolve(x, W.imag, mode=mode)
                )
            # Center and decimate decomposition
            if mode == 'valid':
                # Place the 'valid' segment centered inside the output row.
                sz = int(abs(W.size - n_times)) + 1
                offset = (n_times - sz) // 2
                this_slice = slice(offset // decim.step,
                                   (offset + sz) // decim.step)
                if use_fft:
                    ret = _centered(ret, sz)
                tfr[ii, this_slice] = ret[decim]
            elif mode == 'full' and not use_fft:
                # Trim the time-domain 'full' convolution back to n_times.
                start = (W.size - 1) // 2
                end = len(ret) - (W.size // 2)
                ret = ret[start:end]
                tfr[ii, :] = ret[decim]
            else:
                if use_fft:
                    ret = _centered(ret, n_times)
                tfr[ii, :] = ret[decim]
        yield tfr
# Loop of convolution: single trial
def _compute_tfr(epoch_data, freqs, sfreq=1.0, method='morlet',
                 n_cycles=7.0, zero_mean=None, time_bandwidth=None,
                 use_fft=True, decim=1, output='complex', n_jobs=None,
                 verbose=None):
    """Compute time-frequency transforms.
    Parameters
    ----------
    epoch_data : array of shape (n_epochs, n_channels, n_times)
        The epochs.
    freqs : array-like of floats, shape (n_freqs)
        The frequencies.
    sfreq : float | int, default 1.0
        Sampling frequency of the data.
    method : 'multitaper' | 'morlet', default 'morlet'
        The time-frequency method. 'morlet' convolves a Morlet wavelet.
        'multitaper' uses complex exponentials windowed with multiple DPSS
        tapers.
    n_cycles : float | array of float, default 7.0
        Number of cycles in the wavelet. Fixed number
        or one per frequency.
    zero_mean : bool | None, default None
        None means True for method='multitaper' and False for method='morlet'.
        If True, make sure the wavelets have a mean of zero.
    time_bandwidth : float, default None
        If None and method=multitaper, will be set to 4.0 (3 tapers).
        Time x (Full) Bandwidth product. Only applies if
        method == 'multitaper'. The number of good tapers (low-bias) is
        chosen automatically based on this to equal floor(time_bandwidth - 1).
    use_fft : bool, default True
        Use the FFT for convolutions or not.
    decim : int | slice, default 1
        To reduce memory usage, decimation factor after time-frequency
        decomposition.
        If `int`, returns tfr[..., ::decim].
        If `slice`, returns tfr[..., decim].
        .. note::
            Decimation may create aliasing artifacts, yet decimation
            is done after the convolutions.
    output : str, default 'complex'
        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    %(n_jobs)s
        The number of epochs to process at the same time. The parallelization
        is implemented across channels.
    %(verbose)s
    Returns
    -------
    out : array
        Time frequency transform of epoch_data. If output is in ['complex',
        'phase', 'power'], then shape of ``out`` is ``(n_epochs, n_chans,
        n_freqs, n_times)``, else it is ``(n_chans, n_freqs, n_times)``.
        However, using multitaper method and output ``'complex'`` or
        ``'phase'`` results in shape of ``out`` being ``(n_epochs, n_chans,
        n_tapers, n_freqs, n_times)``. If output is ``'avg_power_itc'``, the
        real values in the ``output`` contain average power' and the imaginary
        values contain the inter-trial coherence:
        ``out = avg_power + i * ITC``.
    """
    # Check data
    epoch_data = np.asarray(epoch_data)
    if epoch_data.ndim != 3:
        raise ValueError('epoch_data must be of shape (n_epochs, n_chans, '
                         'n_times), got %s' % (epoch_data.shape,))
    # Check params
    freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim = \
        _check_tfr_param(freqs, sfreq, method, zero_mean, n_cycles,
                         time_bandwidth, use_fft, decim, output)
    decim = _check_decim(decim)
    if (freqs > sfreq / 2.).any():
        raise ValueError('Cannot compute freq above Nyquist freq of the data '
                         '(%0.1f Hz), got %0.1f Hz'
                         % (sfreq / 2., freqs.max()))
    # We decimate *after* decomposition, so we need to create our kernels
    # for the original sfreq
    if method == 'morlet':
        W = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
        Ws = [W]  # to have same dimensionality as the 'multitaper' case
    elif method == 'multitaper':
        Ws = _make_dpss(sfreq, freqs, n_cycles=n_cycles,
                        time_bandwidth=time_bandwidth, zero_mean=zero_mean)
    # Check wavelets
    # NOTE(review): only the first taper's first wavelet is inspected here;
    # presumably the first frequency yields the longest wavelet — the full
    # per-wavelet check happens in _get_nfft below.
    if len(Ws[0][0]) > epoch_data.shape[2]:
        raise ValueError('At least one of the wavelets is longer than the '
                         'signal. Use a longer signal or shorter wavelets.')
    # Initialize output
    n_freqs = len(freqs)
    n_tapers = len(Ws)
    n_epochs, n_chans, n_times = epoch_data[:, :, decim].shape
    if output in ('power', 'phase', 'avg_power', 'itc'):
        dtype = np.float64
    elif output in ('complex', 'avg_power_itc'):
        # avg_power_itc is stored as power + 1i * itc to keep a
        # simple dimensionality
        dtype = np.complex128
    if ('avg_' in output) or ('itc' in output):
        out = np.empty((n_chans, n_freqs, n_times), dtype)
    elif output in ['complex', 'phase'] and method == 'multitaper':
        out = np.empty((n_chans, n_tapers, n_epochs, n_freqs, n_times), dtype)
    else:
        out = np.empty((n_chans, n_epochs, n_freqs, n_times), dtype)
    # Parallel computation
    # Called only for its length warning/error side effect; the returned
    # nfft is recomputed per taper inside _time_frequency_loop.
    all_Ws = sum([list(W) for W in Ws], list())
    _get_nfft(all_Ws, epoch_data, use_fft)
    parallel, my_cwt, n_jobs = parallel_func(_time_frequency_loop, n_jobs)
    # Parallelization is applied across channels.
    tfrs = parallel(
        my_cwt(channel, Ws, output, use_fft, 'same', decim, method)
        for channel in epoch_data.transpose(1, 0, 2))
    # FIXME: to avoid overheads we should use np.array_split()
    for channel_idx, tfr in enumerate(tfrs):
        out[channel_idx] = tfr
    if ('avg_' not in output) and ('itc' not in output):
        # This is to enforce that the first dimension is for epochs
        if output in ['complex', 'phase'] and method == 'multitaper':
            out = out.transpose(2, 0, 1, 3, 4)
        else:
            out = out.transpose(1, 0, 2, 3)
    return out
def _check_tfr_param(freqs, sfreq, method, zero_mean, n_cycles,
                     time_bandwidth, use_fft, decim, output):
    """Aux. function to _compute_tfr to check the params validity."""
    # freqs -> 1-D float array
    if not isinstance(freqs, (list, np.ndarray)):
        raise ValueError(f'freqs must be an array-like, got {type(freqs)} '
                         'instead.')
    freqs = np.asarray(freqs, dtype=float)
    if freqs.ndim != 1:
        raise ValueError('freqs must be of shape (n_freqs,), got '
                         f'{np.array(freqs.shape)} instead.')
    # sfreq -> float
    if not isinstance(sfreq, (float, int)):
        raise ValueError(f'sfreq must be a float or an int, got {type(sfreq)} '
                         'instead.')
    sfreq = float(sfreq)
    # zero_mean defaults to True for multitaper, False for morlet
    if zero_mean is None:
        zero_mean = method == 'multitaper'
    if not isinstance(zero_mean, bool):
        raise ValueError('zero_mean should be of type bool, got '
                         f'{type(zero_mean)}. instead')
    # n_cycles -> float or array matching freqs
    if isinstance(n_cycles, (int, float)):
        n_cycles = float(n_cycles)
    elif isinstance(n_cycles, (list, np.ndarray)):
        n_cycles = np.array(n_cycles)
        if len(n_cycles) != len(freqs):
            raise ValueError('n_cycles must be a float or an array of length '
                             f'{len(freqs):d} frequencies, got '
                             f'{len(n_cycles):d} cycles instead.')
    else:
        raise ValueError('n_cycles must be a float or an array, got '
                         f'{type(n_cycles)} instead.')
    # time_bandwidth only makes sense for the multitaper method
    if (method == 'morlet') and (time_bandwidth is not None):
        raise ValueError('time_bandwidth only applies to "multitaper" method.')
    elif method == 'multitaper':
        time_bandwidth = (4.0 if time_bandwidth is None
                          else float(time_bandwidth))
    # use_fft must be a plain boolean
    if not isinstance(use_fft, bool):
        raise ValueError(f'use_fft must be a boolean, got {type(use_fft)} '
                         'instead.')
    # decim -> slice
    if isinstance(decim, int):
        decim = slice(None, None, decim)
    if not isinstance(decim, slice):
        raise ValueError('decim must be an integer or a slice, '
                         f'got {type(decim)} instead.')
    # output / method restricted to the supported sets
    _check_option('output', output, ['complex', 'power', 'phase',
                                     'avg_power_itc', 'avg_power', 'itc'])
    _check_option('method', method, ['multitaper', 'morlet'])
    return freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim
def _time_frequency_loop(X, Ws, output, use_fft, mode, decim,
                         method=None):
    """Aux. function to _compute_tfr.
    Loops time-frequency transform across wavelets and epochs.
    Parameters
    ----------
    X : array, shape (n_epochs, n_times)
        The epochs data of a single channel.
    Ws : list, shape (n_tapers, n_wavelets, n_times)
        The wavelets.
    output : str
        * 'complex' : single trial complex.
        * 'power' : single trial power.
        * 'phase' : single trial phase.
        * 'avg_power' : average of single trial power.
        * 'itc' : inter-trial coherence.
        * 'avg_power_itc' : average of single trial power and inter-trial
          coherence across trials.
    use_fft : bool
        Use the FFT for convolutions or not.
    mode : {'full', 'valid', 'same'}
        See numpy.convolve.
    decim : slice
        The decimation slice: e.g. power[:, decim]
    method : str | None
        Used only for multitapering to create tapers dimension in the output
        if ``output in ['complex', 'phase']``.
    """
    # Set output type
    dtype = np.float64
    if output in ['complex', 'avg_power_itc']:
        dtype = np.complex128
    # Init outputs
    decim = _check_decim(decim)
    n_tapers = len(Ws)
    n_epochs, n_times = X[:, decim].shape
    n_freqs = len(Ws[0])
    if ('avg_' in output) or ('itc' in output):
        tfrs = np.zeros((n_freqs, n_times), dtype=dtype)
    elif output in ['complex', 'phase'] and method == 'multitaper':
        tfrs = np.zeros((n_tapers, n_epochs, n_freqs, n_times),
                        dtype=dtype)
    else:
        tfrs = np.zeros((n_epochs, n_freqs, n_times), dtype=dtype)
    # Loops across tapers.
    for taper_idx, W in enumerate(Ws):
        # No need to check here, it's done earlier (outside parallel part)
        nfft = _get_nfft(W, X, use_fft, check=False)
        coefs = _cwt_gen(
            X, W, fsize=nfft, mode=mode, decim=decim, use_fft=use_fft)
        # Inter-trial phase locking is apparently computed per taper...
        if 'itc' in output:
            plf = np.zeros((n_freqs, n_times), dtype=np.complex128)
        # Loop across epochs
        # NOTE: _cwt_gen reuses its yield buffer, so values must be
        # consumed (added/stored) before the next iteration, as done below.
        for epoch_idx, tfr in enumerate(coefs):
            # Transform complex values
            if output in ['power', 'avg_power']:
                tfr = (tfr * tfr.conj()).real  # power
            elif output == 'phase':
                tfr = np.angle(tfr)
            elif output == 'avg_power_itc':
                tfr_abs = np.abs(tfr)
                plf += tfr / tfr_abs  # phase
                tfr = tfr_abs ** 2  # power
            elif output == 'itc':
                plf += tfr / np.abs(tfr)  # phase
                continue  # not need to stack anything else than plf
            # Stack or add
            if ('avg_' in output) or ('itc' in output):
                tfrs += tfr
            elif output in ['complex', 'phase'] and method == 'multitaper':
                tfrs[taper_idx, epoch_idx] += tfr
            else:
                tfrs[epoch_idx] += tfr
        # Compute inter trial coherence
        if output == 'avg_power_itc':
            tfrs += 1j * np.abs(plf)
        elif output == 'itc':
            tfrs += np.abs(plf)
    # Normalization of average metrics
    if ('avg_' in output) or ('itc' in output):
        tfrs /= n_epochs
    # Normalization by number of taper
    if n_tapers > 1 and output not in ['complex', 'phase']:
        tfrs /= n_tapers
    return tfrs
@fill_doc
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
    """Compute time-frequency decomposition with continuous wavelet transform.
    Parameters
    ----------
    X : array, shape (n_signals, n_times)
        The signals.
    Ws : list of array
        Wavelets time series.
    use_fft : bool
        Use FFT for convolutions. Defaults to True.
    mode : 'same' | 'valid' | 'full'
        Convention for convolution. 'full' is currently not implemented with
        ``use_fft=False``. Defaults to ``'same'``.
    %(decim_tfr)s
    Returns
    -------
    tfr : array, shape (n_signals, n_freqs, n_times)
        The time-frequency decompositions.
    See Also
    --------
    mne.time_frequency.tfr_morlet : Compute time-frequency decomposition
                                    with Morlet wavelets.
    """
    # Validate wavelet/signal lengths and pick a fast FFT size first.
    nfft = _get_nfft(Ws, X, use_fft)
    return _cwt_array(X, Ws, nfft, mode, decim, use_fft)
def _cwt_array(X, Ws, nfft, mode, decim, use_fft):
    """Materialize the _cwt_gen generator into one dense complex array."""
    decim = _check_decim(decim)
    n_signals, n_times_out = X[:, decim].shape
    out = np.empty((n_signals, len(Ws), n_times_out), dtype=np.complex128)
    gen = _cwt_gen(X, Ws, fsize=nfft, mode=mode, decim=decim, use_fft=use_fft)
    # Copy each yielded (reused) buffer into its own row of the output.
    for sig_idx, coefs in enumerate(gen):
        out[sig_idx] = coefs
    return out
def _tfr_aux(method, inst, freqs, decim, return_itc, picks, average,
             output=None, **tfr_params):
    """Help reduce redundancy between tfr_morlet and tfr_multitaper.
    Runs _compute_tfr on the instance's data and wraps the result in an
    AverageTFR (``average=True``) or EpochsTFR (``average=False``)
    container, optionally paired with an ITC AverageTFR.
    """
    # NOTE: this import must come first; previously the docstring was placed
    # after it, which turned it into a dead string literal instead of the
    # function's docstring.
    from ..epochs import BaseEpochs
    decim = _check_decim(decim)
    data = _get_data(inst, return_itc)
    info = inst.info.copy()  # make a copy as sfreq can be altered
    info, data = _prepare_picks(info, data, picks, axis=1)
    del picks
    # Resolve the requested output; ITC only makes sense when averaging.
    if average:
        if output == 'complex':
            raise ValueError('output must be "power" if average=True')
        if return_itc:
            output = 'avg_power_itc'
        else:
            output = 'avg_power'
    else:
        output = 'power' if output is None else output
        if return_itc:
            raise ValueError('Inter-trial coherence is not supported'
                             ' with average=False')
    out = _compute_tfr(data, freqs, info['sfreq'], method=method,
                       output=output, decim=decim, **tfr_params)
    times = inst.times[decim].copy()
    # Account for the decimation in the container's sampling frequency.
    with info._unlock():
        info['sfreq'] /= decim.step
    if average:
        if return_itc:
            # avg_power_itc packs power in the real part, ITC in the imag.
            power, itc = out.real, out.imag
        else:
            power = out
        nave = len(data)
        out = AverageTFR(info, power, times, freqs, nave,
                         method='%s-power' % method)
        if return_itc:
            out = (out, AverageTFR(info, itc, times, freqs, nave,
                                   method='%s-itc' % method))
    else:
        power = out
        if isinstance(inst, BaseEpochs):
            # Carry the epochs bookkeeping over into the EpochsTFR.
            meta = deepcopy(inst._metadata)
            evs = deepcopy(inst.events)
            ev_id = deepcopy(inst.event_id)
            selection = deepcopy(inst.selection)
            drop_log = deepcopy(inst.drop_log)
        else:
            # if the input is of class Evoked
            meta = evs = ev_id = selection = drop_log = None
        out = EpochsTFR(info, power, times, freqs, method='%s-power' % method,
                        events=evs, event_id=ev_id, selection=selection,
                        drop_log=drop_log, metadata=meta)
    return out
@verbose
def tfr_morlet(inst, freqs, n_cycles, use_fft=False, return_itc=True, decim=1,
               n_jobs=None, picks=None, zero_mean=True, average=True,
               output='power', verbose=None):
    """Compute Time-Frequency Representation (TFR) using Morlet wavelets.
    Same computation as `~mne.time_frequency.tfr_array_morlet`, but
    operates on `~mne.Epochs` or `~mne.Evoked` objects instead of
    :class:`NumPy arrays <numpy.ndarray>`.
    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object.
    %(freqs_tfr)s
    %(n_cycles_tfr)s
    use_fft : bool, default False
        The fft based convolution or not.
    return_itc : bool, default True
        Return inter-trial coherence (ITC) as well as averaged power.
        Must be ``False`` for evoked data.
    %(decim_tfr)s
    %(n_jobs)s
    picks : array-like of int | None, default None
        The indices of the channels to decompose. If None, all available
        good data channels are decomposed.
    zero_mean : bool, default True
        Make sure the wavelet has a mean of zero.
        .. versionadded:: 0.13.0
    %(average_tfr)s
    output : str
        Can be ``"power"`` (default) or ``"complex"``. If ``"complex"``, then
        ``average`` must be ``False``.
        .. versionadded:: 0.15.0
    %(verbose)s
    Returns
    -------
    power : AverageTFR | EpochsTFR
        The averaged or single-trial power.
    itc : AverageTFR | EpochsTFR
        The inter-trial coherence (ITC). Only returned if return_itc
        is True.
    See Also
    --------
    mne.time_frequency.tfr_array_morlet
    mne.time_frequency.tfr_multitaper
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell
    Notes
    -----
    %(temporal-window_tfr_notes)s
    """
    # Collect the Morlet-specific options; _tfr_aux handles picking,
    # computation, and container construction.
    tfr_params = dict(n_cycles=n_cycles, n_jobs=n_jobs, use_fft=use_fft,
                      zero_mean=zero_mean, output=output)
    return _tfr_aux('morlet', inst, freqs, decim, return_itc, picks,
                    average, **tfr_params)
@verbose
def tfr_array_morlet(epoch_data, sfreq, freqs, n_cycles=7.0,
                     zero_mean=False, use_fft=True, decim=1, output='complex',
                     n_jobs=None, verbose=None):
    """Compute Time-Frequency Representation (TFR) using Morlet wavelets.
    Same computation as `~mne.time_frequency.tfr_morlet`, but operates on
    :class:`NumPy arrays <numpy.ndarray>` instead of `~mne.Epochs` objects.
    Parameters
    ----------
    epoch_data : array of shape (n_epochs, n_channels, n_times)
        The epochs.
    sfreq : float | int
        Sampling frequency of the data.
    %(freqs_tfr)s
    %(n_cycles_tfr)s
    zero_mean : bool, default False
        If True, make sure the wavelets have a mean of zero.
    use_fft : bool
        Use the FFT for convolutions or not. default True.
    %(decim_tfr)s
    output : str, default ``'complex'``
        * ``'complex'`` : single trial complex.
        * ``'power'`` : single trial power.
        * ``'phase'`` : single trial phase.
        * ``'avg_power'`` : average of single trial power.
        * ``'itc'`` : inter-trial coherence.
        * ``'avg_power_itc'`` : average of single trial power and inter-trial
          coherence across trials.
    %(n_jobs)s
        The number of epochs to process at the same time. The parallelization
        is implemented across channels. Default 1.
    %(verbose)s
    Returns
    -------
    out : array
        Time frequency transform of epoch_data.
        - if ``output in ('complex', 'phase', 'power')``, array of shape
          ``(n_epochs, n_chans, n_freqs, n_times)``
        - else, array of shape ``(n_chans, n_freqs, n_times)``
        If ``output`` is ``'avg_power_itc'``, the real values in ``out``
        contain the average power and the imaginary values contain the ITC:
        :math:`out = power_{avg} + i * itc`.
    See Also
    --------
    mne.time_frequency.tfr_morlet
    mne.time_frequency.tfr_multitaper
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell
    Notes
    -----
    %(temporal-window_tfr_notes)s
    .. versionadded:: 0.14.0
    """
    # Thin wrapper: all validation and computation happens in _compute_tfr.
    return _compute_tfr(epoch_data=epoch_data, freqs=freqs,
                        sfreq=sfreq, method='morlet', n_cycles=n_cycles,
                        zero_mean=zero_mean, time_bandwidth=None,
                        use_fft=use_fft, decim=decim, output=output,
                        n_jobs=n_jobs, verbose=verbose)
@verbose
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0,
                   use_fft=True, return_itc=True, decim=1,
                   n_jobs=None, picks=None, average=True, *, verbose=None):
    """Compute Time-Frequency Representation (TFR) using DPSS tapers.
    Same computation as `~mne.time_frequency.tfr_array_multitaper`, but
    operates on `~mne.Epochs` or `~mne.Evoked` objects instead of
    :class:`NumPy arrays <numpy.ndarray>`.
    Parameters
    ----------
    inst : Epochs | Evoked
        The epochs or evoked object.
    %(freqs_tfr)s
    %(n_cycles_tfr)s
    %(time_bandwidth_tfr)s
    use_fft : bool, default True
        The fft based convolution or not.
    return_itc : bool, default True
        Return inter-trial coherence (ITC) as well as averaged (or
        single-trial) power.
    %(decim_tfr)s
    %(n_jobs)s
    %(picks_good_data)s
    %(average_tfr)s
    %(verbose)s
    Returns
    -------
    power : AverageTFR | EpochsTFR
        The averaged or single-trial power.
    itc : AverageTFR | EpochsTFR
        The inter-trial coherence (ITC). Only returned if return_itc
        is True.
    See Also
    --------
    mne.time_frequency.tfr_array_multitaper
    mne.time_frequency.tfr_stockwell
    mne.time_frequency.tfr_array_stockwell
    mne.time_frequency.tfr_morlet
    mne.time_frequency.tfr_array_morlet
    Notes
    -----
    %(temporal-window_tfr_notes)s
    %(time_bandwidth_tfr_notes)s
    .. versionadded:: 0.9.0
    """
    # Multitaper wavelets are always zero-mean (cf. _compute_tfr defaults).
    tfr_params = dict(n_cycles=n_cycles, n_jobs=n_jobs, use_fft=use_fft,
                      zero_mean=True, time_bandwidth=time_bandwidth)
    return _tfr_aux('multitaper', inst, freqs, decim, return_itc, picks,
                    average, **tfr_params)
# TFR(s) class
class _BaseTFR(ContainsMixin, UpdateChannelsMixin, SizeMixin, TimeMixin):
    """Base TFR class shared by AverageTFR and EpochsTFR."""
    def __init__(self):
        # Baseline interval actually applied; set by apply_baseline().
        self.baseline = None
    @property
    def data(self):
        # The underlying data array; its exact shape is defined by the
        # subclass (e.g. (n_channels, n_freqs, n_times) for AverageTFR).
        return self._data
    @data.setter
    def data(self, data):
        self._data = data
    @property
    def ch_names(self):
        """Channel names."""
        return self.info['ch_names']
    @fill_doc
    def crop(self, tmin=None, tmax=None, fmin=None, fmax=None,
             include_tmax=True):
        """Crop data to a given time interval in place.
        Parameters
        ----------
        tmin : float | None
            Start time of selection in seconds.
        tmax : float | None
            End time of selection in seconds.
        fmin : float | None
            Lowest frequency of selection in Hz.
            .. versionadded:: 0.18.0
        fmax : float | None
            Highest frequency of selection in Hz.
            .. versionadded:: 0.18.0
        %(include_tmax)s
        Returns
        -------
        inst : instance of AverageTFR
            The modified instance.
        """
        # Time cropping is handled by the TimeMixin parent class.
        super().crop(tmin=tmin, tmax=tmax, include_tmax=include_tmax)
        if fmin is not None or fmax is not None:
            freq_mask = _freq_mask(self.freqs, sfreq=self.info['sfreq'],
                                   fmin=fmin, fmax=fmax)
        else:
            freq_mask = slice(None)
        self.freqs = self.freqs[freq_mask]
        # Deal with broadcasting (boolean arrays do not broadcast, but indices
        # do, so we need to convert freq_mask to make use of broadcasting)
        if isinstance(freq_mask, np.ndarray):
            freq_mask = np.where(freq_mask)[0]
        self._data = self._data[..., freq_mask, :]
        return self
    def copy(self):
        """Return a copy of the instance.
        Returns
        -------
        copy : instance of EpochsTFR | instance of AverageTFR
            A copy of the instance.
        """
        return deepcopy(self)
    @verbose
    def apply_baseline(self, baseline, mode='mean', verbose=None):
        """Baseline correct the data.
        Parameters
        ----------
        baseline : array-like, shape (2,)
            The time interval to apply rescaling / baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
            Perform baseline correction by
            - subtracting the mean of baseline values ('mean')
            - dividing by the mean of baseline values ('ratio')
            - dividing by the mean of baseline values and taking the log
              ('logratio')
            - subtracting the mean of baseline values followed by dividing by
              the mean of baseline values ('percent')
            - subtracting the mean of baseline values and dividing by the
              standard deviation of baseline values ('zscore')
            - dividing by the mean of baseline values, taking the log, and
              dividing by the standard deviation of log baseline values
              ('zlogratio')
        %(verbose)s
        Returns
        -------
        inst : instance of AverageTFR
            The modified instance.
        """  # noqa: E501
        self.baseline = _check_baseline(baseline, times=self.times,
                                        sfreq=self.info['sfreq'])
        # rescale modifies self.data in place (copy=False).
        rescale(self.data, self.times, self.baseline, mode, copy=False)
        return self
    @verbose
    def save(self, fname, overwrite=False, *, verbose=None):
        """Save TFR object to hdf5 file.
        Parameters
        ----------
        fname : str
            The file name, which should end with ``-tfr.h5``.
        %(overwrite)s
        %(verbose)s
        See Also
        --------
        read_tfrs, write_tfrs
        """
        write_tfrs(fname, self, overwrite=overwrite)
    @verbose
    def to_data_frame(self, picks=None, index=None, long_format=False,
                      time_format=None, *, verbose=None):
        """Export data in tabular structure as a pandas DataFrame.
        Channels are converted to columns in the DataFrame. By default,
        additional columns ``'time'``, ``'freq'``, ``'epoch'``, and
        ``'condition'`` (epoch event description) are added, unless ``index``
        is not ``None`` (in which case the columns specified in ``index`` will
        be used to form the DataFrame's index instead). ``'epoch'``, and
        ``'condition'`` are not supported for ``AverageTFR``.
        Parameters
        ----------
        %(picks_all)s
        %(index_df_epo)s
            Valid string values are ``'time'``, ``'freq'``, ``'epoch'``, and
            ``'condition'`` for ``EpochsTFR`` and ``'time'`` and ``'freq'``
            for ``AverageTFR``.
            Defaults to ``None``.
        %(long_format_df_epo)s
        %(time_format_df)s
            .. versionadded:: 0.23
        %(verbose)s
        Returns
        -------
        %(df_return)s
        """
        # check pandas once here, instead of in each private utils function
        pd = _check_pandas_installed()  # noqa
        # arg checking
        valid_index_args = ['time', 'freq']
        if isinstance(self, EpochsTFR):
            valid_index_args.extend(['epoch', 'condition'])
        valid_time_formats = ['ms', 'timedelta']
        index = _check_pandas_index_arguments(index, valid_index_args)
        time_format = _check_time_format(time_format, valid_time_formats)
        # get data
        times = self.times
        picks = _picks_to_idx(self.info, picks, 'all', exclude=())
        if isinstance(self, EpochsTFR):
            data = self.data[:, picks, :, :]
        else:
            data = self.data[np.newaxis, picks]  # add singleton "epochs" axis
        n_epochs, n_picks, n_freqs, n_times = data.shape
        # reshape to (epochs*freqs*times) x signals
        data = np.moveaxis(data, 1, -1)
        data = data.reshape(n_epochs * n_freqs * n_times, n_picks)
        # prepare extra columns / multiindex
        # Row order after reshape is epoch-major, then freq, then time, so
        # tile/repeat below must match that order exactly.
        mindex = list()
        times = np.tile(times, n_epochs * n_freqs)
        times = _convert_times(self, times, time_format)
        mindex.append(('time', times))
        freqs = self.freqs
        freqs = np.tile(np.repeat(freqs, n_times), n_epochs)
        mindex.append(('freq', freqs))
        if isinstance(self, EpochsTFR):
            mindex.append(('epoch', np.repeat(self.selection,
                                              n_times * n_freqs)))
            rev_event_id = {v: k for k, v in self.event_id.items()}
            conditions = [rev_event_id[k] for k in self.events[:, 2]]
            mindex.append(('condition', np.repeat(conditions,
                                                  n_times * n_freqs)))
        assert all(len(mdx) == len(mindex[0]) for mdx in mindex)
        # build DataFrame
        if isinstance(self, EpochsTFR):
            default_index = ['condition', 'epoch', 'freq', 'time']
        else:
            default_index = ['freq', 'time']
        df = _build_data_frame(self, data, picks, long_format, mindex, index,
                               default_index=default_index)
        return df
@fill_doc
class AverageTFR(_BaseTFR):
"""Container for Time-Frequency data.
Can for example store induced power at sensor level or inter-trial
coherence.
Parameters
----------
%(info_not_none)s
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None, default None
Comment on the data, e.g., the experimental condition.
method : str | None, default None
Comment on the method used to compute the data, e.g., morlet wavelet.
%(verbose)s
Attributes
----------
%(info_not_none)s
ch_names : list
The names of the channels.
nave : int
Number of averaged epochs.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data array.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
comment : str
Comment on dataset. Can be the condition.
method : str | None, default None
Comment on the method used to compute the data, e.g., morlet wavelet.
"""
    @verbose
    def __init__(self, info, data, times, freqs, nave, comment=None,
                 method=None, verbose=None):  # noqa: D102
        super().__init__()
        self.info = info
        # Validate that data is (n_channels, n_freqs, n_times) and that each
        # axis agrees with the corresponding metadata before storing anything.
        if data.ndim != 3:
            raise ValueError('data should be 3d. Got %d.' % data.ndim)
        n_channels, n_freqs, n_times = data.shape
        if n_channels != len(info['chs']):
            raise ValueError("Number of channels and data size don't match"
                             " (%d != %d)." % (n_channels, len(info['chs'])))
        if n_freqs != len(freqs):
            raise ValueError("Number of frequencies and data size don't match"
                             " (%d != %d)." % (n_freqs, len(freqs)))
        if n_times != len(times):
            raise ValueError("Number of times and data size don't match"
                             " (%d != %d)." % (n_times, len(times)))
        self.data = data
        self._set_times(np.array(times, dtype=float))
        # Keep the original (uncropped) time axis for later reference.
        self._raw_times = self.times.copy()
        self.freqs = np.array(freqs, dtype=float)
        self.nave = nave
        self.comment = comment
        self.method = method
        # AverageTFR data always lives in memory.
        self.preload = True
    @verbose
    def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
             tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
             cmap='RdBu_r', dB=False, colorbar=True, show=True, title=None,
             axes=None, layout=None, yscale='auto', mask=None,
             mask_style=None, mask_cmap="Greys", mask_alpha=0.1, combine=None,
             exclude=[], cnorm=None, verbose=None):
        """Plot TFRs as a two-dimensional image(s).
        Parameters
        ----------
        %(picks_good_data)s
        baseline : None (default) or tuple, shape (2,)
            The time interval to apply baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
            Perform baseline correction by
            - subtracting the mean of baseline values ('mean') (default)
            - dividing by the mean of baseline values ('ratio')
            - dividing by the mean of baseline values and taking the log
              ('logratio')
            - subtracting the mean of baseline values followed by dividing by
              the mean of baseline values ('percent')
            - subtracting the mean of baseline values and dividing by the
              standard deviation of baseline values ('zscore')
            - dividing by the mean of baseline values, taking the log, and
              dividing by the standard deviation of log baseline values
              ('zlogratio')
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used. Defaults to None.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used. Defaults to None.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used. Defaults to None.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used. Defaults to None.
        vmin : float | None
            The minimum value an the color scale. If vmin is None, the data
            minimum value is used. Defaults to None.
        vmax : float | None
            The maximum value an the color scale. If vmax is None, the data
            maximum value is used. Defaults to None.
        cmap : matplotlib colormap | 'interactive' | (colormap, bool)
            The colormap to use. If tuple, the first value indicates the
            colormap to use and the second value is a boolean defining
            interactivity. In interactive mode the colors are adjustable by
            clicking and dragging the colorbar with left and right mouse
            button. Left mouse button moves the scale up and down and right
            mouse button adjusts the range. Hitting space bar resets the range.
            Up and down arrows can be used to change the colormap. If
            'interactive', translates to ('RdBu_r', True). Defaults to
            'RdBu_r'.
            .. warning:: Interactive mode works smoothly only for a small
                amount of images.
        dB : bool
            If True, 10*log10 is applied to the data to get dB.
            Defaults to False.
        colorbar : bool
            If true, colorbar will be added to the plot. Defaults to True.
        show : bool
            Call pyplot.show() at the end. Defaults to True.
        title : str | 'auto' | None
            String for ``title``. Defaults to None (blank/no title). If
            'auto', and ``combine`` is None, the title for each figure
            will be the channel name. If 'auto' and ``combine`` is not None,
            ``title`` states how many channels were combined into that figure
            and the method that was used for ``combine``. If str, that String
            will be the title for each figure.
        axes : instance of Axes | list | None
            The axes to plot to. If list, the list must be a list of Axes of
            the same length as ``picks``. If instance of Axes, there must be
            only one channel plotted. If ``combine`` is not None, ``axes``
            must either be an instance of Axes, or a list of length 1.
        layout : Layout | None
            Layout instance specifying sensor positions. Used for interactive
            plotting of topographies on rectangle selection. If possible, the
            correct layout is inferred from the data.
        yscale : 'auto' (default) | 'linear' | 'log'
            The scale of y (frequency) axis. 'linear' gives linear y axis,
            'log' leads to log-spaced y axis and 'auto' detects if frequencies
            are log-spaced and only then sets the y axis to 'log'.
            .. versionadded:: 0.14.0
        mask : ndarray | None
            An array of booleans of the same shape as the data. Entries of the
            data that correspond to False in the mask are plotted
            transparently. Useful for, e.g., masking for statistical
            significance.
            .. versionadded:: 0.16.0
        mask_style : None | 'both' | 'contour' | 'mask'
            If ``mask`` is not None: if ``'contour'``, a contour line is drawn
            around the masked areas (``True`` in ``mask``). If ``'mask'``,
            entries not ``True`` in ``mask`` are shown transparently. If
            ``'both'``, both a contour and transparency are used.
            If ``None``, defaults to ``'both'`` if ``mask`` is not None, and is
            ignored otherwise.
            .. versionadded:: 0.17
        mask_cmap : matplotlib colormap | (colormap, bool) | 'interactive'
            The colormap chosen for masked parts of the image (see below), if
            ``mask`` is not ``None``. If None, ``cmap`` is reused. Defaults to
            ``'Greys'``. Not interactive. Otherwise, as ``cmap``.
            .. versionadded:: 0.17
        mask_alpha : float
            A float between 0 and 1. If ``mask`` is not None, this sets the
            alpha level (degree of transparency) for the masked-out segments.
            I.e., if 0, masked-out segments are not visible at all.
            Defaults to 0.1.
            .. versionadded:: 0.16.0
        combine : 'mean' | 'rms' | callable | None
            Type of aggregation to perform across selected channels. If
            None, plot one figure per selected channel. If a function, it must
            operate on an array of shape ``(n_channels, n_freqs, n_times)`` and
            return an array of shape ``(n_freqs, n_times)``.
            .. versionchanged:: 1.3
                Added support for ``callable``.
        exclude : list of str | 'bads'
            Channels names to exclude from being shown. If 'bads', the
            bad channels are excluded. Defaults to an empty list.
        %(cnorm)s
            .. versionadded:: 0.24
        %(verbose)s
        Returns
        -------
        figs : list of instances of matplotlib.figure.Figure
            A list of figures containing the time-frequency power.
        """  # noqa: E501
        # Thin public wrapper: all plotting logic lives in the private
        # _plot(), which is also reused internally by plot_joint().
        return self._plot(picks=picks, baseline=baseline, mode=mode,
                          tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                          vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
                          colorbar=colorbar, show=show, title=title,
                          axes=axes, layout=layout, yscale=yscale, mask=mask,
                          mask_style=mask_style, mask_cmap=mask_cmap,
                          mask_alpha=mask_alpha, combine=combine,
                          exclude=exclude, cnorm=cnorm, verbose=verbose)
    @verbose
    def _plot(self, picks=None, baseline=None, mode='mean', tmin=None,
              tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
              cmap='RdBu_r', dB=False, colorbar=True, show=True, title=None,
              axes=None, layout=None, yscale='auto', mask=None,
              mask_style=None, mask_cmap="Greys", mask_alpha=.25,
              combine=None, exclude=None, copy=True,
              source_plot_joint=False, topomap_args=dict(), ch_type=None,
              cnorm=None, verbose=None):
        """Plot TFRs as a two-dimensional image(s).
        See self.plot() for parameters description.
        """
        import matplotlib.pyplot as plt
        from ..viz.topo import _imshow_tfr
        # channel selection
        # simply create a new tfr object(s) with the desired channel selection
        tfr = _preproc_tfr_instance(
            self, picks, tmin, tmax, fmin, fmax, vmin, vmax, dB, mode,
            baseline, exclude, copy)
        del picks
        data = tfr.data
        # one figure per remaining channel, unless ``combine`` collapses
        # everything into a single image below
        n_picks = len(tfr.ch_names) if combine is None else 1
        # combine picks
        _validate_type(combine, (None, str, "callable"))
        if isinstance(combine, str):
            _check_option("combine", combine, ("mean", "rms"))
            if combine == 'mean':
                data = data.mean(axis=0, keepdims=True)
            elif combine == 'rms':
                data = np.sqrt((data ** 2).mean(axis=0, keepdims=True))
        elif combine is not None:  # callable
            # It must operate on (n_channels, n_freqs, n_times) and return
            # (n_freqs, n_times). Operates on a copy in-case 'combine' does
            # some in-place operations.
            try:
                data = combine(data.copy())
            except TypeError:
                raise RuntimeError(
                    "A callable 'combine' must operate on a single argument, "
                    "a numpy array of shape (n_channels, n_freqs, n_times)."
                )
            if (
                not isinstance(data, np.ndarray)
                or data.shape != tfr.data.shape[1:]
            ):
                raise RuntimeError(
                    "A callable 'combine' must return a numpy array of shape "
                    "(n_freqs, n_times)."
                )
            # keep initial dimensions
            data = data[np.newaxis]
        # figure overhead
        # set plot dimension
        tmin, tmax = tfr.times[[0, -1]]
        # symmetric color limits around zero when not given explicitly
        if vmax is None:
            vmax = np.abs(data).max()
        if vmin is None:
            vmin = -np.abs(data).max()
        # set colorbar
        cmap = _setup_cmap(cmap)
        # make sure there are as many axes as there will be channels to plot
        if isinstance(axes, list) or isinstance(axes, np.ndarray):
            figs_and_axes = [(ax.get_figure(), ax) for ax in axes]
        elif isinstance(axes, plt.Axes):
            figs_and_axes = [(ax.get_figure(), ax) for ax in [axes]]
        elif axes is None:
            figs = [plt.figure() for i in range(n_picks)]
            figs_and_axes = [(fig, fig.add_subplot(111)) for fig in figs]
        else:
            raise ValueError('axes must be None, plt.Axes, or list '
                             'of plt.Axes.')
        if len(figs_and_axes) != n_picks:
            raise RuntimeError('There must be an axes for each picked '
                               'channel.')
        for idx in range(n_picks):
            fig = figs_and_axes[idx][0]
            ax = figs_and_axes[idx][1]
            # rubber-band selection in the image pops up topomaps
            # (_onselect); display-only keys must not leak into it
            onselect_callback = partial(
                tfr._onselect, cmap=cmap, source_plot_joint=source_plot_joint,
                topomap_args={k: v for k, v in topomap_args.items()
                              if k not in {"vmin", "vmax", "cmap", "axes"}})
            _imshow_tfr(
                ax, 0, tmin, tmax, vmin, vmax, onselect_callback, ylim=None,
                tfr=data[idx: idx + 1], freq=tfr.freqs, x_label='Time (s)',
                y_label='Frequency (Hz)', colorbar=colorbar, cmap=cmap,
                yscale=yscale, mask=mask, mask_style=mask_style,
                mask_cmap=mask_cmap, mask_alpha=mask_alpha, cnorm=cnorm)
            if title == 'auto':
                if len(tfr.info['ch_names']) == 1 or combine is None:
                    subtitle = tfr.info['ch_names'][idx]
                else:
                    subtitle = _set_title_multiple_electrodes(
                        None, combine, tfr.info["ch_names"], all_=True,
                        ch_type=ch_type)
            else:
                subtitle = title
            fig.suptitle(subtitle)
        plt_show(show)
        return [fig for (fig, ax) in figs_and_axes]
    @verbose
    def plot_joint(self, timefreqs=None, picks=None, baseline=None,
                   mode='mean', tmin=None, tmax=None, fmin=None, fmax=None,
                   vmin=None, vmax=None, cmap='RdBu_r', dB=False,
                   colorbar=True, show=True, title=None,
                   yscale='auto', combine='mean', exclude=[],
                   topomap_args=None, image_args=None, verbose=None):
        """Plot TFRs as a two-dimensional image with topomaps.
        Parameters
        ----------
        timefreqs : None | list of tuple | dict of tuple
            The time-frequency point(s) for which topomaps will be plotted.
            See Notes.
        %(picks_good_data)s
        baseline : None (default) or tuple of length 2
            The time interval to apply baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None, the beginning of the data is used.
            If b is None, then b is set to the end of the interval.
            If baseline is equal to (None, None), the entire time
            interval is used.
        mode : None | str
            If str, must be one of 'ratio', 'zscore', 'mean', 'percent',
            'logratio' and 'zlogratio'.
            Do baseline correction with ratio (power is divided by mean
            power during baseline) or zscore (power is divided by standard
            deviation of power during baseline after subtracting the mean,
            power = [power - mean(power_baseline)] / std(power_baseline)),
            mean simply subtracts the mean power, percent is the same as
            applying ratio then mean, logratio is the same as mean but then
            rendered in log-scale, zlogratio is the same as zscore but data
            is rendered in log-scale first.
            If None no baseline correction is applied.
        %(tmin_tmax_psd)s
        %(fmin_fmax_psd)s
        vmin : float | None
            The minimum value of the color scale for the image (for
            topomaps, see ``topomap_args``). If vmin is None, the data
            absolute minimum value is used.
        vmax : float | None
            The maximum value of the color scale for the image (for
            topomaps, see ``topomap_args``). If vmax is None, the data
            absolute maximum value is used.
        cmap : matplotlib colormap
            The colormap to use.
        dB : bool
            If True, 10*log10 is applied to the data to get dB.
        colorbar : bool
            If true, colorbar will be added to the plot (relating to the
            topomaps). For user defined axes, the colorbar cannot be drawn.
            Defaults to True.
        show : bool
            Call pyplot.show() at the end.
        title : str | None
            String for title. Defaults to None (blank/no title).
        yscale : 'auto' (default) | 'linear' | 'log'
            The scale of y (frequency) axis. 'linear' gives linear y axis,
            'log' leads to log-spaced y axis and 'auto' detects if frequencies
            are log-spaced and only then sets the y axis to 'log'.
        combine : 'mean' | 'rms' | callable
            Type of aggregation to perform across selected channels. If a
            function, it must operate on an array of shape
            ``(n_channels, n_freqs, n_times)`` and return an array of shape
            ``(n_freqs, n_times)``.
            .. versionchanged:: 1.3
                Added support for ``callable``.
        exclude : list of str | 'bads'
            Channels names to exclude from being shown. If 'bads', the
            bad channels are excluded. Defaults to an empty list, i.e., ``[]``.
        topomap_args : None | dict
            A dict of ``kwargs`` that are forwarded to
            :func:`mne.viz.plot_topomap` to style the topomaps. ``axes`` and
            ``show`` are ignored. If ``times`` is not in this dict, automatic
            peak detection is used. Beyond that, if ``None``, no customizable
            arguments will be passed.
            Defaults to ``None``.
        image_args : None | dict
            A dict of ``kwargs`` that are forwarded to :meth:`AverageTFR.plot`
            to style the image. ``axes`` and ``show`` are ignored. Beyond that,
            if ``None``, no customizable arguments will be passed.
            Defaults to ``None``.
        %(verbose)s
        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure containing the topography.
        Notes
        -----
        ``timefreqs`` has three different modes: tuples, dicts, and auto.
        For (list of) tuple(s) mode, each tuple defines a pair
        (time, frequency) in s and Hz on the TFR plot. For example, to
        look at 10 Hz activity 1 second into the epoch and 3 Hz activity
        300 msec into the epoch, ::
            timefreqs=((1, 10), (.3, 3))
        If provided as a dictionary, (time, frequency) tuples are keys and
        (time_window, frequency_window) tuples are the values - indicating the
        width of the windows (centered on the time and frequency indicated by
        the key) to be averaged over. For example, ::
            timefreqs={(1, 10): (0.1, 2)}
        would translate into a window that spans 0.95 to 1.05 seconds, as
        well as 9 to 11 Hz. If None, a single topomap will be plotted at the
        absolute peak across the time-frequency representation.
        .. versionadded:: 0.16.0
        """  # noqa: E501
        from ..viz.topomap import (_set_contour_locator, plot_topomap,
                                   _get_pos_outlines, _find_topomap_coords)
        import matplotlib.pyplot as plt
        #####################################
        # Handle channels (picks and types) #
        #####################################
        # it would be nicer to let this happen in self._plot,
        # but we need it here to do the loop over the remaining channel
        # types in case a user supplies `picks` that pre-select only one
        # channel type.
        # Nonetheless, it should be refactored for code reuse.
        # only copy the instance when a modification is actually requested
        copy = any(var is not None for var in (exclude, picks, baseline))
        tfr = _pick_inst(self, picks, exclude, copy=copy)
        del picks
        ch_types = _get_channel_types(tfr.info, unique=True)
        # if multiple sensor types: one plot per channel type, recursive call
        if len(ch_types) > 1:
            logger.info("Multiple channel types selected, returning one "
                        "figure per type.")
            figs = list()
            for this_type in ch_types:  # pick corresponding channel type
                type_picks = [idx for idx in range(tfr.info['nchan'])
                              if channel_type(tfr.info, idx) == this_type]
                tf_ = _pick_inst(tfr, type_picks, None, copy=True)
                if len(_get_channel_types(tf_.info, unique=True)) > 1:
                    raise RuntimeError(
                        'Possibly infinite loop due to channel selection '
                        'problem. This should never happen! Please check '
                        'your channel types.')
                figs.append(
                    tf_.plot_joint(
                        timefreqs=timefreqs, picks=None, baseline=baseline,
                        mode=mode, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
                        vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
                        colorbar=colorbar, show=False, title=title,
                        yscale=yscale, combine=combine,
                        exclude=None, topomap_args=topomap_args,
                        verbose=verbose))
            return figs
        else:
            ch_type = ch_types.pop()
        # Handle timefreqs
        timefreqs = _get_timefreqs(tfr, timefreqs)
        n_timefreqs = len(timefreqs)
        if topomap_args is None:
            topomap_args = dict()
        # strip keys that are controlled by this function itself
        topomap_args_pass = {k: v for k, v in topomap_args.items() if
                             k not in ('axes', 'show', 'colorbar')}
        topomap_args_pass['outlines'] = topomap_args.get('outlines', 'head')
        topomap_args_pass["contours"] = topomap_args.get('contours', 6)
        topomap_args_pass['ch_type'] = ch_type
        ##############
        # Image plot #
        ##############
        fig, tf_ax, map_ax, cbar_ax = _prepare_joint_axes(n_timefreqs)
        cmap = _setup_cmap(cmap)
        # image plot
        # we also use this to baseline and truncate (times and freqs)
        # (a copy of) the instance
        if image_args is None:
            image_args = dict()
        fig = tfr._plot(
            picks=None, baseline=baseline, mode=mode, tmin=tmin, tmax=tmax,
            fmin=fmin, fmax=fmax, vmin=vmin, vmax=vmax, cmap=cmap, dB=dB,
            colorbar=False, show=False, title=title, axes=tf_ax,
            yscale=yscale, combine=combine, exclude=None, copy=False,
            source_plot_joint=True, topomap_args=topomap_args_pass,
            ch_type=ch_type, **image_args)[0]
        # set and check time and freq limits ...
        # can only do this after the tfr plot because it may change these
        # parameters
        tmax, tmin = tfr.times.max(), tfr.times.min()
        fmax, fmin = tfr.freqs.max(), tfr.freqs.min()
        for time, freq in timefreqs.keys():
            if not (tmin <= time <= tmax):
                error_value = "time point (" + str(time) + " s)"
            elif not (fmin <= freq <= fmax):
                error_value = "frequency (" + str(freq) + " Hz)"
            else:
                continue
            raise ValueError("Requested " + error_value + " exceeds the range"
                             "of the data. Choose different `timefreqs`.")
        ############
        # Topomaps #
        ############
        titles, all_data, all_pos, vlims = [], [], [], []
        # the structure here is a bit complicated to allow aggregating vlims
        # over all topomaps. First, one loop over all timefreqs to collect
        # vlims. Then, find the max vlims and in a second loop over timefreqs,
        # do the actual plotting.
        timefreqs_array = np.array([np.array(keys) for keys in timefreqs])
        order = timefreqs_array[:, 0].argsort()  # sort by time
        for ii, (time, freq) in enumerate(timefreqs_array[order]):
            avg = timefreqs[(time, freq)]
            # set up symmetric windows
            time_half_range, freq_half_range = avg / 2.
            if time_half_range == 0:
                time = tfr.times[np.argmin(np.abs(tfr.times - time))]
            if freq_half_range == 0:
                freq = tfr.freqs[np.argmin(np.abs(tfr.freqs - freq))]
            if (time_half_range == 0) and (freq_half_range == 0):
                sub_map_title = '(%.2f s,\n%.1f Hz)' % (time, freq)
            else:
                sub_map_title = \
                    '(%.1f \u00B1 %.1f s,\n%.1f \u00B1 %.1f Hz)' % \
                    (time, time_half_range, freq, freq_half_range)
            tmin = time - time_half_range
            tmax = time + time_half_range
            fmin = freq - freq_half_range
            fmax = freq + freq_half_range
            data = tfr.data
            # merging grads here before rescaling makes ERDs visible
            sphere = topomap_args.get('sphere')
            if ch_type == 'grad':
                picks = _pair_grad_sensors(tfr.info, topomap_coords=False)
                pos = _find_topomap_coords(
                    tfr.info, picks=picks[::2], sphere=sphere)
                method = combine if isinstance(combine, str) else "rms"
                data, _ = _merge_ch_data(data[picks], ch_type, [],
                                         method=method)
                del picks, method
            else:
                pos, _ = _get_pos_outlines(tfr.info, None, sphere)
            del sphere
            all_pos.append(pos)
            data, times, freqs, _, _ = _preproc_tfr(
                data, tfr.times, tfr.freqs, tmin, tmax, fmin, fmax,
                mode, baseline, vmin, vmax, None, tfr.info['sfreq'])
            vlims.append(np.abs(data).max())
            titles.append(sub_map_title)
            all_data.append(data)
            # snap the requested (time, freq) to the nearest grid point
            # that was actually averaged, for the connection lines below
            new_t = tfr.times[np.abs(tfr.times - np.median([times])).argmin()]
            new_f = tfr.freqs[np.abs(tfr.freqs - np.median([freqs])).argmin()]
            timefreqs_array[ii] = (new_t, new_f)
        # passing args to the topomap calls
        max_lim = max(vlims)
        _vlim = list(topomap_args.get('vlim', (None, None)))
        # fall back on ± max_lim
        for sign, index in zip((-1, 1), (0, 1)):
            if _vlim[index] is None:
                _vlim[index] = sign * max_lim
        topomap_args_pass['vlim'] = tuple(_vlim)
        locator, contours = _set_contour_locator(
            *_vlim, topomap_args_pass["contours"])
        topomap_args_pass['contours'] = contours
        for ax, title, data, pos in zip(map_ax, titles, all_data, all_pos):
            ax.set_title(title)
            plot_topomap(data.mean(axis=(-1, -2)), pos,
                         cmap=cmap[0], axes=ax, show=False,
                         **topomap_args_pass)
        #############
        # Finish up #
        #############
        if colorbar:
            from matplotlib import ticker
            cbar = plt.colorbar(ax.images[0], cax=cbar_ax)
            if locator is None:
                locator = ticker.MaxNLocator(nbins=5)
            cbar.locator = locator
            cbar.update_ticks()
        plt.subplots_adjust(left=.12, right=.925, bottom=.14,
                            top=1. if title is not None else 1.2)
        # draw the connection lines between time series and topoplots
        lines = [_connection_line(time_, fig, tf_ax, map_ax_, y=freq_,
                                  y_source_transform="transData")
                 for (time_, freq_), map_ax_ in zip(timefreqs_array, map_ax)]
        fig.lines.extend(lines)
        plt_show(show)
        return fig
    @verbose
    def _onselect(self, eclick, erelease, baseline=None, mode=None,
                  cmap=None, source_plot_joint=False, topomap_args=None,
                  verbose=None):
        """Handle rubber band selector in channel tfr."""
        from ..viz.topomap import plot_tfr_topomap, plot_topomap, _add_colorbar
        # eclick/erelease are matplotlib mouse events: .x/.y are display
        # (pixel) coordinates, .xdata/.ydata are data coordinates
        if abs(eclick.x - erelease.x) < .1 or abs(eclick.y - erelease.y) < .1:
            return
        tmin = round(min(eclick.xdata, erelease.xdata), 5)  # s
        tmax = round(max(eclick.xdata, erelease.xdata), 5)
        fmin = round(min(eclick.ydata, erelease.ydata), 5)  # Hz
        fmax = round(max(eclick.ydata, erelease.ydata), 5)
        tmin = min(self.times, key=lambda x: abs(x - tmin))  # find closest
        tmax = min(self.times, key=lambda x: abs(x - tmax))
        fmin = min(self.freqs, key=lambda x: abs(x - fmin))
        fmax = min(self.freqs, key=lambda x: abs(x - fmax))
        if tmin == tmax or fmin == fmax:
            logger.info('The selected area is too small. '
                        'Select a larger time-frequency window.')
            return
        # collect the channel types for which a topomap can be drawn
        types = list()
        if 'eeg' in self:
            types.append('eeg')
        if 'mag' in self:
            types.append('mag')
        if 'grad' in self:
            if len(_pair_grad_sensors(self.info, topomap_coords=False,
                                      raise_error=False)) >= 2:
                types.append('grad')
            elif len(types) == 0:
                return  # Don't draw a figure for nothing.
        fig = figure_nobar()
        fig.suptitle('{:.2f} s - {:.2f} s, {:.2f} Hz - {:.2f} Hz'.format(
            tmin, tmax, fmin, fmax), y=0.04)
        if source_plot_joint:
            ax = fig.add_subplot(111)
            data = _preproc_tfr(
                self.data, self.times, self.freqs, tmin, tmax, fmin, fmax,
                None, None, None, None, None, self.info['sfreq'])[0]
            data = data.mean(-1).mean(-1)
            vmax = np.abs(data).max()
            im, _ = plot_topomap(data, self.info, vlim=(-vmax, vmax),
                                 cmap=cmap[0], axes=ax, show=False,
                                 **topomap_args)
            _add_colorbar(ax, im, cmap, title="AU", pad=.1)
            fig.show()
        else:
            for idx, ch_type in enumerate(types):
                ax = fig.add_subplot(1, len(types), idx + 1)
                plot_tfr_topomap(self, ch_type=ch_type, tmin=tmin, tmax=tmax,
                                 fmin=fmin, fmax=fmax,
                                 baseline=baseline, mode=mode, cmap=None,
                                 title=ch_type, vmin=None, vmax=None, axes=ax)
    @verbose
    def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
                  tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
                  layout=None, cmap='RdBu_r', title=None, dB=False,
                  colorbar=True, layout_scale=0.945, show=True,
                  border='none', fig_facecolor='k', fig_background=None,
                  font_color='w', yscale='auto', verbose=None):
        """Plot TFRs in a topography with images.
        Parameters
        ----------
        %(picks_good_data)s
        baseline : None (default) or tuple of length 2
            The time interval to apply baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
            Perform baseline correction by
            - subtracting the mean of baseline values ('mean')
            - dividing by the mean of baseline values ('ratio')
            - dividing by the mean of baseline values and taking the log
              ('logratio')
            - subtracting the mean of baseline values followed by dividing by
              the mean of baseline values ('percent')
            - subtracting the mean of baseline values and dividing by the
              standard deviation of baseline values ('zscore')
            - dividing by the mean of baseline values, taking the log, and
              dividing by the standard deviation of log baseline values
              ('zlogratio')
        tmin : None | float
            The first time instant to display. If None the first time point
            available is used.
        tmax : None | float
            The last time instant to display. If None the last time point
            available is used.
        fmin : None | float
            The first frequency to display. If None the first frequency
            available is used.
        fmax : None | float
            The last frequency to display. If None the last frequency
            available is used.
        vmin : float | None
            The minimum value of the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value of the color scale. If vmax is None, the data
            maximum value is used.
        layout : Layout | None
            Layout instance specifying sensor positions. If possible, the
            correct layout is inferred from the data.
        cmap : matplotlib colormap | str
            The colormap to use. Defaults to 'RdBu_r'.
        title : str
            Title of the figure.
        dB : bool
            If True, 10*log10 is applied to the data to get dB.
        colorbar : bool
            If true, colorbar will be added to the plot.
        layout_scale : float
            Scaling factor for adjusting the relative size of the layout
            on the canvas.
        show : bool
            Call pyplot.show() at the end.
        border : str
            Matplotlib borders style to be used for each sensor plot.
        fig_facecolor : color
            The figure face color. Defaults to black.
        fig_background : None | array
            A background image for the figure. This must be a valid input to
            `matplotlib.pyplot.imshow`. Defaults to None.
        font_color : color
            The color of tick labels in the colorbar. Defaults to white.
        yscale : 'auto' (default) | 'linear' | 'log'
            The scale of y (frequency) axis. 'linear' gives linear y axis,
            'log' leads to log-spaced y axis and 'auto' detects if frequencies
            are log-spaced and only then sets the y axis to 'log'.
        %(verbose)s
        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure containing the topography.
        """  # noqa: E501
        from ..viz.topo import _imshow_tfr, _plot_topo, _imshow_tfr_unified
        from ..viz import add_background_image
        times = self.times.copy()
        freqs = self.freqs
        data = self.data
        info = self.info
        info, data = _prepare_picks(info, data, picks, axis=0)
        del picks
        data, times, freqs, vmin, vmax = \
            _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
                         mode, baseline, vmin, vmax, dB, info['sfreq'])
        if layout is None:
            from mne import find_layout
            layout = find_layout(self.info)
        # rubber-band selection inside a sensor image pops up topomaps
        onselect_callback = partial(self._onselect, baseline=baseline,
                                    mode=mode)
        click_fun = partial(_imshow_tfr, tfr=data, freq=freqs, yscale=yscale,
                            cmap=(cmap, True), onselect=onselect_callback)
        imshow = partial(_imshow_tfr_unified, tfr=data, freq=freqs, cmap=cmap,
                         onselect=onselect_callback)
        fig = _plot_topo(info=info, times=times, show_func=imshow,
                         click_func=click_fun, layout=layout,
                         colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
                         layout_scale=layout_scale, title=title, border=border,
                         x_label='Time (s)', y_label='Frequency (Hz)',
                         fig_facecolor=fig_facecolor, font_color=font_color,
                         unified=True, img=True)
        add_background_image(fig, fig_background)
        plt_show(show)
        return fig
    @fill_doc
    def plot_topomap(
            self, tmin=None, tmax=None, fmin=0., fmax=np.inf, *, ch_type=None,
            baseline=None, mode='mean', sensors=True, show_names=False,
            mask=None, mask_params=None, contours=6, outlines='head',
            sphere=None, image_interp=_INTERPOLATION_DEFAULT,
            extrapolate=_EXTRAPOLATE_DEFAULT, border=_BORDER_DEFAULT, res=64,
            size=2, cmap=None, vlim=(None, None), cnorm=None, colorbar=True,
            cbar_fmt='%1.1e', units=None, axes=None, show=True):
        """Plot topographic maps of time-frequency intervals of TFR data.
        Parameters
        ----------
        %(tmin_tmax_psd)s
        %(fmin_fmax_psd)s
        %(ch_type_topomap_psd)s
        baseline : tuple or list of length 2
            The time interval to apply rescaling / baseline correction.
            If None do not apply it. If baseline is (a, b)
            the interval is between "a (s)" and "b (s)".
            If a is None the beginning of the data is used
            and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
            interval is used.
        mode : 'mean' | 'ratio' | 'logratio' | 'percent' | 'zscore' | 'zlogratio'
            Perform baseline correction by
            - subtracting the mean of baseline values ('mean')
            - dividing by the mean of baseline values ('ratio')
            - dividing by the mean of baseline values and taking the log
              ('logratio')
            - subtracting the mean of baseline values followed by dividing by
              the mean of baseline values ('percent')
            - subtracting the mean of baseline values and dividing by the
              standard deviation of baseline values ('zscore')
            - dividing by the mean of baseline values, taking the log, and
              dividing by the standard deviation of log baseline values
              ('zlogratio')
        %(sensors_topomap)s
        %(show_names_topomap)s
        %(mask_evoked_topomap)s
        %(mask_params_topomap)s
        %(contours_topomap)s
        %(outlines_topomap)s
        %(sphere_topomap_auto)s
        %(image_interp_topomap)s
        %(extrapolate_topomap)s
        %(border_topomap)s
        %(res_topomap)s
        %(size_topomap)s
        %(cmap_topomap)s
        %(vlim_plot_topomap)s
            .. versionadded:: 1.2
        %(cnorm)s
            .. versionadded:: 1.2
        %(colorbar_topomap)s
        %(cbar_fmt_topomap)s
        %(units_topomap)s
        %(axes_plot_topomap)s
        %(show)s
        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure containing the topography.
        """  # noqa: E501
        from ..viz import plot_tfr_topomap
        # TODO units => unit
        # thin wrapper: all arguments are forwarded unchanged to the viz
        # function, which does the selection, averaging, and drawing
        return plot_tfr_topomap(
            self, tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, ch_type=ch_type,
            baseline=baseline, mode=mode, sensors=sensors,
            show_names=show_names, mask=mask, mask_params=mask_params,
            contours=contours, outlines=outlines, sphere=sphere,
            image_interp=image_interp, extrapolate=extrapolate, border=border,
            res=res, size=size, cmap=cmap, vlim=vlim, cnorm=cnorm,
            colorbar=colorbar, cbar_fmt=cbar_fmt, units=units,
            axes=axes, show=show)
def _check_compat(self, tfr):
"""Check that self and tfr have the same time-frequency ranges."""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr): # noqa: D105
"""Add instances."""
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
    def __iadd__(self, tfr):  # noqa: D105
        # in-place add; requires identical times/freqs (asserted)
        self._check_compat(tfr)
        self.data += tfr.data
        return self
def __sub__(self, tfr): # noqa: D105
"""Subtract instances."""
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
    def __isub__(self, tfr):  # noqa: D105
        # in-place subtract; requires identical times/freqs (asserted)
        self._check_compat(tfr)
        self.data -= tfr.data
        return self
def __truediv__(self, a): # noqa: D105
"""Divide instances."""
out = self.copy()
out /= a
return out
    def __itruediv__(self, a):  # noqa: D105
        # in-place division of the data array by a scalar (or broadcastable)
        self.data /= a
        return self
def __mul__(self, a):
"""Multiply source instances."""
out = self.copy()
out *= a
return out
    def __imul__(self, a):  # noqa: D105
        # in-place multiplication of the data array by a scalar
        self.data *= a
        return self
def __repr__(self): # noqa: D105
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
s += ', ~%s' % (sizeof_fmt(self._size),)
return "<AverageTFR | %s>" % s
@fill_doc
class EpochsTFR(_BaseTFR, GetEpochsMixin):
    """Container for Time-Frequency data on epochs.
    Can for example store induced power at sensor level.
    Parameters
    ----------
    %(info_not_none)s
    data : ndarray, shape (n_epochs, n_channels, n_freqs, n_times)
        The data.
    times : ndarray, shape (n_times,)
        The time values in seconds.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    comment : str | None, default None
        Comment on the data, e.g., the experimental condition.
    method : str | None, default None
        Comment on the method used to compute the data, e.g., morlet wavelet.
    events : ndarray, shape (n_events, 3) | None
        The events as stored in the Epochs class. If None (default), all event
        values are set to 1 and event time-samples are set to range(n_epochs).
    event_id : dict | None
        Example: dict(auditory=1, visual=3). They keys can be used to access
        associated events. If None, all events will be used and a dict is
        created with string integer names corresponding to the event id
        integers.
    selection : iterable | None
        Iterable of indices of selected epochs. If ``None``, will be
        automatically generated, corresponding to all non-zero events.
        .. versionadded:: 0.23
    drop_log : tuple | None
        Tuple of tuple of strings indicating which epochs have been marked to
        be ignored.
        .. versionadded:: 0.23
    metadata : instance of pandas.DataFrame | None
        A :class:`pandas.DataFrame` containing pertinent information for each
        trial. See :class:`mne.Epochs` for further details.
    %(verbose)s
    Attributes
    ----------
    %(info_not_none)s
    ch_names : list
        The names of the channels.
    data : ndarray, shape (n_epochs, n_channels, n_freqs, n_times)
        The data array.
    times : ndarray, shape (n_times,)
        The time values in seconds.
    freqs : ndarray, shape (n_freqs,)
        The frequencies in Hz.
    comment : string
        Comment on dataset. Can be the condition.
    method : str | None, default None
        Comment on the method used to compute the data, e.g., morlet wavelet.
    events : ndarray, shape (n_events, 3) | None
        Array containing sample information as event_id
    event_id : dict | None
        Names of conditions correspond to event_ids
    selection : array
        List of indices of selected events (not dropped or ignored etc.). For
        example, if the original event array had 4 events and the second event
        has been dropped, this attribute would be np.array([0, 2, 3]).
    drop_log : tuple of tuple
        A tuple of the same length as the event array used to initialize the
        ``EpochsTFR`` object. If the i-th original event is still part of the
        selection, drop_log[i] will be an empty tuple; otherwise it will be
        a tuple of the reasons the event is not longer in the selection, e.g.:
        - ``'IGNORED'``
            If it isn't part of the current subset defined by the user
        - ``'NO_DATA'`` or ``'TOO_SHORT'``
            If epoch didn't contain enough data names of channels that
            exceeded the amplitude threshold
        - ``'EQUALIZED_COUNTS'``
            See :meth:`~mne.Epochs.equalize_event_counts`
        - ``'USER'``
            For user-defined reasons (see :meth:`~mne.Epochs.drop`).
    metadata : pandas.DataFrame, shape (n_events, n_cols) | None
        DataFrame containing pertinent information for each trial
    Notes
    -----
    .. versionadded:: 0.13.0
    """
    @verbose
    def __init__(self, info, data, times, freqs, comment=None, method=None,
                 events=None, event_id=None, selection=None,
                 drop_log=None, metadata=None, verbose=None):
        # noqa: D102
        super().__init__()
        self.info = info
        # Validate that each axis of the 4D data matches its metadata.
        if data.ndim != 4:
            raise ValueError('data should be 4d. Got %d.' % data.ndim)
        n_epochs, n_channels, n_freqs, n_times = data.shape
        if n_channels != len(info['chs']):
            raise ValueError("Number of channels and data size don't match"
                             " (%d != %d)." % (n_channels, len(info['chs'])))
        if n_freqs != len(freqs):
            raise ValueError("Number of frequencies and data size don't match"
                             " (%d != %d)." % (n_freqs, len(freqs)))
        if n_times != len(times):
            raise ValueError("Number of times and data size don't match"
                             " (%d != %d)." % (n_times, len(times)))
        # Default events: one synthetic event (id 1) per epoch.
        if events is None:
            n_epochs = len(data)
            events = _gen_events(n_epochs)
        # Default selection: every epoch is selected.
        if selection is None:
            n_epochs = len(data)
            selection = np.arange(n_epochs)
        # Default drop_log: epochs missing from `selection` are 'IGNORED'.
        if drop_log is None:
            n_epochs_prerejection = max(len(events), max(selection) + 1)
            drop_log = tuple(
                () if k in selection else ('IGNORED',)
                for k in range(n_epochs_prerejection))
        else:
            drop_log = drop_log  # no-op: caller-provided drop_log used as-is
        # check consistency:
        assert len(selection) == len(events)
        assert len(drop_log) >= len(events)
        assert len(selection) == sum(
            (len(dl) == 0 for dl in drop_log))
        event_id = _check_event_id(event_id, events)
        self.data = data
        self._set_times(np.array(times, dtype=float))
        self._raw_times = self.times.copy()  # needed for decimate
        self._decim = 1
        self.freqs = np.array(freqs, dtype=float)
        self.events = events
        self.event_id = event_id
        self.selection = selection
        self.drop_log = drop_log
        self.comment = comment
        self.method = method
        self.preload = True
        self.metadata = metadata
    @property
    def _detrend_picks(self):
        # No channels are detrended for TFR epochs.
        return list()
    def __repr__(self):  # noqa: D105
        s = "time : [%f, %f]" % (self.times[0], self.times[-1])
        s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
        s += ", epochs : %d" % self.data.shape[0]
        s += ', channels : %d' % self.data.shape[1]
        s += ', ~%s' % (sizeof_fmt(self._size),)
        return "<EpochsTFR | %s>" % s
    def __abs__(self):
        """Take the absolute value."""
        epochs = self.copy()
        epochs.data = np.abs(self.data)
        return epochs
    def average(self, method='mean', dim='epochs', copy=False):
        """Average the data across epochs.
        Parameters
        ----------
        method : str | callable
            How to combine the data. If "mean"/"median", the mean/median
            are returned. Otherwise, must be a callable which, when passed
            an array of shape (n_epochs, n_channels, n_freqs, n_time)
            returns an array of shape (n_channels, n_freqs, n_time).
            Note that due to file type limitations, the kind for all
            these will be "average".
        dim : 'epochs' | 'freqs' | 'times'
            The dimension along which to combine the data.
        copy : bool
            Whether to return a copy of the modified instance,
            or modify in place. Ignored when ``dim='epochs'``
            because a new instance must be returned.
        Returns
        -------
        ave : instance of AverageTFR | EpochsTFR
            The averaged data.
        Notes
        -----
        Passing in ``np.median`` is considered unsafe when there is complex
        data because NumPy doesn't compute the marginal median. Numpy currently
        sorts the complex values by real part and return whatever value is
        computed. Use with caution. We use the marginal median in the
        complex case (i.e. the median of each component separately) if
        one passes in ``median``. See a discussion in scipy:
        https://github.com/scipy/scipy/pull/12676#issuecomment-783370228
        """
        _check_option('dim', dim, ('epochs', 'freqs', 'times'))
        # Map the dimension name to the data axis to reduce over.
        axis = dict(epochs=0, freqs=2, times=self.data.ndim - 1)[dim]
        # return a lambda function for computing a combination metric
        # over epochs
        func = _check_combine(mode=method, axis=axis)
        data = func(self.data)
        n_epochs, n_channels, n_freqs, n_times = self.data.shape
        freqs, times = self.freqs, self.times
        # The reduced axis collapses to a single (mean) coordinate.
        if dim == 'freqs':
            freqs = np.mean(self.freqs, keepdims=True)
            n_freqs = 1
        elif dim == 'times':
            times = np.mean(self.times, keepdims=True)
            n_times = 1
        if dim == 'epochs':
            expected_shape = self._data.shape[1:]
        else:
            expected_shape = (n_epochs, n_channels, n_freqs, n_times)
            # keep the reduced axis as a length-1 dimension
            data = np.expand_dims(data, axis=axis)
        if data.shape != expected_shape:
            raise RuntimeError(
                f'You passed a function that resulted in data of shape '
                f'{data.shape}, but it should be {expected_shape}.')
        if dim == 'epochs':
            # Collapsing epochs yields an AverageTFR (new instance always).
            return AverageTFR(info=self.info.copy(), data=data,
                              times=times, freqs=freqs,
                              nave=self.data.shape[0], method=self.method,
                              comment=self.comment)
        elif copy:
            return EpochsTFR(info=self.info.copy(), data=data,
                             times=times, freqs=freqs, method=self.method,
                             comment=self.comment, metadata=self.metadata,
                             events=self.events, event_id=self.event_id)
        else:
            # In-place modification for freqs/times reduction.
            self.data = data
            self._set_times(times)
            self.freqs = freqs
            return self
def combine_tfr(all_tfr, weights='nave'):
    """Merge AverageTFR data by weighted addition.
    Create a new AverageTFR instance, using a combination of the supplied
    instances as its data. By default, the mean (weighted by trials) is used.
    Subtraction can be performed by passing negative weights (e.g., [1, -1]).
    Data must have the same channels and the same time instants.
    Parameters
    ----------
    all_tfr : list of AverageTFR
        The tfr datasets.
    weights : list of float | str
        The weights to apply to the data of each AverageTFR instance.
        Can also be ``'nave'`` to weight according to tfr.nave,
        or ``'equal'`` to use equal weighting (each weighted as ``1/N``).
    Returns
    -------
    tfr : AverageTFR
        The new TFR data.
    Notes
    -----
    .. versionadded:: 0.11.0
    """
    tfr = all_tfr[0].copy()
    if isinstance(weights, str):
        if weights not in ('nave', 'equal'):
            raise ValueError('Weights must be a list of float, or "nave" or '
                             '"equal"')
        if weights == 'nave':
            weights = np.array([e.nave for e in all_tfr], float)
            weights /= weights.sum()
        else:  # == 'equal'
            weights = [1. / len(all_tfr)] * len(all_tfr)
    weights = np.array(weights, float)
    if weights.ndim != 1 or weights.size != len(all_tfr):
        raise ValueError('Weights must be the same size as all_tfr')
    ch_names = tfr.ch_names
    for t_ in all_tfr[1:]:
        # Explicit raises instead of ``assert``: asserts are stripped under
        # ``python -O`` and would otherwise surface as AssertionError with a
        # ValueError instance as the message.
        if t_.ch_names != ch_names:
            raise ValueError("%s and %s do not contain "
                             "the same channels" % (tfr, t_))
        if not np.max(np.abs(t_.times - tfr.times)) < 1e-7:
            raise ValueError("%s and %s do not contain the same time instants"
                             % (tfr, t_))
    # use union of bad channels
    bads = list(set(tfr.info['bads']).union(*(t_.info['bads']
                                              for t_ in all_tfr[1:])))
    tfr.info['bads'] = bads
    # XXX : should be refactored with combined_evoked function
    tfr.data = sum(w * t_.data for w, t_ in zip(weights, all_tfr))
    # effective number of averages for the weighted combination (>= 1)
    tfr.nave = max(int(1. / sum(w ** 2 / e.nave
                                for w, e in zip(weights, all_tfr))), 1)
    return tfr
# Utils
def _get_data(inst, return_itc):
    """Get data from Epochs or Evoked instance as epochs x ch x time."""
    from ..epochs import BaseEpochs
    from ..evoked import Evoked
    if not isinstance(inst, (BaseEpochs, Evoked)):
        raise TypeError('inst must be Epochs or Evoked')
    if isinstance(inst, BaseEpochs):
        return inst.get_data()
    # Evoked: wrap the averaged data as a single "epoch"; inter-trial
    # coherence cannot be computed from an average.
    if return_itc:
        raise ValueError('return_itc must be False for evoked data')
    return inst.data[np.newaxis].copy()
def _prepare_picks(info, data, picks, axis):
    """Prepare the picks."""
    picks = _picks_to_idx(info, picks, exclude='bads')
    info = pick_info(info, picks)
    # Select `picks` along `axis` while leaving every other axis intact.
    indexer = [slice(None)] * data.ndim
    indexer[axis] = picks
    return info, data[tuple(indexer)]
def _centered(arr, newsize):
    """Aux Function to center data."""
    # Return the central `newsize` portion of `arr` along every axis.
    newsize = np.asarray(newsize)
    currsize = np.array(arr.shape)
    start = (currsize - newsize) // 2
    stop = start + newsize
    slices = tuple(slice(b, e) for b, e in zip(start, stop))
    return arr[slices]
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
                 baseline, vmin, vmax, dB, sfreq, copy=None):
    """Aux Function to prepare tfr computation."""
    # Only copy by default when baselining would otherwise mutate the input.
    if copy is None:
        copy = baseline is not None
    data = rescale(data, times, baseline, mode, copy=copy)
    if np.iscomplexobj(data):
        # complex amplitude → real power (for plotting); if data are
        # real-valued they should already be power
        data = (data * data.conj()).real
    # crop time
    # itmin/itmax stay None when the corresponding bound is None, so the
    # slice below is open-ended on that side.
    itmin, itmax = None, None
    idx = np.where(_time_mask(times, tmin, tmax, sfreq=sfreq))[0]
    if tmin is not None:
        itmin = idx[0]
    if tmax is not None:
        itmax = idx[-1] + 1
    times = times[itmin:itmax]
    # crop freqs
    ifmin, ifmax = None, None
    # NOTE(review): _time_mask is reused for the frequency axis; sfreq is
    # presumably only used for tolerance there — confirm in its definition.
    idx = np.where(_time_mask(freqs, fmin, fmax, sfreq=sfreq))[0]
    if fmin is not None:
        ifmin = idx[0]
    if fmax is not None:
        ifmax = idx[-1] + 1
    freqs = freqs[ifmin:ifmax]
    # crop data
    data = data[:, ifmin:ifmax, itmin:itmax]
    if dB:
        # convert power to decibels
        data = 10 * np.log10(data)
    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
    return data, times, freqs, vmin, vmax
def _check_decim(decim):
    """Aux function checking the decim parameter."""
    _validate_type(decim, ('int-like', slice), 'decim')
    # Promote an integer decimation factor to an equivalent step slice.
    if isinstance(decim, slice):
        sl = decim
    else:
        sl = slice(None, None, int(decim))
    # guarantee a concrete step so callers can rely on `decim.step`
    if sl.step is None:
        sl = slice(sl.start, sl.stop, 1)
    return sl
# i/o
@verbose
def write_tfrs(fname, tfr, overwrite=False, *, verbose=None):
    """Write a TFR dataset to hdf5.
    Parameters
    ----------
    fname : str
        The file name, which should end with ``-tfr.h5``.
    tfr : AverageTFR | list of AverageTFR | EpochsTFR
        The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is not None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed.
    %(overwrite)s
    %(verbose)s
    See Also
    --------
    read_tfrs
    Notes
    -----
    .. versionadded:: 0.9.0
    """
    _, write_hdf5 = _import_h5io_funcs()
    # Normalize to a list so single instances and lists share one code path.
    tfrs = tfr if isinstance(tfr, (list, tuple)) else [tfr]
    out = []
    for index, tfr_ in enumerate(tfrs):
        # Unnamed datasets fall back to their position as the condition.
        comment = index if tfr_.comment is None else tfr_.comment
        out.append(_prepare_write_tfr(tfr_, condition=comment))
    write_hdf5(fname, out, overwrite=overwrite, title='mnepython',
               slash='replace')
def _prepare_write_tfr(tfr, condition):
    """Aux function."""
    # Attributes common to AverageTFR and EpochsTFR.
    attributes = dict(times=tfr.times, freqs=tfr.freqs, data=tfr.data,
                      info=tfr.info, comment=tfr.comment, method=tfr.method)
    if hasattr(tfr, 'nave'):  # AverageTFR
        attributes['nave'] = tfr.nave
    elif hasattr(tfr, 'events'):  # EpochsTFR
        attributes.update(
            events=tfr.events, event_id=tfr.event_id,
            selection=tfr.selection, drop_log=tfr.drop_log,
            metadata=_prepare_write_metadata(tfr.metadata))
    return condition, attributes
@verbose
def read_tfrs(fname, condition=None, *, verbose=None):
    """Read TFR datasets from hdf5 file.
    Parameters
    ----------
    fname : str
        The file name, which should end with -tfr.h5 .
    condition : int or str | None
        The condition to load. If None, all conditions will be returned.
        Defaults to None.
    %(verbose)s
    Returns
    -------
    tfr : AverageTFR | list of AverageTFR | EpochsTFR
        Depending on ``condition`` either the TFR object or a list of multiple
        TFR objects.
    See Also
    --------
    write_tfrs
    Notes
    -----
    .. versionadded:: 0.9.0
    """
    check_fname(fname, 'tfr', ('-tfr.h5', '_tfr.h5'))
    read_hdf5, _ = _import_h5io_funcs()
    logger.info('Reading %s ...' % fname)
    tfr_data = read_hdf5(fname, title='mnepython', slash='replace')
    # tfr_data is a list of (condition, attributes-dict) pairs as written by
    # _prepare_write_tfr; rebuild the Info object for each entry in place.
    for k, tfr in tfr_data:
        tfr['info'] = Info(tfr['info'])
        tfr['info']._check_consistency()
        if 'metadata' in tfr:
            tfr['metadata'] = _prepare_read_metadata(tfr['metadata'])
    # 'nave' is only written for AverageTFR datasets (see _prepare_write_tfr),
    # so its presence in the last-seen entry distinguishes the class to build.
    is_average = 'nave' in tfr
    if condition is not None:
        if not is_average:
            raise NotImplementedError('condition not supported when reading '
                                      'EpochsTFR.')
        tfr_dict = dict(tfr_data)
        if condition not in tfr_dict:
            keys = ['%s' % k for k in tfr_dict]
            raise ValueError('Cannot find condition ("{}") in this file. '
                             'The file contains "{}""'
                             .format(condition, " or ".join(keys)))
        out = AverageTFR(**tfr_dict[condition])
    else:
        inst = AverageTFR if is_average else EpochsTFR
        # zip(*tfr_data) transposes to (conditions, attribute-dicts); build
        # one instance per stored dataset.
        out = [inst(**d) for d in list(zip(*tfr_data))[1]]
    return out
def _get_timefreqs(tfr, timefreqs):
    """Find and/or setup timefreqs for `tfr.plot_joint`."""
    # Input check
    timefreq_error_msg = (
        "Supplied `timefreqs` are somehow malformed. Please supply None, "
        "a list of tuple pairs, or a dict of such tuple pairs, not: ")
    if isinstance(timefreqs, dict):
        # dict maps (time, freq) keys to (time_halfwidth, freq_halfwidth)
        # values; both keys and values must be numeric pairs.
        for k, v in timefreqs.items():
            for item in (k, v):
                if len(item) != 2 or any((not _is_numeric(n) for n in item)):
                    raise ValueError(timefreq_error_msg, item)
    elif timefreqs is not None:
        if not hasattr(timefreqs, "__len__"):
            raise ValueError(timefreq_error_msg, timefreqs)
        if len(timefreqs) == 2 and all((_is_numeric(v) for v in timefreqs)):
            timefreqs = [tuple(timefreqs)]  # stick a pair of numbers in a list
        else:
            # otherwise require a sequence of numeric (time, freq) pairs
            for item in timefreqs:
                if (hasattr(item, "__len__") and len(item) == 2 and
                        all((_is_numeric(n) for n in item))):
                    pass
                else:
                    raise ValueError(timefreq_error_msg, item)
    # If None, automatic identification of max peak
    else:
        from scipy.signal import argrelmax
        # order scales with the number of time points so tiny local maxima
        # on a fine time grid are not picked up
        order = max((1, tfr.data.shape[2] // 30))
        peaks_idx = argrelmax(tfr.data, order=order, axis=2)
        if peaks_idx[0].size == 0:
            # no local maxima: fall back to the global maximum
            _, p_t, p_f = np.unravel_index(tfr.data.argmax(), tfr.data.shape)
            timefreqs = [(tfr.times[p_t], tfr.freqs[p_f])]
        else:
            # NOTE(review): peak values are read from channel 0 only
            # (tfr.data[0, ...]) while argrelmax indices span all channels —
            # confirm this is intentional.
            peaks = [tfr.data[0, f, t] for f, t in
                     zip(peaks_idx[1], peaks_idx[2])]
            peakmax_idx = np.argmax(peaks)
            peakmax_time = tfr.times[peaks_idx[2][peakmax_idx]]
            peakmax_freq = tfr.freqs[peaks_idx[1][peakmax_idx]]
            timefreqs = [(peakmax_time, peakmax_freq)]
    # Normalize to {(time, freq): array of halfwidths}; entries without
    # explicit halfwidths (list input or auto-detected peaks) get (0, 0).
    timefreqs = {
        tuple(k): np.asarray(timefreqs[k]) if isinstance(timefreqs, dict)
        else np.array([0, 0]) for k in timefreqs}
    return timefreqs
def _preproc_tfr_instance(tfr, picks, tmin, tmax, fmin, fmax, vmin, vmax, dB,
                          mode, baseline, exclude, copy=True):
    """Baseline and truncate (times and freqs) a TFR instance."""
    tfr = tfr.copy() if copy else tfr
    # `exclude` is only honored when `picks` is None; explicit picks win.
    exclude = None if picks is None else exclude
    picks = _picks_to_idx(tfr.info, picks, exclude='bads')
    pick_names = [tfr.info['ch_names'][pick] for pick in picks]
    tfr.pick_channels(pick_names)
    if exclude == 'bads':
        # restrict to bads actually present after picking
        exclude = [ch for ch in tfr.info['bads']
                   if ch in tfr.info['ch_names']]
    if exclude is not None:
        tfr.drop_channels(exclude)
    # Baseline-correct and crop times/freqs; copy=False is safe because the
    # instance was already copied above when requested.
    data, times, freqs, _, _ = _preproc_tfr(
        tfr.data, tfr.times, tfr.freqs, tmin, tmax, fmin, fmax, mode,
        baseline, vmin, vmax, dB, tfr.info['sfreq'], copy=False)
    tfr._set_times(times)
    tfr.freqs = freqs
    tfr.data = data
    return tfr
def _check_tfr_complex(tfr, reason='source space estimation'):
    """Raise if the TFR instance's data array is not complex-valued."""
    if np.iscomplexobj(tfr.data):
        return
    raise RuntimeError(f'Time-frequency data must be complex for {reason}')
# --- dataset metadata row (license/hash/statistics) removed: extraction junk ---
# --- next file: mne/io/tests/test_raw.py ---
# -*- coding: utf-8 -*-
"""Generic tests that all raw classes should run."""
# Authors: MNE Developers
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD-3-Clause
from contextlib import redirect_stdout
from io import StringIO
import math
import os
from os import path as op
from pathlib import Path
import re
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_array_almost_equal,
assert_array_equal, assert_array_less)
import mne
from mne import concatenate_raws, create_info, Annotations, pick_types
from mne.datasets import testing
from mne.io import read_raw_fif, RawArray, BaseRaw, Info, _writing_info_hdf5
from mne.io._digitization import _dig_kind_dict
from mne.io.base import _get_scaling
from mne.io.pick import _ELECTRODE_CH_TYPES, _FNIRS_CH_TYPES_SPLIT
from mne.utils import (_TempDir, catch_logging, _raw_annot, _stamp_to_dt,
object_diff, check_version, requires_pandas,
_import_h5io_funcs)
from mne.io.meas_info import _get_valid_units
from mne.io._digitization import DigPoint
from mne.io.proj import Projection
from mne.io.utils import _mult_cal_one
from mne.io.constants import FIFF
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_raw.fif')
def assert_named_constants(info):
    """Assert that info['chs'] has named constants."""
    # for now we just check one
    __tracebackhide__ = True
    rep = repr(info['chs'][0])
    patterns = ('.*FIFFV_COORD_.*', '.*FIFFV_COIL_.*', '.*FIFF_UNIT_.*',
                '.*FIFF_UNITM_.*')
    for pattern in patterns:
        assert re.match(pattern, rep, re.DOTALL) is not None, (pattern, rep)
def test_orig_units():
    """Test the error handling for original units."""
    # A channel with a matching original unit is accepted.
    info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg')
    BaseRaw(info, last_samps=[1], orig_units={'Cz': 'nV'})
    # A unit mapped to a channel name that does not exist must be rejected.
    with pytest.raises(ValueError, match='has no associated original unit.'):
        info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg')
        BaseRaw(info, last_samps=[1], orig_units={'not_Cz': 'nV'})
    # Anything that is not a dict must be rejected as well.
    with pytest.raises(ValueError, match='orig_units must be of type dict'):
        info = create_info(ch_names=['Cz'], sfreq=100, ch_types='eeg')
        BaseRaw(info, last_samps=[1], orig_units=True)
def _test_raw_reader(reader, test_preloading=True, test_kwargs=True,
                     boundary_decimal=2, test_scaling=True, test_rank=True,
                     **kwargs):
    """Test reading, writing and slicing of raw classes.
    Parameters
    ----------
    reader : function
        Function to test.
    test_preloading : bool
        Whether not preloading is implemented for the reader. If True, both
        cases and memory mapping to file are tested.
    test_kwargs : bool
        If True, re-instantiate the reader from ``raw._init_kwargs`` and
        check the result matches.
    boundary_decimal : int
        Number of decimals up to which the boundary should match.
    test_scaling : bool
        If True, assert the data magnitudes look like SI units; otherwise a
        relative tolerance derived from the data is used.
    test_rank : bool | 'less' | 'equal'
        How to compare the matrix rank after applying an averaging projector
        (rank should drop by one); False skips the rank checks.
    **kwargs :
        Arguments for the reader. Note: Do not use preload as kwarg.
        Use ``test_preloading`` instead.
    Returns
    -------
    raw : instance of Raw
        A preloaded Raw object.
    """
    tempdir = _TempDir()
    rng = np.random.RandomState(0)
    # "montage" is consumed here so it is not forwarded to the reader call.
    montage = None
    if "montage" in kwargs:
        montage = kwargs['montage']
        del kwargs['montage']
    if test_preloading:
        raw = reader(preload=True, **kwargs)
        rep = repr(raw)
        assert rep.count('<') == 1
        assert rep.count('>') == 1
        if montage is not None:
            raw.set_montage(montage)
        # don't assume the first is preloaded
        buffer_fname = op.join(tempdir, 'buffer')
        picks = rng.permutation(np.arange(len(raw.ch_names) - 1))[:10]
        picks = np.append(picks, len(raw.ch_names) - 1)  # test trigger channel
        # slices chosen to cross the reader's on-disk buffer boundaries
        bnd = min(int(round(raw.buffer_size_sec *
                            raw.info['sfreq'])), raw.n_times)
        slices = [slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd),
                  slice(3, 300), slice(None), slice(1, bnd)]
        if raw.n_times >= 2 * bnd:  # at least two complete blocks
            slices += [slice(bnd, 2 * bnd), slice(bnd, bnd + 1),
                       slice(0, bnd + 100)]
        # memmap-preloaded and lazy readers must agree with fully preloaded
        other_raws = [reader(preload=buffer_fname, **kwargs),
                      reader(preload=False, **kwargs)]
        for sl_time in slices:
            data1, times1 = raw[picks, sl_time]
            for other_raw in other_raws:
                data2, times2 = other_raw[picks, sl_time]
                assert_allclose(
                    data1, data2, err_msg='Data mismatch with preload')
                assert_allclose(times1, times2)
        # test projection vs cals and data units
        other_raw = reader(preload=False, **kwargs)
        other_raw.del_proj()
        eeg = meg = fnirs = False
        # pick one representative channel type and its expected SI magnitude
        if 'eeg' in raw:
            eeg, atol = True, 1e-18
        elif 'grad' in raw:
            meg, atol = 'grad', 1e-24
        elif 'mag' in raw:
            meg, atol = 'mag', 1e-24
        elif 'hbo' in raw:
            fnirs, atol = 'hbo', 1e-10
        elif 'hbr' in raw:
            fnirs, atol = 'hbr', 1e-10
        else:
            assert 'fnirs_cw_amplitude' in raw, 'New channel type necessary?'
            fnirs, atol = 'fnirs_cw_amplitude', 1e-10
        picks = pick_types(
            other_raw.info, meg=meg, eeg=eeg, fnirs=fnirs)
        col_names = [other_raw.ch_names[pick] for pick in picks]
        # unit-norm averaging projector over the selected channels
        proj = np.ones((1, len(picks)))
        proj /= np.sqrt(proj.shape[1])
        proj = Projection(
            data=dict(data=proj, nrow=1, row_names=None,
                      col_names=col_names, ncol=len(picks)),
            active=False)
        assert len(other_raw.info['projs']) == 0
        other_raw.add_proj(proj)
        assert len(other_raw.info['projs']) == 1
        # Orders of projector application, data loading, and reordering
        # equivalent:
        # 1. load->apply->get
        data_load_apply_get = \
            other_raw.copy().load_data().apply_proj().get_data(picks)
        # 2. apply->get (and don't allow apply->pick)
        apply = other_raw.copy().apply_proj()
        data_apply_get = apply.get_data(picks)
        data_apply_get_0 = apply.get_data(picks[0])[0]
        with pytest.raises(RuntimeError, match='loaded'):
            apply.copy().pick(picks[0]).get_data()
        # 3. apply->load->get
        data_apply_load_get = apply.copy().load_data().get_data(picks)
        data_apply_load_get_0, data_apply_load_get_1 = \
            apply.copy().load_data().pick(picks[:2]).get_data()
        # 4. reorder->apply->load->get
        all_picks = np.arange(len(other_raw.ch_names))
        reord = np.concatenate((
            picks[1::2],
            picks[0::2],
            np.setdiff1d(all_picks, picks)))
        rev = np.argsort(reord)
        assert_array_equal(reord[rev], all_picks)
        assert_array_equal(rev[reord], all_picks)
        reorder = other_raw.copy().pick(reord)
        assert reorder.ch_names == [other_raw.ch_names[r] for r in reord]
        assert reorder.ch_names[0] == other_raw.ch_names[picks[1]]
        assert_allclose(reorder.get_data([0]), other_raw.get_data(picks[1]))
        reorder_apply = reorder.copy().apply_proj()
        assert reorder_apply.ch_names == reorder.ch_names
        assert reorder_apply.ch_names[0] == apply.ch_names[picks[1]]
        assert_allclose(reorder_apply.get_data([0]), apply.get_data(picks[1]),
                        atol=1e-18)
        data_reorder_apply_load_get = \
            reorder_apply.load_data().get_data(rev[:len(picks)])
        data_reorder_apply_load_get_1 = \
            reorder_apply.copy().load_data().pick([0]).get_data()[0]
        assert reorder_apply.ch_names[0] == apply.ch_names[picks[1]]
        assert (data_load_apply_get.shape ==
                data_apply_get.shape ==
                data_apply_load_get.shape ==
                data_reorder_apply_load_get.shape)
        del apply
        # first check that our data are (probably) in the right units
        data = data_load_apply_get.copy()
        data = data - np.mean(data, axis=1, keepdims=True)  # can be offsets
        np.abs(data, out=data)
        if test_scaling:
            maxval = atol * 1e16
            assert_array_less(data, maxval)
            minval = atol * 1e6
            assert_array_less(minval, np.median(data))
        else:
            atol = 1e-7 * np.median(data)  # 1e-7 * MAD
        # ranks should all be reduced by 1
        if test_rank == 'less':
            cmp = np.less
        elif test_rank is False:
            cmp = None
        else:  # anything else is like True or 'equal'
            assert test_rank is True or test_rank == 'equal', test_rank
            cmp = np.equal
        rank_load_apply_get = np.linalg.matrix_rank(data_load_apply_get)
        rank_apply_get = np.linalg.matrix_rank(data_apply_get)
        rank_apply_load_get = np.linalg.matrix_rank(data_apply_load_get)
        if cmp is not None:
            assert cmp(rank_load_apply_get, len(col_names) - 1)
            assert cmp(rank_apply_get, len(col_names) - 1)
            assert cmp(rank_apply_load_get, len(col_names) - 1)
        # and they should all match
        t_kw = dict(
            atol=atol, err_msg='before != after, likely _mult_cal_one prob')
        assert_allclose(data_apply_get[0], data_apply_get_0, **t_kw)
        assert_allclose(data_apply_load_get_1,
                        data_reorder_apply_load_get_1, **t_kw)
        assert_allclose(data_load_apply_get[0], data_apply_load_get_0, **t_kw)
        assert_allclose(data_load_apply_get, data_apply_get, **t_kw)
        assert_allclose(data_load_apply_get, data_apply_load_get, **t_kw)
        if 'eeg' in raw:
            # an average EEG reference projector must match direct referencing
            other_raw.del_proj()
            direct = \
                other_raw.copy().load_data().set_eeg_reference().get_data()
            other_raw.set_eeg_reference(projection=True)
            assert len(other_raw.info['projs']) == 1
            this_proj = other_raw.info['projs'][0]['data']
            assert this_proj['col_names'] == col_names
            assert this_proj['data'].shape == proj['data']['data'].shape
            assert_allclose(
                np.linalg.norm(proj['data']['data']), 1., atol=1e-6)
            assert_allclose(
                np.linalg.norm(this_proj['data']), 1., atol=1e-6)
            assert_allclose(this_proj['data'], proj['data']['data'])
            proj = other_raw.apply_proj().get_data()
            assert_allclose(proj[picks], data_load_apply_get, atol=1e-10)
            assert_allclose(proj, direct, atol=1e-10, err_msg=t_kw['err_msg'])
    else:
        raw = reader(**kwargs)
    n_samp = len(raw.times)
    assert_named_constants(raw.info)
    # smoke test for gh #9743
    ids = [id(ch['loc']) for ch in raw.info['chs']]
    assert len(set(ids)) == len(ids)
    full_data = raw._data
    assert raw.__class__.__name__ in repr(raw)  # to test repr
    assert raw.info.__class__.__name__ in repr(raw.info)
    assert isinstance(raw.info['dig'], (type(None), list))
    data_max = full_data.max()
    data_min = full_data.min()
    # these limits could be relaxed if we actually find data with
    # huge values (in SI units)
    assert data_max < 1e5
    assert data_min > -1e5
    if isinstance(raw.info['dig'], list):
        for di, d in enumerate(raw.info['dig']):
            assert isinstance(d, DigPoint), (di, d)
    # gh-5604
    meas_date = raw.info['meas_date']
    assert meas_date is None or meas_date >= _stamp_to_dt((0, 0))
    # test repr_html
    assert 'Good channels' in raw.info._repr_html_()
    # test resetting raw
    if test_kwargs:
        raw2 = reader(**raw._init_kwargs)
        assert set(raw.info.keys()) == set(raw2.info.keys())
        assert_array_equal(raw.times, raw2.times)
    # Test saving and reading
    out_fname = op.join(tempdir, 'test_raw.fif')
    raw = concatenate_raws([raw])
    raw.save(out_fname, tmax=raw.times[-1], overwrite=True, buffer_size_sec=1)
    # Test saving with not correct extension
    out_fname_h5 = op.join(tempdir, 'test_raw.h5')
    with pytest.raises(IOError, match='raw must end with .fif or .fif.gz'):
        raw.save(out_fname_h5)
    raw3 = read_raw_fif(out_fname)
    assert_named_constants(raw3.info)
    assert set(raw.info.keys()) == set(raw3.info.keys())
    assert_allclose(raw3[0:20][0], full_data[0:20], rtol=1e-6,
                    atol=1e-20)  # atol is very small but > 0
    assert_allclose(raw.times, raw3.times, atol=1e-6, rtol=1e-6)
    assert not math.isnan(raw3.info['highpass'])
    assert not math.isnan(raw3.info['lowpass'])
    assert not math.isnan(raw.info['highpass'])
    assert not math.isnan(raw.info['lowpass'])
    assert raw3.info['kit_system_id'] == raw.info['kit_system_id']
    # Make sure concatenation works
    first_samp = raw.first_samp
    last_samp = raw.last_samp
    concat_raw = concatenate_raws([raw.copy(), raw])
    assert concat_raw.n_times == 2 * raw.n_times
    assert concat_raw.first_samp == first_samp
    assert concat_raw.last_samp - last_samp + first_samp == last_samp + 1
    # the concatenation joint must be annotated as a BAD boundary
    idx = np.where(concat_raw.annotations.description == 'BAD boundary')[0]
    expected_bad_boundary_onset = raw._last_time
    assert_array_almost_equal(concat_raw.annotations.onset[idx],
                              expected_bad_boundary_onset,
                              decimal=boundary_decimal)
    if raw.info['meas_id'] is not None:
        for key in ['secs', 'usecs', 'version']:
            assert raw.info['meas_id'][key] == raw3.info['meas_id'][key]
        assert_array_equal(raw.info['meas_id']['machid'],
                           raw3.info['meas_id']['machid'])
    assert isinstance(raw.annotations, Annotations)
    # Make a "soft" test on units: They have to be valid SI units as in
    # mne.io.meas_info.valid_units, but we accept any lower/upper case for now.
    valid_units = _get_valid_units()
    valid_units_lower = [unit.lower() for unit in valid_units]
    if raw._orig_units is not None:
        assert isinstance(raw._orig_units, dict)
        for ch_name, unit in raw._orig_units.items():
            assert unit.lower() in valid_units_lower, ch_name
    # Test picking with and without preload
    if test_preloading:
        preload_kwargs = (dict(preload=True), dict(preload=False))
    else:
        preload_kwargs = (dict(),)
    n_ch = len(raw.ch_names)
    picks = rng.permutation(n_ch)
    for preload_kwarg in preload_kwargs:
        these_kwargs = kwargs.copy()
        these_kwargs.update(preload_kwarg)
        # don't use the same filename or it could create problems
        if isinstance(these_kwargs.get('preload', None), str) and \
                op.isfile(these_kwargs['preload']):
            these_kwargs['preload'] += '-1'
        whole_raw = reader(**these_kwargs)
        print(whole_raw)  # __repr__
        assert n_ch >= 2
        picks_1 = picks[:n_ch // 2]
        picks_2 = picks[n_ch // 2:]
        raw_1 = whole_raw.copy().pick(picks_1)
        raw_2 = whole_raw.copy().pick(picks_2)
        data, times = whole_raw[:]
        data_1, times_1 = raw_1[:]
        data_2, times_2 = raw_2[:]
        assert_array_equal(times, times_1)
        assert_array_equal(data[picks_1], data_1)
        assert_array_equal(times, times_2,)
        assert_array_equal(data[picks_2], data_2)
    # Make sure that writing info to h5 format
    # (all fields should be compatible)
    if check_version('h5io'):
        read_hdf5, write_hdf5 = _import_h5io_funcs()
        fname_h5 = op.join(tempdir, 'info.h5')
        with _writing_info_hdf5(raw.info):
            write_hdf5(fname_h5, raw.info)
        new_info = Info(read_hdf5(fname_h5))
        assert object_diff(new_info, raw.info) == ''
    # Make sure that changing directory does not break anything
    if test_preloading:
        these_kwargs = kwargs.copy()
        key = None
        # find which kwarg carries the path for this reader
        for key in ('fname',
                    'input_fname',  # artemis123
                    'vhdr_fname',  # BV
                    'pdf_fname',  # BTi
                    'directory',  # CTF
                    'filename',  # nedf
                    ):
            try:
                fname = kwargs[key]
            except KeyError:
                key = None
            else:
                break
        # len(kwargs) == 0 for the fake arange reader
        if len(kwargs):
            assert key is not None, sorted(kwargs.keys())
            this_fname = fname[0] if isinstance(fname, list) else fname
            dirname = op.dirname(this_fname)
            these_kwargs[key] = op.basename(this_fname)
            these_kwargs['preload'] = False
            orig_dir = os.getcwd()
            try:
                # reading via a relative path must work too
                os.chdir(dirname)
                raw_chdir = reader(**these_kwargs)
            finally:
                os.chdir(orig_dir)
            raw_chdir.load_data()
    # make sure that cropping works (with first_samp shift)
    if n_samp >= 50:  # we crop to this number of samples below
        for t_prop in (0., 0.5):
            _test_raw_crop(reader, t_prop, kwargs)
            if test_preloading:
                use_kwargs = kwargs.copy()
                use_kwargs['preload'] = True
                _test_raw_crop(reader, t_prop, use_kwargs)
    # make sure electrode-like sensor locations show up as dig points
    eeg_dig = [d for d in (raw.info['dig'] or [])
               if d['kind'] == _dig_kind_dict['eeg']]
    pick_kwargs = dict()
    for t in _ELECTRODE_CH_TYPES + ('fnirs',):
        pick_kwargs[t] = True
    dig_picks = pick_types(raw.info, exclude=(), **pick_kwargs)
    dig_types = _ELECTRODE_CH_TYPES + _FNIRS_CH_TYPES_SPLIT
    assert (len(dig_picks) > 0) == any(t in raw for t in dig_types)
    if len(dig_picks):
        eeg_loc = np.array([  # eeg_loc a bit of a misnomer to match eeg_dig
            raw.info['chs'][pick]['loc'][:3] for pick in dig_picks])
        eeg_loc = eeg_loc[np.isfinite(eeg_loc).all(axis=1)]
        if len(eeg_loc):
            if 'fnirs_cw_amplitude' in raw:
                # fNIRS channels have source+detector, hence up to 2x digs
                assert 2 * len(eeg_dig) >= len(eeg_loc)
            else:
                assert len(eeg_dig) >= len(eeg_loc)  # could have some excluded
    # make sure that dig points in head coords implies that fiducials are
    # present
    if len(raw.info['dig'] or []) > 0:
        card_pts = [d for d in raw.info['dig']
                    if d['kind'] == _dig_kind_dict['cardinal']]
        eeg_dig_head = [
            d for d in eeg_dig if d['coord_frame'] == FIFF.FIFFV_COORD_HEAD]
        if len(eeg_dig_head):
            assert len(card_pts) == 3, 'Cardinal points missing'
        if len(card_pts) == 3:  # they should all be in head coords then
            assert len(eeg_dig_head) == len(eeg_dig)
    return raw
def _test_raw_crop(reader, t_prop, kwargs):
    """Check crop/concatenate bookkeeping of a raw reader.

    Reads via ``reader(**kwargs)``, crops to 50 samples, concatenates three
    copies, then progressively crops instances away, verifying ``times`` and
    ``first_time`` (including the ``first_samp`` shift) at each step.
    ``t_prop`` is the fraction of the cropped length used as an additional
    crop offset into the first instance.
    """
    raw_1 = reader(**kwargs)
    n_samp = 50  # crop to this number of samples (per instance)
    crop_t = n_samp / raw_1.info['sfreq']
    t_start = t_prop * crop_t  # also crop to some fraction into the first inst
    extra = f' t_start={t_start}, preload={kwargs.get("preload", False)}'
    stop = (n_samp - 1) / raw_1.info['sfreq']
    raw_1.crop(0, stop)
    assert len(raw_1.times) == 50
    first_time = raw_1.first_time
    # all time comparisons below are to within half a sample
    atol = 0.5 / raw_1.info['sfreq']
    assert_allclose(raw_1.times[-1], stop, atol=atol)
    raw_2, raw_3 = raw_1.copy(), raw_1.copy()
    # total duration: 3 x (per-instance duration) plus the 2 one-sample
    # gaps between consecutive instances
    t_tot = raw_1.times[-1] * 3 + 2. / raw_1.info['sfreq']
    raw_concat = concatenate_raws([raw_1, raw_2, raw_3])
    assert len(raw_concat._filenames) == 3
    assert_allclose(raw_concat.times[-1], t_tot)
    assert_allclose(raw_concat.first_time, first_time)
    # keep all instances, but crop to t_start at the beginning
    raw_concat.crop(t_start, None)
    assert len(raw_concat._filenames) == 3
    assert_allclose(raw_concat.times[-1], t_tot - t_start, atol=atol)
    assert_allclose(
        raw_concat.first_time, first_time + t_start, atol=atol,
        err_msg=f'Base concat, {extra}')
    # drop the first instance
    raw_concat.crop(crop_t, None)
    assert len(raw_concat._filenames) == 2
    assert_allclose(
        raw_concat.times[-1], t_tot - t_start - crop_t, atol=atol)
    assert_allclose(
        raw_concat.first_time, first_time + t_start + crop_t,
        atol=atol, err_msg=f'Dropping one, {extra}')
    # drop the second instance, leaving just one
    raw_concat.crop(crop_t, None)
    assert len(raw_concat._filenames) == 1
    assert_allclose(
        raw_concat.times[-1], t_tot - t_start - 2 * crop_t, atol=atol)
    assert_allclose(
        raw_concat.first_time, first_time + t_start + 2 * crop_t,
        atol=atol, err_msg=f'Dropping two, {extra}')
def _test_concat(reader, *args):
    """Test concatenation of raw classes that allow not preloading."""
    # Appending two freshly-read instances must yield identical data no
    # matter whether they were preloaded or not.
    expected = None
    for preload in (True, False):
        first = reader(*args, preload=preload)
        second = reader(*args, preload=preload)
        first.append(second)
        first.load_data()
        if expected is None:
            expected = first[:, :][0]
        assert_allclose(expected, first[:, :][0])
    # Cropping into two contiguous halves (split around t=0.5 s) and
    # re-appending must round-trip the data for every combination of
    # preload states before and after the append.
    for first_preload in (True, False):
        raw = reader(*args, preload=first_preload)
        expected = raw[:, :][0]
        split = np.argmin(np.abs(raw.times - 0.5))
        t_end, t_begin = raw.times[split], raw.times[split + 1]
        for pre_a, pre_b in ((True, True), (True, False), (False, False)):
            for last_preload in (True, False):
                part_a = raw.copy().crop(0, t_end)
                if pre_a:
                    part_a.load_data()
                part_b = raw.copy().crop(t_begin, None)
                if pre_b:
                    part_b.load_data()
                part_a.append(part_b)
                if last_preload:
                    part_a.load_data()
                assert_allclose(expected, part_a[:, :][0])
@testing.requires_testing_data
def test_time_as_index():
    """Test indexing of raw times."""
    raw = read_raw_fif(raw_fname)
    # Legacy truncation-based conversion maps some distinct times onto the
    # same sample, so duplicates are expected.
    truncated = raw.time_as_index(raw.times)
    assert len(set(truncated)) != len(truncated)
    # Rounding-based conversion must map each time back to its own sample.
    rounded = raw.time_as_index(raw.times, use_rounding=True)
    assert_array_equal(rounded, np.arange(len(raw.times)))
@pytest.mark.parametrize('meas_date', [None, "orig"])
@pytest.mark.parametrize('first_samp', [0, 10000])
def test_crop_by_annotations(meas_date, first_samp):
    """Test crop by annotations of raw."""
    raw = read_raw_fif(raw_fname)
    if meas_date is None:
        raw.set_meas_date(None)
    # rebuild to apply the desired first_samp offset
    raw = mne.io.RawArray(raw.get_data(), raw.info, first_samp=first_samp)
    onset = np.array([0, 1.5], float)
    if meas_date is not None:
        # with a measurement date the onsets are absolute, so shift by the
        # first_samp offset
        onset += raw.first_time
    annot = mne.Annotations(
        onset=onset,
        duration=[1, 0.5],
        description=["a", "b"],
        orig_time=raw.info['meas_date'])
    raw.set_annotations(annot)
    raws = raw.crop_by_annotations()
    # one cropped instance per annotation, each keeping its own annotation
    assert len(raws) == 2
    assert len(raws[0].annotations) == 1
    assert raws[0].times[-1] == pytest.approx(annot[:1].duration[0], rel=1e-3)
    assert raws[0].annotations.description[0] == annot.description[0]
    assert len(raws[1].annotations) == 1
    assert raws[1].times[-1] == pytest.approx(annot[1:2].duration[0], rel=5e-3)
    assert raws[1].annotations.description[0] == annot.description[1]
@pytest.mark.parametrize('offset, origin', [
    pytest.param(0, None, id='times in s. relative to first_samp (default)'),
    pytest.param(0, 2.0, id='times in s. relative to first_samp'),
    pytest.param(1, 1.0, id='times in s. relative to meas_date'),
    pytest.param(2, 0.0, id='absolute times in s. relative to 0')])
def test_time_as_index_ref(offset, origin):
    """Test indexing of raw times."""
    # 10 channels at 10 Hz with first_samp=10, meas_date at 1 s
    raw = RawArray(data=np.empty((10, 10)),
                   info=create_info(ch_names=10, sfreq=10.), first_samp=10)
    raw.set_meas_date(1)
    # shifting times by ``offset`` and compensating via ``origin`` must
    # always map back onto the raw sample indices
    shifted = raw.times + offset
    inds = raw.time_as_index(shifted, use_rounding=True, origin=origin)
    assert_array_equal(inds, np.arange(raw.n_times))
def test_meas_date_orig_time():
    """Test the relation between meas_time in orig_time."""
    # meas_time is set and orig_time is set:
    # clips the annotations based on raw.data and resets the annotations
    # based on raw.info['meas_date']
    raw = _raw_annot(1, 1.5)
    assert raw.annotations.orig_time == _stamp_to_dt((1, 0))
    assert raw.annotations.onset[0] == 1
    # meas_time is set and orig_time is None:
    # Consider annot.orig_time to be raw.first_samp, clip and reset
    # annotations to have raw.annotations.orig_time == raw.info['meas_date']
    raw = _raw_annot(1, None)
    assert raw.annotations.orig_time == _stamp_to_dt((1, 0))
    assert raw.annotations.onset[0] == 1.5
    # meas_time is None and orig_time is set:
    # Raise error, it makes no sense to have an annotations object that we
    # know when it was acquired and set it on a raw object that does not
    # know when it was acquired.
    with pytest.raises(RuntimeError, match='Ambiguous operation'):
        _raw_annot(None, 1.5)
    # meas_time is None and orig_time is None:
    # Consider annot.orig_time to be raw.first_samp and clip
    raw = _raw_annot(None, None)
    assert raw.annotations.orig_time is None
    assert raw.annotations.onset[0] == 1.5
    assert raw.annotations.duration[0] == 0.2
def test_get_data_reject():
    """Test if reject_by_annotation is working correctly."""
    fs = 256
    ch_names = ["C3", "Cz", "C4"]
    raw = RawArray(np.zeros((len(ch_names), 10 * fs)),
                   create_info(ch_names, sfreq=fs))
    # two overlapping "bad" spans (2-5 s and 4-6 s) merge into 2-6 s,
    # i.e. 4 s = 1024 of the 2560 samples are rejected
    raw.set_annotations(Annotations(onset=[2, 4], duration=[3, 2],
                                    description="bad"))
    # "omit" drops the rejected samples entirely
    with catch_logging() as log:
        data = raw.get_data(reject_by_annotation="omit", verbose=True)
    assert log.getvalue().strip() == (
        'Omitting 1024 of 2560 (40.00%) samples, retaining 1536'
        ' (60.00%) samples.')
    assert data.shape == (len(ch_names), 1536)
    # "nan" keeps the shape and masks rejected samples with NaN
    with catch_logging() as log:
        data = raw.get_data(reject_by_annotation="nan", verbose=True)
    assert log.getvalue().strip() == (
        'Setting 1024 of 2560 (40.00%) samples to NaN, retaining 1536'
        ' (60.00%) samples.')
    assert data.shape == (len(ch_names), 2560)  # shape doesn't change
    assert np.isnan(data).sum() == 3072  # 1024 samples x 3 channels
def test_5839():
    """Test concatenating raw objects with annotations."""
    # Both instances are 1 s long with first_samp=10 at sfreq=10 (so a 1 s
    # offset) and carry one 'dummy' annotation at 0.5 s relative onset.
    # After appending raw_B to raw_A, the second dummy annotation must be
    # shifted by the length of raw_A and BAD/EDGE boundary annotations must
    # be inserted at the seam:
    #
    # raw_A:   |---------XXXXXXXXXX
    #   annot:                AA          (onset 1.5 s, duration 0.2 s)
    # raw_B:   |---------YYYYYYYYYY
    #   annot:                AA
    # output:  |---------XXXXXXXXXXYYYYYYYYYY
    #   annot:                AA---|----AA
    #                            seam (2.0 s)
    EXPECTED_ONSET = [1.5, 2., 2., 2.5]
    EXPECTED_DURATION = [0.2, 0., 0., 0.2]
    EXPECTED_DESCRIPTION = ['dummy', 'BAD boundary', 'EDGE boundary', 'dummy']
    def raw_factory(meas_date):
        # 10 channels, 10 samples at 10 Hz (1 s of data), first_samp=10
        raw = RawArray(data=np.empty((10, 10)),
                       info=create_info(ch_names=10, sfreq=10.),
                       first_samp=10)
        raw.set_meas_date(meas_date)
        raw.set_annotations(annotations=Annotations(onset=[.5],
                                                    duration=[.2],
                                                    description='dummy',
                                                    orig_time=None))
        return raw
    raw_A, raw_B = [raw_factory((x, 0)) for x in [0, 2]]
    raw_A.append(raw_B)
    assert_array_equal(raw_A.annotations.onset, EXPECTED_ONSET)
    assert_array_equal(raw_A.annotations.duration, EXPECTED_DURATION)
    assert_array_equal(raw_A.annotations.description, EXPECTED_DESCRIPTION)
    # the result keeps the first instance's measurement date
    assert raw_A.annotations.orig_time == _stamp_to_dt((0, 0))
def test_repr():
    """Test repr of Raw."""
    sfreq = 256
    info = create_info(3, sfreq)
    raw = RawArray(np.zeros((3, 10 * sfreq)), info)
    r = repr(raw)
    # The ``|`` and parentheses are literal characters of the repr and must
    # be escaped: unescaped, ``|`` turns the pattern into an alternation
    # whose first branch ('<RawArray ') matches any RawArray repr, so the
    # size/duration part was never actually checked.
    pattern = r'<RawArray \| 3 x 2560 \(10\.0 s\), ~.* kB, data loaded>'
    assert re.search(pattern, r) is not None, r
    assert raw._repr_html_()
# A class that sets channel data to np.arange, for testing _test_raw_reader
class _RawArange(BaseRaw):
    """Fake reader: 8 EEG channels at 1 kHz, channel k held constant at k+1."""

    def __init__(self, preload=False, verbose=None):
        # 8 channels named '1'..'8', 1000 Hz, 1000 samples (1 s of data)
        info = create_info(list(str(x) for x in range(1, 9)), 1000., 'eeg')
        super().__init__(info, preload, last_samps=(999,), verbose=verbose)
        assert len(self.times) == 1000

    def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
        # Fill only the requested channels; the others stay NaN so that any
        # accidental use of an unrequested channel is caught downstream.
        one = np.full((8, stop - start), np.nan)
        one[idx] = np.arange(1, 9)[idx, np.newaxis]
        _mult_cal_one(data, one, idx, cals, mult)
def _read_raw_arange(preload=False, verbose=None):
    """Create a ``_RawArange`` instance (function-style reader API)."""
    return _RawArange(preload=preload, verbose=verbose)
def test_test_raw_reader():
    """Test _test_raw_reader."""
    # scaling and rank checks are irrelevant for the synthetic arange data
    _test_raw_reader(_read_raw_arange, test_scaling=False, test_rank='less')
@pytest.mark.slowtest
def test_describe_print():
    """Test print output of describe method."""
    fname = Path(__file__).parent / "data" / "test_raw.fif"
    raw = read_raw_fif(fname)
    # capture what describe() prints
    f = StringIO()
    with redirect_stdout(f):
        raw.describe()
    s = f.getvalue().strip().split("\n")
    assert len(s) == 378
    # Size can be 3.1, 3.3, etc., hence the ``~3\..`` wildcard. The ``|``
    # and parentheses are literal repr characters and must be escaped:
    # unescaped, ``|`` makes the pattern an alternation that trivially
    # matches on the '<Raw ' prefix alone.
    assert re.match(
        r'<Raw \| test_raw\.fif, 376 x 14400 \(24\.0 s\), '
        r'~3\.. MB, data not loaded>', s[0]) is not None, s[0]
    assert s[1] == " ch name type unit min Q1 median Q3 max" # noqa
    assert s[2] == " 0 MEG 0113 GRAD fT/cm -221.80 -38.57 -9.64 19.29 414.67" # noqa
    assert s[-1] == "375 EOG 061 EOG µV -231.41 271.28 277.16 285.66 334.69" # noqa
@requires_pandas
@pytest.mark.slowtest
def test_describe_df():
    """Test returned data frame of describe method."""
    fname = Path(__file__).parent / "data" / "test_raw.fif"
    df = read_raw_fif(fname).describe(data_frame=True)
    # one row per channel, indexed by channel number
    assert df.shape == (376, 8)
    expected_cols = ["name", "type", "unit", "min", "Q1", "median", "Q3",
                     "max"]
    assert df.columns.tolist() == expected_cols
    assert df.index.name == "ch"
    # summary statistics of the first channel
    first_stats = df.iloc[0, 3:].astype(float)
    assert_allclose(first_stats,
                    np.array([-2.218017605790535e-11,
                              -3.857421923113974e-12,
                              -9.643554807784935e-13,
                              1.928710961556987e-12,
                              4.146728567347522e-11]))
def test_get_data_units():
    """Test the "units" argument of get_data method."""
    # Test the unit conversion function
    assert _get_scaling('eeg', 'uV') == 1e6
    assert _get_scaling('eeg', 'dV') == 1e1
    assert _get_scaling('eeg', 'pV') == 1e12
    assert _get_scaling('mag', 'fT') == 1e15
    assert _get_scaling('grad', 'T/m') == 1
    assert _get_scaling('grad', 'T/mm') == 1e-3
    assert _get_scaling('grad', 'fT/m') == 1e15
    assert _get_scaling('grad', 'fT/cm') == 1e13
    assert _get_scaling('csd', 'uV/cm²') == 1e2
    fname = Path(__file__).parent / "data" / "test_raw.fif"
    raw = read_raw_fif(fname)
    # reference values: last sample of the last three channels, in SI units,
    # for the full channel set and the eeg / grad picks respectively
    last = np.array([4.63803098e-05, 7.66563736e-05, 2.71933595e-04])
    last_eeg = np.array([7.12207023e-05, 4.63803098e-05, 7.66563736e-05])
    last_grad = np.array([-3.85742192e-12, 9.64355481e-13, -1.06079103e-11])
    # None
    data_none = raw.get_data()
    assert data_none.shape == (376, 14400)
    assert_array_almost_equal(data_none[-3:, -1], last)
    # str: unit no conversion
    data_str_noconv = raw.get_data(picks=['eeg'], units='V')
    assert data_str_noconv.shape == (60, 14400)
    assert_array_almost_equal(data_str_noconv[-3:, -1], last_eeg)
    # str: simple unit
    data_str_simple = raw.get_data(picks=['eeg'], units='uV')
    assert data_str_simple.shape == (60, 14400)
    assert_array_almost_equal(data_str_simple[-3:, -1], last_eeg * 1e6)
    # str: fraction unit
    data_str_fraction = raw.get_data(picks=['grad'], units='fT/cm')
    assert data_str_fraction.shape == (204, 14400)
    assert_array_almost_equal(data_str_fraction[-3:, -1],
                              last_grad * (1e15 / 1e2))
    # str: more than one channel type but one with unit
    data_str_simplestim = raw.get_data(picks=['eeg', 'stim'], units='V')
    assert data_str_simplestim.shape == (69, 14400)
    assert_array_almost_equal(data_str_simplestim[-3:, -1], last_eeg)
    # str: too many channels
    with pytest.raises(ValueError, match='more than one channel'):
        raw.get_data(units='uV')
    # str: invalid unit
    with pytest.raises(ValueError, match='is not a valid unit'):
        raw.get_data(picks=['eeg'], units='fV/cm')
    # dict: combination of simple and fraction units
    data_dict = raw.get_data(units=dict(grad='fT/cm', mag='fT', eeg='uV'))
    assert data_dict.shape == (376, 14400)
    assert_array_almost_equal(data_dict[0, -1],
                              -3.857421923113974e-12 * (1e15 / 1e2))
    assert_array_almost_equal(data_dict[2, -1], -2.1478272253525944e-13 * 1e15)
    assert_array_almost_equal(data_dict[-2, -1], 7.665637356879529e-05 * 1e6)
    # dict: channel type not in instance
    data_dict_notin = raw.get_data(units=dict(hbo='uM'))
    assert data_dict_notin.shape == (376, 14400)
    assert_array_almost_equal(data_dict_notin[-3:, -1], last)
    # dict: one invalid unit
    with pytest.raises(ValueError, match='is not a valid unit'):
        raw.get_data(units=dict(grad='fT/cV', mag='fT', eeg='uV'))
    # dict: one invalid channel type
    with pytest.raises(KeyError, match='is not a channel type'):
        raw.get_data(units=dict(bad_type='fT/cV', mag='fT', eeg='uV'))
    # not the good type
    with pytest.raises(TypeError, match='instance of None, str, or dict'):
        raw.get_data(units=['fT/cm', 'fT', 'uV'])
def test_repr_dig_point():
    """Test printing of DigPoint."""
    # head-frame EEG points are reported in mm
    dig = DigPoint(r=np.arange(3), coord_frame=FIFF.FIFFV_COORD_HEAD,
                   kind=FIFF.FIFFV_POINT_EEG, ident=0)
    assert 'mm' in repr(dig)
    # MRI-voxel points are reported as voxels, without mm
    dig = DigPoint(r=np.arange(3), coord_frame=FIFF.FIFFV_MNE_COORD_MRI_VOXEL,
                   kind=FIFF.FIFFV_POINT_CARDINAL, ident=0)
    rep = repr(dig)
    assert 'mm' not in rep
    assert 'voxel' in rep
def test_get_data_tmin_tmax():
    """Test tmin and tmax parameters of get_data method."""
    fname = Path(__file__).parent / "data" / "test_raw.fif"
    raw = read_raw_fif(fname)
    full = raw.get_data()
    # tmin and tmax just use time_as_index under the hood
    tmin, tmax = 1, 9
    start, stop = raw.time_as_index([tmin, tmax])
    assert_allclose(full[:, start:stop], raw.get_data(tmin=tmin, tmax=tmax))
    # out-of-bounds tmin/tmax are clipped to the full time range
    assert_allclose(raw.get_data(tmin=-5), full)
    assert_allclose(raw.get_data(tmax=1e6), full)
    # explicit start/stop take precedence over tmin/tmax
    clipped = raw.get_data(start=1, stop=2, tmin=tmin, tmax=tmax)
    assert clipped.shape[1] == 1
    # invalid argument types are rejected
    for kwargs, match in ((dict(start=None), 'start must be .* int'),
                          (dict(stop=2.3), 'stop must be .* int'),
                          (dict(tmin=[1, 2]), 'tmin must be .* float'),
                          (dict(tmax=[1, 2]), 'tmax must be .* float')):
        with pytest.raises(TypeError, match=match):
            raw.get_data(**kwargs)
# Author: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD-3-Clause
from collections import Counter
import numpy as np
from .mixin import TransformerMixin, EstimatorMixin
from .base import _set_cv
from ..io.pick import _picks_to_idx
from ..parallel import parallel_func
from ..utils import logger, verbose
from .. import pick_types, pick_info
class EMS(TransformerMixin, EstimatorMixin):
    """Transformer to compute event-matched spatial filters.

    This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire
    time course: no time window needs to be specified. The result is a
    spatial filter at each time point and a corresponding time course.
    Intuitively, the result gives the similarity between the filter at each
    time point and the data vector (sensors) at that time point.

    .. note:: EMS only works for binary classification.

    Attributes
    ----------
    filters_ : ndarray, shape (n_channels, n_times)
        The set of spatial filters.
    classes_ : ndarray, shape (n_classes,)
        The target classes.

    References
    ----------
    .. footbibliography::
    """

    def __repr__(self):  # noqa: D105
        if not hasattr(self, 'filters_'):
            return '<EMS: not fitted.>'
        return '<EMS: fitted with %i filters on %i classes.>' % (
            len(self.filters_), len(self.classes_))

    def fit(self, X, y):
        """Fit the spatial filters.

        .. note : EMS is fitted on data normalized by channel type before the
                  fitting of the spatial filters.

        Parameters
        ----------
        X : array, shape (n_epochs, n_channels, n_times)
            The training data.
        y : array of int, shape (n_epochs)
            The target classes.

        Returns
        -------
        self : instance of EMS
            Returns self.
        """
        labels = np.unique(y)
        if len(labels) != 2:
            raise ValueError('EMS only works for binary classification.')
        self.classes_ = labels
        # Difference of the two class means, normalized to unit norm across
        # channels at each time point.
        diff = X[y == labels[0]].mean(0) - X[y == labels[1]].mean(0)
        self.filters_ = diff / np.linalg.norm(diff, axis=0)[None, :]
        return self

    def transform(self, X):
        """Transform the data by the spatial filters.

        Parameters
        ----------
        X : array, shape (n_epochs, n_channels, n_times)
            The input data.

        Returns
        -------
        X : array, shape (n_epochs, n_times)
            The input data transformed by the spatial filters.
        """
        return np.sum(X * self.filters_, axis=1)
@verbose
def compute_ems(epochs, conditions=None, picks=None, n_jobs=None, cv=None,
                verbose=None):
    """Compute event-matched spatial filter on epochs.

    This version of EMS :footcite:`SchurgerEtAl2013` operates on the entire
    time course. No time window needs to be specified. The result is a
    spatial filter at each time point and a corresponding time course.
    Intuitively, the result gives the similarity between the filter at each
    time point and the data vector (sensors) at that time point.

    .. note : EMS only works for binary classification.

    .. note : The present function applies a leave-one-out cross-validation,
              following Schurger et al's paper. However, we recommend using
              a stratified k-fold cross-validation. Indeed, leave-one-out
              tends to overfit and cannot be used to estimate the variance
              of the prediction within a given fold.

    .. note : Because of the leave-one-out, this function needs an equal
              number of epochs in each of the two conditions.

    Parameters
    ----------
    epochs : instance of mne.Epochs
        The epochs.
    conditions : list of str | None, default None
        If a list of strings, strings must match the epochs.event_id's key as
        well as the number of conditions supported by the objective_function.
        If None keys in epochs.event_id are used.
    %(picks_good_data)s
    %(n_jobs)s
    cv : cross-validation object | str | None, default LeaveOneOut
        The cross-validation scheme.
    %(verbose)s

    Returns
    -------
    surrogate_trials : ndarray, shape (n_trials // 2, n_times)
        The trial surrogates.
    mean_spatial_filter : ndarray, shape (n_channels, n_times)
        The set of spatial filters.
    conditions : ndarray, shape (n_classes,)
        The conditions used. Values correspond to original event ids.

    References
    ----------
    .. footbibliography::
    """
    logger.info('...computing surrogate time series. This can take some time')
    # Default to leave-one-out cv
    cv = 'LeaveOneOut' if cv is None else cv
    picks = _picks_to_idx(epochs.info, picks)
    # leave-one-out requires balanced classes (see note above)
    if not len(set(Counter(epochs.events[:, 2]).values())) == 1:
        raise ValueError('The same number of epochs is required by '
                         'this function. Please consider '
                         '`epochs.equalize_event_counts`')
    if conditions is None:
        conditions = epochs.event_id.keys()
        epochs = epochs.copy()
    else:
        epochs = epochs[conditions]
    epochs.drop_bad()
    if len(conditions) != 2:
        raise ValueError('Currently this function expects exactly 2 '
                         'conditions but you gave me %i' %
                         len(conditions))
    ev = epochs.events[:, 2]
    # Special care to avoid path dependent mappings and orders
    conditions = list(sorted(conditions))
    cond_idx = [np.where(ev == epochs.event_id[k])[0] for k in conditions]
    info = pick_info(epochs.info, picks)
    data = epochs.get_data(picks=picks)
    # Scale (z-score) the data by channel type
    # XXX the z-scoring is applied outside the CV, which is not standard.
    for ch_type in ['mag', 'grad', 'eeg']:
        if ch_type in epochs:
            # FIXME should be applied to all sort of data channels
            if ch_type == 'eeg':
                this_picks = pick_types(info, meg=False, eeg=True)
            else:
                this_picks = pick_types(info, meg=ch_type, eeg=False)
            data[:, this_picks] /= np.std(data[:, this_picks])
    # Setup cross-validation. Need to use _set_cv to deal with sklearn
    # deprecation of cv objects.
    y = epochs.events[:, 2]
    _, cv_splits = _set_cv(cv, 'classifier', X=y, y=y)
    parallel, p_func, n_jobs = parallel_func(_run_ems, n_jobs=n_jobs)
    # FIXME this parallelization should be removed.
    #   1) it's numpy computation so it's already efficient,
    #   2) it duplicates the data in RAM,
    #   3) the computation is already super fast.
    out = parallel(p_func(_ems_diff, data, cond_idx, train, test)
                   for train, test in cv_splits)
    # one surrogate time course per CV fold; average the per-fold filters
    surrogate_trials, spatial_filter = zip(*out)
    surrogate_trials = np.array(surrogate_trials)
    spatial_filter = np.mean(spatial_filter, axis=0)
    return surrogate_trials, spatial_filter, epochs.events[:, 2]
def _ems_diff(data0, data1):
"""Compute the default diff objective function."""
return np.mean(data0, axis=0) - np.mean(data1, axis=0)
def _run_ems(objective_function, data, cond_idx, train, test):
"""Run EMS."""
d = objective_function(*(data[np.intersect1d(c, train)] for c in cond_idx))
d /= np.sqrt(np.sum(d ** 2, axis=0))[None, :]
# compute surrogates
return np.sum(data[test[0]] * d, axis=0), d
"""
Core visualization operations based on PyVista.
Actual implementation of _Renderer and _Projection classes.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
# Joan Massich <mailsik@gmail.com>
#
# License: Simplified BSD
from contextlib import contextmanager
from inspect import signature
import os
import re
import sys
import warnings
import numpy as np
from ._abstract import _AbstractRenderer, Figure3D
from ._utils import (_get_colormap_from_array, _alpha_blend_background,
ALLOWED_QUIVER_MODES, _init_mne_qtapp)
from ...fixes import _compare_version
from ...transforms import apply_trans
from ...utils import (copy_base_doc_to_subclass_doc, _check_option,
_require_version, _validate_type, warn)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import pyvista
from pyvista import Plotter, PolyData, Line, close_all, UnstructuredGrid
try:
from pyvistaqt import BackgroundPlotter # noqa
except ImportError:
from pyvista import BackgroundPlotter
from pyvista.plotting.plotting import _ALL_PLOTTERS
from vtkmodules.vtkCommonCore import (
vtkCommand, vtkLookupTable, VTK_UNSIGNED_CHAR)
from vtkmodules.vtkCommonDataModel import VTK_VERTEX, vtkPiecewiseFunction
from vtkmodules.vtkCommonTransforms import vtkTransform
from vtkmodules.vtkFiltersCore import vtkCellDataToPointData, vtkGlyph3D
from vtkmodules.vtkFiltersGeneral import (
vtkTransformPolyDataFilter, vtkMarchingContourFilter)
from vtkmodules.vtkFiltersHybrid import vtkPolyDataSilhouette
from vtkmodules.vtkFiltersSources import (
vtkSphereSource, vtkConeSource, vtkCylinderSource, vtkArrowSource,
vtkPlatonicSolidSource, vtkGlyphSource2D)
from vtkmodules.vtkImagingCore import vtkImageReslice
from vtkmodules.vtkRenderingCore import (
vtkMapper, vtkActor, vtkCellPicker, vtkColorTransferFunction,
vtkPolyDataMapper, vtkVolume, vtkCoordinate, vtkDataSetMapper)
from vtkmodules.vtkRenderingVolumeOpenGL2 import vtkSmartVolumeMapper
from vtkmodules.util.numpy_support import numpy_to_vtk
try:
from vtkmodules.vtkCommonCore import VTK_VERSION
except Exception: # some bad versions of VTK
VTK_VERSION = '9.0'
VTK9 = _compare_version(VTK_VERSION, '>=', '9.0')
_FIGURES = dict()
class PyVistaFigure(Figure3D):
    """PyVista-based 3D Figure.

    .. note:: This class should not be instantiated directly via
        ``mne.viz.PyVistaFigure(...)``. Instead, use
        :func:`mne.viz.create_3d_figure`.

    See Also
    --------
    mne.viz.create_3d_figure
    """

    def __init__(self):
        # intentionally empty: real setup happens in _init() so that
        # construction parameters can be supplied after instantiation
        pass

    def _init(self, plotter=None, show=False, title='PyVista Scene',
              size=(600, 600), shape=(1, 1), background_color='black',
              smooth_shading=True, off_screen=False, notebook=False,
              splash=False, multi_samples=None):
        # Deferred initializer: record all construction parameters but do not
        # build the plotter yet (that happens lazily in _build()).
        self._plotter = plotter
        self.display = None
        self.background_color = background_color
        self.smooth_shading = smooth_shading
        self.notebook = notebook
        self.title = title
        self.splash = splash
        # ``store`` collects the kwargs forwarded to the plotter class
        self.store = dict()
        self.store['window_size'] = size
        self.store['shape'] = shape
        self.store['off_screen'] = off_screen
        self.store['border'] = False
        self.store['multi_samples'] = multi_samples
        if not self.notebook:
            # Qt-based interactive backend
            self.store['show'] = show
            self.store['title'] = title
            self.store['auto_update'] = False
            self.store['menu_bar'] = False
            self.store['toolbar'] = False
            self.store['update_app_icon'] = False
            self._plotter_class = BackgroundPlotter
            # newer pyvistaqt accepts a custom main-window class
            if 'app_window_class' in signature(BackgroundPlotter).parameters:
                from ._qt import _MNEMainWindow
                self.store['app_window_class'] = _MNEMainWindow
        else:
            self._plotter_class = Plotter
        self._nrows, self._ncols = self.store['shape']
        self._azimuth = self._elevation = None

    def _build(self):
        # NOTE(review): ``self.plotter`` is presumably a property (defined on
        # Figure3D or elsewhere) returning ``self._plotter`` -- confirm.
        if self.plotter is None:
            if not self.notebook:
                # the Qt app (and optional splash screen) must exist before
                # the BackgroundPlotter is created
                out = _init_mne_qtapp(
                    enable_icon=hasattr(self._plotter_class, 'set_icon'),
                    splash=self.splash)
                # replace it with the Qt object
                if self.splash:
                    self.splash = out[1]
                    app = out[0]
                else:
                    app = out
                self.store['app'] = app
            plotter = self._plotter_class(**self.store)
            plotter.background_color = self.background_color
            self._plotter = plotter
        if self.plotter.iren is not None:
            self.plotter.iren.initialize()
        # process events twice, presumably to fully flush the pending
        # event queue -- TODO confirm
        _process_events(self.plotter)
        _process_events(self.plotter)
        return self.plotter

    def _is_active(self):
        # a closed plotter loses its ``ren_win`` attribute
        if self.plotter is None:
            return False
        return hasattr(self.plotter, 'ren_win')
class _Projection(object):
"""Class storing projection information.
Attributes
----------
xy : array
Result of 2d projection of 3d data.
pts : None
Scene sensors handle.
"""
def __init__(self, *, xy, pts, plotter):
"""Store input projection information into attributes."""
self.xy = xy
self.pts = pts
self.plotter = plotter
def visible(self, state):
"""Modify visibility attribute of the sensors."""
self.pts.SetVisibility(state)
self.plotter.render()
@copy_base_doc_to_subclass_doc
class _PyVistaRenderer(_AbstractRenderer):
"""Class managing rendering scene.
Attributes
----------
plotter: Plotter
Main PyVista access point.
name: str
Name of the window.
"""
def __init__(self, fig=None, size=(600, 600), bgcolor='black',
name="PyVista Scene", show=False, shape=(1, 1),
notebook=None, smooth_shading=True, splash=False,
multi_samples=None):
from .._3d import _get_3d_option
_require_version('pyvista', 'use 3D rendering', '0.32')
multi_samples = _get_3d_option('multi_samples')
# multi_samples > 1 is broken on macOS + Intel Iris + volume rendering
if sys.platform == 'darwin':
multi_samples = 1
figure = PyVistaFigure()
figure._init(
show=show, title=name, size=size, shape=shape,
background_color=bgcolor, notebook=notebook,
smooth_shading=smooth_shading, splash=splash,
multi_samples=multi_samples)
self.font_family = "arial"
self.tube_n_sides = 20
self.antialias = _get_3d_option('antialias')
self.depth_peeling = _get_3d_option('depth_peeling')
self.smooth_shading = smooth_shading
if isinstance(fig, int):
saved_fig = _FIGURES.get(fig)
# Restore only active plotter
if saved_fig is not None and saved_fig._is_active():
self.figure = saved_fig
else:
self.figure = figure
_FIGURES[fig] = self.figure
elif fig is None:
self.figure = figure
else:
self.figure = fig
# Enable off_screen if sphinx-gallery or testing
if pyvista.OFF_SCREEN:
self.figure.store['off_screen'] = True
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
# pyvista theme may enable depth peeling by default so
# we disable it initially to better control the value afterwards
with _disabled_depth_peeling():
self.plotter = self.figure._build()
self._hide_axes()
self._enable_antialias()
self._enable_depth_peeling()
# FIX: https://github.com/pyvista/pyvistaqt/pull/68
if not hasattr(self.plotter, "iren"):
self.plotter.iren = None
self.update_lighting()
@property
def _all_plotters(self):
if self.figure.plotter is not None:
return [self.figure.plotter]
else:
return list()
@property
def _all_renderers(self):
if self.figure.plotter is not None:
return self.figure.plotter.renderers
else:
return list()
def _hide_axes(self):
for renderer in self._all_renderers:
renderer.hide_axes()
def _update(self):
for plotter in self._all_plotters:
plotter.update()
def _index_to_loc(self, idx):
_ncols = self.figure._ncols
row = idx // _ncols
col = idx % _ncols
return (row, col)
def _loc_to_index(self, loc):
_ncols = self.figure._ncols
return loc[0] * _ncols + loc[1]
def subplot(self, x, y):
x = np.max([0, np.min([x, self.figure._nrows - 1])])
y = np.max([0, np.min([y, self.figure._ncols - 1])])
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
self.plotter.subplot(x, y)
def scene(self):
return self.figure
def update_lighting(self):
# Inspired from Mayavi's version of Raymond Maple 3-lights illumination
for renderer in self._all_renderers:
lights = list(renderer.GetLights())
headlight = lights.pop(0)
headlight.SetSwitch(False)
# below and centered, left and above, right and above
az_el_in = ((0, -45, 0.7), (-60, 30, 0.7), (60, 30, 0.7))
for li, light in enumerate(lights):
if li < len(az_el_in):
light.SetSwitch(True)
light.SetPosition(_to_pos(*az_el_in[li][:2]))
light.SetIntensity(az_el_in[li][2])
else:
light.SetSwitch(False)
light.SetPosition(_to_pos(0.0, 0.0))
light.SetIntensity(0.0)
light.SetColor(1.0, 1.0, 1.0)
def set_interaction(self, interaction):
if not hasattr(self.plotter, "iren") or self.plotter.iren is None:
return
if interaction == "rubber_band_2d":
for renderer in self._all_renderers:
renderer.enable_parallel_projection()
if hasattr(self.plotter, 'enable_rubber_band_2d_style'):
self.plotter.enable_rubber_band_2d_style()
else:
from vtkmodules.vtkInteractionStyle import\
vtkInteractorStyleRubberBand2D
style = vtkInteractorStyleRubberBand2D()
self.plotter.interactor.SetInteractorStyle(style)
else:
for renderer in self._all_renderers:
renderer.disable_parallel_projection()
kwargs = dict()
if interaction == 'terrain':
kwargs['mouse_wheel_zooms'] = True
getattr(self.plotter, f'enable_{interaction}_style')(**kwargs)
def legend(self, labels, border=False, size=0.1, face='triangle',
loc='upper left'):
return self.plotter.add_legend(
labels, size=(size, size), face=face, loc=loc)
def polydata(self, mesh, color=None, opacity=1.0, normals=None,
backface_culling=False, scalars=None, colormap=None,
vmin=None, vmax=None, interpolate_before_map=True,
representation='surface', line_width=1.,
polygon_offset=None, **kwargs):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
rgba = False
if color is not None and len(color) == mesh.n_points:
if color.shape[1] == 3:
scalars = np.c_[color, np.ones(mesh.n_points)]
else:
scalars = color
scalars = (scalars * 255).astype('ubyte')
color = None
rgba = True
if isinstance(colormap, np.ndarray):
if colormap.dtype == np.uint8:
colormap = colormap.astype(np.float64) / 255.
from matplotlib.colors import ListedColormap
colormap = ListedColormap(colormap)
if normals is not None:
mesh.point_data["Normals"] = normals
mesh.GetPointData().SetActiveNormals("Normals")
else:
_compute_normals(mesh)
if 'rgba' in kwargs:
rgba = kwargs["rgba"]
kwargs.pop('rgba')
smooth_shading = self.smooth_shading
if representation == 'wireframe':
smooth_shading = False # never use smooth shading for wf
actor = _add_mesh(
plotter=self.plotter,
mesh=mesh, color=color, scalars=scalars, edge_color=color,
rgba=rgba, opacity=opacity, cmap=colormap,
backface_culling=backface_culling,
rng=[vmin, vmax], show_scalar_bar=False,
smooth_shading=smooth_shading,
interpolate_before_map=interpolate_before_map,
style=representation, line_width=line_width, **kwargs,
)
if polygon_offset is not None:
mapper = actor.GetMapper()
mapper.SetResolveCoincidentTopologyToPolygonOffset()
mapper.SetRelativeCoincidentTopologyPolygonOffsetParameters(
polygon_offset, polygon_offset)
return actor, mesh
def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
backface_culling=False, scalars=None, colormap=None,
vmin=None, vmax=None, interpolate_before_map=True,
representation='surface', line_width=1., normals=None,
polygon_offset=None, **kwargs):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
vertices = np.c_[x, y, z].astype(float)
triangles = np.c_[np.full(len(triangles), 3), triangles]
mesh = PolyData(vertices, triangles)
return self.polydata(
mesh=mesh,
color=color,
opacity=opacity,
normals=normals,
backface_culling=backface_culling,
scalars=scalars,
colormap=colormap,
vmin=vmin,
vmax=vmax,
interpolate_before_map=interpolate_before_map,
representation=representation,
line_width=line_width,
polygon_offset=polygon_offset,
**kwargs,
)
    def contour(self, surface, scalars, contours, width=1.0, opacity=1.0,
                vmin=None, vmax=None, colormap=None,
                normalized_colormap=False, kind='line', color=None):
        """Add iso-contours of ``scalars`` on a surface dict (rr/tris).

        ``kind='tube'`` extrudes the contour lines into tubes of radius
        ``width``. Returns the ``(actor, contour)`` pair.
        """
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            if colormap is not None:
                colormap = _get_colormap_from_array(colormap,
                                                    normalized_colormap)
            vertices = np.array(surface['rr'])
            triangles = np.array(surface['tris'])
            n_triangles = len(triangles)
            # VTK faces need the vertex count (3) prepended per triangle
            triangles = np.c_[np.full(n_triangles, 3), triangles]
            mesh = PolyData(vertices, triangles)
            mesh.point_data['scalars'] = scalars
            contour = mesh.contour(isosurfaces=contours)
            line_width = width
            if kind == 'tube':
                contour = contour.tube(radius=width, n_sides=self.tube_n_sides)
                line_width = 1.0
            actor = _add_mesh(
                plotter=self.plotter,
                mesh=contour,
                show_scalar_bar=False,
                line_width=line_width,
                color=color,
                rng=[vmin, vmax],
                cmap=colormap,
                opacity=opacity,
                smooth_shading=self.smooth_shading,
            )
        return actor, contour
    def surface(self, surface, color=None, opacity=1.0,
                vmin=None, vmax=None, colormap=None,
                normalized_colormap=False, scalars=None,
                backface_culling=False, polygon_offset=None):
        """Render a surface dict (``rr``/``tris``, optional ``nn`` normals).

        Delegates the actual rendering to :meth:`polydata` and returns its
        ``(actor, mesh)`` pair.
        """
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            normals = surface.get('nn', None)
            vertices = np.array(surface['rr'])
            triangles = np.array(surface['tris'])
            # VTK faces need the vertex count (3) prepended per triangle
            triangles = np.c_[np.full(len(triangles), 3), triangles]
            mesh = PolyData(vertices, triangles)
            colormap = _get_colormap_from_array(colormap, normalized_colormap)
            if scalars is not None:
                mesh.point_data['scalars'] = scalars
        return self.polydata(
            mesh=mesh,
            color=color,
            opacity=opacity,
            normals=normals,
            backface_culling=backface_culling,
            scalars=scalars,
            colormap=colormap,
            vmin=vmin,
            vmax=vmax,
            polygon_offset=polygon_offset,
        )
    def sphere(self, center, color, scale, opacity=1.0,
               resolution=8, backface_culling=False,
               radius=None):
        """Add one sphere glyph per row of ``center``.

        When an absolute ``radius`` is given the glyphs are not scaled
        (``factor`` is 1.0); otherwise ``scale`` is used as the glyph
        scale factor. Returns ``(actor, glyph)`` or ``(None, None)`` for
        empty input.
        """
        from vtkmodules.vtkFiltersSources import vtkSphereSource
        factor = 1.0 if radius is not None else scale
        center = np.array(center, dtype=float)
        if len(center) == 0:
            return None, None
        _check_option('center.ndim', center.ndim, (1, 2))
        _check_option('center.shape[-1]', center.shape[-1], (3,))
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            sphere = vtkSphereSource()
            sphere.SetThetaResolution(resolution)
            sphere.SetPhiResolution(resolution)
            if radius is not None:
                sphere.SetRadius(radius)
            sphere.Update()
            geom = sphere.GetOutput()
            mesh = PolyData(center)
            glyph = mesh.glyph(orient=False, scale=False,
                               factor=factor, geom=geom)
            actor = _add_mesh(
                self.plotter,
                mesh=glyph, color=color, opacity=opacity,
                backface_culling=backface_culling,
                smooth_shading=self.smooth_shading
            )
        return actor, glyph
    def tube(self, origin, destination, radius=0.001, color='white',
             scalars=None, vmin=None, vmax=None, colormap='RdBu',
             normalized_colormap=False, reverse_lut=False):
        """Add one tube per (origin, destination) segment pair.

        Returns the ``(actor, tube)`` pair of the *last* segment added.
        NOTE(review): when ``scalars`` is given, only ``scalars[0, :]`` is
        attached (to the first segment) and ``scalars``/``color`` are then
        rebound for the remaining iterations — presumably intentional for
        single-segment use; confirm for multi-segment calls.
        """
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            cmap = _get_colormap_from_array(colormap, normalized_colormap)
            for (pointa, pointb) in zip(origin, destination):
                line = Line(pointa, pointb)
                if scalars is not None:
                    line.point_data['scalars'] = scalars[0, :]
                    scalars = 'scalars'
                    color = None
                else:
                    scalars = None
                tube = line.tube(radius, n_sides=self.tube_n_sides)
                actor = _add_mesh(
                    plotter=self.plotter,
                    mesh=tube,
                    scalars=scalars,
                    flip_scalars=reverse_lut,
                    rng=[vmin, vmax],
                    color=color,
                    show_scalar_bar=False,
                    cmap=cmap,
                    smooth_shading=self.smooth_shading,
                )
        return actor, tube
    def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8,
                 glyph_height=None, glyph_center=None, glyph_resolution=None,
                 opacity=1.0, scale_mode='none', scalars=None, colormap=None,
                 backface_culling=False, line_width=2., name=None,
                 glyph_width=None, glyph_depth=None, glyph_radius=0.15,
                 solid_transform=None, *, clim=None):
        """Add vector glyphs (arrows/cones/cylinders/spheres/octahedra).

        The vectors ``(u, v, w)`` at points ``(x, y, z)`` are stored in an
        unstructured grid and glyphed with the geometry selected by
        ``mode``. Returns the ``(actor, mesh)`` pair (for ``'2darrow'``, a
        ``(mapper, grid)`` pair is returned instead).
        """
        _check_option('mode', mode, ALLOWED_QUIVER_MODES)
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            factor = scale
            vectors = np.c_[u, v, w]
            points = np.vstack(np.c_[x, y, z])
            n_points = len(points)
            cell_type = np.full(n_points, VTK_VERTEX)
            cells = np.c_[np.full(n_points, 1), range(n_points)]
            args = (cells, cell_type, points)
            if not VTK9:
                # pre-VTK9 UnstructuredGrid needs explicit cell offsets
                args = (np.arange(n_points) * 3,) + args
            grid = UnstructuredGrid(*args)
            if scalars is None:
                scalars = np.ones((n_points,))
            grid.point_data['scalars'] = np.array(scalars)
            grid.point_data['vec'] = vectors
            if mode == '2darrow':
                return _arrow_glyph(grid, factor), grid
            elif mode == 'arrow':
                alg = _glyph(
                    grid,
                    orient='vec',
                    scalars='scalars',
                    factor=factor
                )
                mesh = pyvista.wrap(alg.GetOutput())
            else:
                tr = None
                if mode == 'cone':
                    glyph = vtkConeSource()
                    glyph.SetCenter(0.5, 0, 0)
                    if glyph_radius is not None:
                        glyph.SetRadius(glyph_radius)
                elif mode == 'cylinder':
                    glyph = vtkCylinderSource()
                    if glyph_radius is not None:
                        glyph.SetRadius(glyph_radius)
                elif mode == 'oct':
                    glyph = vtkPlatonicSolidSource()
                    glyph.SetSolidTypeToOctahedron()
                else:
                    assert mode == 'sphere', mode  # guaranteed above
                    glyph = vtkSphereSource()
                if mode == 'cylinder':
                    if glyph_height is not None:
                        glyph.SetHeight(glyph_height)
                    if glyph_center is not None:
                        glyph.SetCenter(glyph_center)
                    if glyph_resolution is not None:
                        glyph.SetResolution(glyph_resolution)
                    tr = vtkTransform()
                    tr.RotateWXYZ(90, 0, 0, 1)
                elif mode == 'oct':
                    if solid_transform is not None:
                        assert solid_transform.shape == (4, 4)
                        tr = vtkTransform()
                        tr.SetMatrix(
                            solid_transform.astype(np.float64).ravel())
                if tr is not None:
                    # fix orientation
                    glyph.Update()
                    trp = vtkTransformPolyDataFilter()
                    trp.SetInputData(glyph.GetOutput())
                    trp.SetTransform(tr)
                    glyph = trp
                glyph.Update()
                geom = glyph.GetOutput()
                mesh = grid.glyph(orient='vec', scale=scale_mode == 'vector',
                                  factor=factor, geom=geom)
            actor = _add_mesh(
                self.plotter,
                mesh=mesh,
                color=color,
                opacity=opacity,
                scalars=None,
                colormap=colormap,
                show_scalar_bar=False,
                backface_culling=backface_culling,
                clim=clim,
            )
        return actor, mesh
def text2d(self, x_window, y_window, text, size=14, color='white',
justification=None):
size = 14 if size is None else size
position = (x_window, y_window)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
actor = self.plotter.add_text(text, position=position,
font_size=size,
color=color,
viewport=True)
if isinstance(justification, str):
if justification == 'left':
actor.GetTextProperty().SetJustificationToLeft()
elif justification == 'center':
actor.GetTextProperty().SetJustificationToCentered()
elif justification == 'right':
actor.GetTextProperty().SetJustificationToRight()
else:
raise ValueError('Expected values for `justification`'
'are `left`, `center` or `right` but '
'got {} instead.'.format(justification))
_hide_testing_actor(actor)
return actor
    def text3d(self, x, y, z, text, scale, color='white'):
        """Add a text label anchored at the 3D point ``(x, y, z)``."""
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            kwargs = dict(
                points=np.array([x, y, z]).astype(float),
                labels=[text],
                point_size=scale,
                text_color=color,
                font_family=self.font_family,
                name=text,
                shape_opacity=0,
            )
            # only newer PyVista versions support 'always_visible'
            if ('always_visible'
                    in signature(self.plotter.add_point_labels).parameters):
                kwargs['always_visible'] = True
            actor = self.plotter.add_point_labels(**kwargs)
        _hide_testing_actor(actor)
        return actor
    def scalarbar(self, source, color="white", title=None, n_labels=4,
                  bgcolor=None, **extra_kwargs):
        """Add a scalar bar for ``source`` (a vtkMapper, vtkActor, or other).

        ``extra_kwargs`` override the defaults passed to
        ``plotter.add_scalar_bar``.
        """
        if isinstance(source, vtkMapper):
            mapper = source
        elif isinstance(source, vtkActor):
            mapper = source.GetMapper()
        else:
            mapper = None
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            kwargs = dict(color=color, title=title, n_labels=n_labels,
                          use_opacity=False, n_colors=256, position_x=0.15,
                          position_y=0.05, width=0.7, shadow=False, bold=True,
                          label_font_size=22, font_family=self.font_family,
                          background_color=bgcolor, mapper=mapper)
            # caller-supplied options win over the defaults above
            kwargs.update(extra_kwargs)
            actor = self.plotter.add_scalar_bar(**kwargs)
        _hide_testing_actor(actor)
        return actor
def show(self):
self.plotter.show()
    def close(self):
        """Close the window and release this figure's plotter resources."""
        _close_3d_figure(figure=self.figure)
    def get_camera(self):
        """Return (roll, distance, azimuth, elevation, focalpoint)."""
        return _get_3d_view(self.figure)
    def set_camera(self, azimuth=None, elevation=None, distance=None,
                   focalpoint='auto', roll=None, reset_camera=True,
                   rigid=None, update=True):
        """Position the camera; thin wrapper around ``_set_3d_view``."""
        _set_3d_view(self.figure, azimuth=azimuth, elevation=elevation,
                     distance=distance, focalpoint=focalpoint, roll=roll,
                     reset_camera=reset_camera, rigid=rigid, update=update)
def reset_camera(self):
self.plotter.reset_camera()
    def screenshot(self, mode='rgb', filename=None):
        """Take a screenshot; 'rgba' mode uses a transparent background."""
        return _take_3d_screenshot(figure=self.figure, mode=mode,
                                   filename=filename)
    def project(self, xyz, ch_names):
        """Project 3D points to 2D display coords, keyed by channel name.

        Returns a ``_Projection`` holding the name->xy mapping and the
        last actor added to the renderer.
        """
        xy = _3d_to_2d(self.plotter, xyz)
        xy = dict(zip(ch_names, xy))
        # pts = self.fig.children[-1]
        pts = self.plotter.renderer.GetActors().GetLastItem()
        return _Projection(xy=xy, pts=pts, plotter=self.plotter)
def _enable_depth_peeling(self):
if not self.depth_peeling:
return
if not self.figure.store['off_screen']:
for renderer in self._all_renderers:
renderer.enable_depth_peeling()
    def _enable_antialias(self):
        """Enable it everywhere except Azure."""
        if not self.antialias:
            return
        # XXX for some reason doing this on Azure causes access violations:
        # ##[error]Cmd.exe exited with code '-1073741819'
        # So for now don't use it there. Maybe has to do with setting these
        # before the window has actually been made "active"...?
        # For Mayavi we have an "on activated" event or so, we should look into
        # using this for Azure at some point, too.
        if self.figure._is_active():
            # macOS, Azure
            bad_system = (
                sys.platform == 'darwin' or
                os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true')
            # Mesa (software) rendering is also treated as a bad system
            bad_system |= _is_mesa(self.plotter)
            if not bad_system:
                for renderer in self._all_renderers:
                    renderer.enable_anti_aliasing()
                # also smooth lines directly on each render window
                for plotter in self._all_plotters:
                    plotter.ren_win.LineSmoothingOn()
def remove_mesh(self, mesh_data):
actor, _ = mesh_data
self.plotter.remove_actor(actor)
    @contextmanager
    def _disabled_interaction(self):
        """Context manager that temporarily disables plotter interaction.

        No-op when the renderer is already non-interactive; otherwise
        interaction is re-enabled on exit even if an exception occurs.
        """
        if not self.plotter.renderer.GetInteractive():
            yield
        else:
            self.plotter.disable()
            try:
                yield
            finally:
                self.plotter.enable()
def _actor(self, mapper=None):
actor = vtkActor()
if mapper is not None:
actor.SetMapper(mapper)
_hide_testing_actor(actor)
return actor
def _process_events(self):
for plotter in self._all_plotters:
_process_events(plotter)
    def _update_picking_callback(self,
                                 on_mouse_move,
                                 on_button_press,
                                 on_button_release,
                                 on_pick):
        """Register mouse/button observers and a cell picker for picking.

        ``on_pick`` fires at the end of each pick; the volume opacity
        isovalue is zeroed so fully transparent voxels are still pickable.
        """
        add_obs = self.plotter.iren.add_observer
        add_obs(vtkCommand.RenderEvent, on_mouse_move)
        add_obs(vtkCommand.LeftButtonPressEvent, on_button_press)
        add_obs(vtkCommand.EndInteractionEvent, on_button_release)
        self.plotter.picker = vtkCellPicker()
        self.plotter.picker.AddObserver(
            vtkCommand.EndPickEvent,
            on_pick
        )
        self.plotter.picker.SetVolumeOpacityIsovalue(0.)
    def _set_mesh_scalars(self, mesh, scalars, name):
        """Attach a point-data scalar array called ``name`` to ``mesh``."""
        # Catch: FutureWarning: Conversion of the second argument of
        # issubdtype from `complex` to `np.complexfloating` is deprecated.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=FutureWarning)
            mesh.point_data[name] = scalars
    def _set_colormap_range(self, actor, ctable, scalar_bar, rng=None,
                            background_color=None):
        """Apply a color table (and range) to an actor and its scalar bar."""
        if rng is not None:
            mapper = actor.GetMapper()
            mapper.SetScalarRange(*rng)
            lut = mapper.GetLookupTable()
            lut.SetTable(numpy_to_vtk(ctable))
        if scalar_bar is not None:
            lut = scalar_bar.GetLookupTable()
            if background_color is not None:
                background_color = np.array(background_color) * 255
                # pre-blend alpha into the background so the bar renders opaque
                ctable = _alpha_blend_background(ctable, background_color)
            lut.SetTable(numpy_to_vtk(ctable, array_type=VTK_UNSIGNED_CHAR))
            # NOTE(review): assumes rng is not None whenever scalar_bar is
            # given (SetRange(*None) would raise) — confirm with callers.
            lut.SetRange(*rng)
    def _set_volume_range(self, volume, ctable, alpha, scalar_bar, rng):
        """Build color/opacity transfer functions for ``volume`` from a
        0-255 RGBA color table spread linearly over ``rng``."""
        color_tf = vtkColorTransferFunction()
        opacity_tf = vtkPiecewiseFunction()
        for loc, color in zip(np.linspace(*rng, num=len(ctable)), ctable):
            color_tf.AddRGBPoint(loc, *(color[:-1] / 255.))
            opacity_tf.AddPoint(loc, color[-1] * alpha / 255.)
        color_tf.ClampingOn()
        opacity_tf.ClampingOn()
        prop = volume.GetProperty()
        prop.SetColor(color_tf)
        prop.SetScalarOpacity(opacity_tf)
        prop.ShadeOn()
        prop.SetInterpolationTypeToLinear()
        if scalar_bar is not None:
            lut = vtkLookupTable()
            lut.SetRange(*rng)
            lut.SetTable(numpy_to_vtk(ctable))
            scalar_bar.SetLookupTable(lut)
    def _sphere(self, center, color, radius):
        """Add a single sphere of absolute ``radius`` at ``center``."""
        from vtkmodules.vtkFiltersSources import vtkSphereSource
        sphere = vtkSphereSource()
        sphere.SetThetaResolution(8)
        sphere.SetPhiResolution(8)
        sphere.SetRadius(radius)
        sphere.SetCenter(center)
        sphere.Update()
        mesh = pyvista.wrap(sphere.GetOutput())
        actor = _add_mesh(
            self.plotter,
            mesh=mesh,
            color=color
        )
        return actor, mesh
    def _volume(self, dimensions, origin, spacing, scalars,
                surface_alpha, resolution, blending, center):
        """Build the grid, surface mapper, and volume actors for volumetric
        rendering.

        Returns ``(grid, grid_mesh, volume_pos, volume_neg)`` where
        ``grid_mesh`` is None unless ``surface_alpha > 0`` and
        ``volume_neg`` is only created for centered 'mip' blending.
        """
        # Now we can actually construct the visualization
        grid = pyvista.UniformGrid()
        grid.dimensions = dimensions + 1  # inject data on the cells
        grid.origin = origin
        grid.spacing = spacing
        grid.cell_data['values'] = scalars
        # Add contour of enclosed volume (use GetOutput instead of
        # GetOutputPort below to avoid updating)
        if surface_alpha > 0 or resolution is not None:
            grid_alg = vtkCellDataToPointData()
            grid_alg.SetInputDataObject(grid)
            grid_alg.SetPassCellData(False)
            grid_alg.Update()
        else:
            grid_alg = None
        if surface_alpha > 0:
            grid_surface = vtkMarchingContourFilter()
            grid_surface.ComputeNormalsOn()
            grid_surface.ComputeScalarsOff()
            grid_surface.SetInputData(grid_alg.GetOutput())
            grid_surface.SetValue(0, 0.1)
            grid_surface.Update()
            grid_mesh = vtkPolyDataMapper()
            grid_mesh.SetInputData(grid_surface.GetOutput())
        else:
            grid_mesh = None
        mapper = vtkSmartVolumeMapper()
        if resolution is None:  # native
            mapper.SetScalarModeToUseCellData()
            mapper.SetInputDataObject(grid)
        else:
            # resample to the requested isotropic resolution
            upsampler = vtkImageReslice()
            upsampler.SetInterpolationModeToLinear()  # default anyway
            upsampler.SetOutputSpacing(*([resolution] * 3))
            upsampler.SetInputConnection(grid_alg.GetOutputPort())
            mapper.SetInputConnection(upsampler.GetOutputPort())
        # Additive, AverageIntensity, and Composite might also be reasonable
        remap = dict(composite='Composite', mip='MaximumIntensity')
        getattr(mapper, f'SetBlendModeTo{remap[blending]}')()
        volume_pos = vtkVolume()
        volume_pos.SetMapper(mapper)
        dist = grid.length / (np.mean(grid.dimensions) - 1)
        volume_pos.GetProperty().SetScalarOpacityUnitDistance(dist)
        if center is not None and blending == 'mip':
            # We need to create a minimum intensity projection for the neg half
            mapper_neg = vtkSmartVolumeMapper()
            if resolution is None:  # native
                mapper_neg.SetScalarModeToUseCellData()
                mapper_neg.SetInputDataObject(grid)
            else:
                mapper_neg.SetInputConnection(upsampler.GetOutputPort())
            mapper_neg.SetBlendModeToMinimumIntensity()
            volume_neg = vtkVolume()
            volume_neg.SetMapper(mapper_neg)
            volume_neg.GetProperty().SetScalarOpacityUnitDistance(dist)
        else:
            volume_neg = None
        return grid, grid_mesh, volume_pos, volume_neg
    def _silhouette(self, mesh, color=None, line_width=None, alpha=None,
                    decimate=None):
        """Add a camera-dependent silhouette outline of ``mesh``.

        ``decimate`` (a fraction) reduces the mesh first to speed up the
        silhouette filter. Returns the silhouette actor.
        """
        mesh = mesh.decimate(decimate) if decimate is not None else mesh
        silhouette_filter = vtkPolyDataSilhouette()
        silhouette_filter.SetInputData(mesh)
        silhouette_filter.SetCamera(self.plotter.renderer.GetActiveCamera())
        silhouette_filter.SetEnableFeatureAngle(0)
        silhouette_mapper = vtkPolyDataMapper()
        silhouette_mapper.SetInputConnection(
            silhouette_filter.GetOutputPort())
        actor, prop = self.plotter.add_actor(
            silhouette_mapper, reset_camera=False, name=None,
            culling=False, pickable=False, render=False)
        if color is not None:
            prop.SetColor(*color)
        if alpha is not None:
            prop.SetOpacity(alpha)
        if line_width is not None:
            prop.SetLineWidth(line_width)
        _hide_testing_actor(actor)
        return actor
def _compute_normals(mesh):
"""Patch PyVista compute_normals."""
if 'Normals' not in mesh.point_data:
mesh.compute_normals(
cell_normals=False,
consistent_normals=False,
non_manifold_traversal=False,
inplace=True,
)
def _add_mesh(plotter, *args, **kwargs):
    """Patch PyVista add_mesh."""
    mesh = kwargs.get('mesh')
    smooth_shading = kwargs.pop('smooth_shading', True)
    # disable the rendering pass for add_mesh; render() is called in show()
    kwargs.setdefault('render', False)
    actor = plotter.add_mesh(*args, **kwargs)
    if smooth_shading and 'Normals' in mesh.point_data:
        actor.GetProperty().SetInterpolationToPhong()
    _hide_testing_actor(actor)
    return actor
def _hide_testing_actor(actor):
    """Make ``actor`` invisible when the 3D backend is in testing mode."""
    from . import renderer
    if renderer.MNE_3D_BACKEND_TESTING:
        actor.SetVisibility(False)
def _deg2rad(deg):
return deg * np.pi / 180.
def _rad2deg(rad):
return rad * 180. / np.pi
def _to_pos(azimuth, elevation):
theta = azimuth * np.pi / 180.0
phi = (90.0 - elevation) * np.pi / 180.0
x = np.sin(theta) * np.sin(phi)
y = np.cos(phi)
z = np.cos(theta) * np.sin(phi)
return x, y, z
def _mat_to_array(vtk_mat):
e = [vtk_mat.GetElement(i, j) for i in range(4) for j in range(4)]
arr = np.array(e, dtype=float)
arr.shape = (4, 4)
return arr
def _3d_to_2d(plotter, xyz):
    """Convert world xyz coordinates to 2D display coordinates."""
    # https://vtk.org/Wiki/VTK/Examples/Cxx/Utilities/Coordinate
    coordinate = vtkCoordinate()
    coordinate.SetCoordinateSystemToWorld()
    xy = list()
    for coord in xyz:
        coordinate.SetValue(*coord)
        xy.append(coordinate.GetComputedLocalDisplayValue(plotter.renderer))
    xy = np.array(xy, float).reshape(-1, 2)  # in case it's empty
    return xy
def _close_all():
    """Close every open plotter and clear the module figure registry."""
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        close_all()
    _FIGURES.clear()
def _get_camera_direction(focalpoint, position):
x, y, z = position - focalpoint
r = np.sqrt(x * x + y * y + z * z)
theta = np.arccos(z / r)
phi = np.arctan2(y, x)
return r, theta, phi
def _get_3d_view(figure):
    """Return (roll, distance, azimuth, elevation, focalpoint) in degrees."""
    position = np.array(figure.plotter.camera_position[0])
    focalpoint = np.array(figure.plotter.camera_position[1])
    _, theta, phi = _get_camera_direction(focalpoint, position)
    azimuth, elevation = _rad2deg(phi), _rad2deg(theta)
    return (figure.plotter.camera.GetRoll(),
            figure.plotter.camera.GetDistance(),
            azimuth, elevation, focalpoint)
def _set_3d_view(figure, azimuth=None, elevation=None, focalpoint='auto',
                 distance=None, roll=None, reset_camera=True, rigid=None,
                 update=True):
    """Position the camera of ``figure`` from spherical view parameters.

    ``rigid`` is a 4x4 transform defining the space in which
    azimuth/elevation are interpreted; the resulting camera is mapped
    back to the original frame before being applied.
    """
    rigid = np.eye(4) if rigid is None else rigid
    position = np.array(figure.plotter.camera_position[0])
    bounds = np.array(figure.plotter.renderer.ComputeVisiblePropBounds())
    if reset_camera:
        figure.plotter.reset_camera(render=False)

    # focalpoint: if 'auto', we use the center of mass of the visible
    # bounds, if None, we use the existing camera focal point otherwise
    # we use the values given by the user
    if isinstance(focalpoint, str):
        _check_option('focalpoint', focalpoint, ('auto',),
                      extra='when a string')
        focalpoint = (bounds[1::2] + bounds[::2]) * 0.5
    elif focalpoint is None:
        focalpoint = np.array(figure.plotter.camera_position[1])
    else:
        focalpoint = np.asarray(focalpoint)

    # work in the transformed space
    position = apply_trans(rigid, position)
    focalpoint = apply_trans(rigid, focalpoint)
    _, theta, phi = _get_camera_direction(focalpoint, position)

    if azimuth is not None:
        phi = _deg2rad(azimuth)
    if elevation is not None:
        theta = _deg2rad(elevation)

    # set the distance
    if distance is None:
        distance = max(bounds[1::2] - bounds[::2]) * 2.0

    # Now calculate the view_up vector of the camera.  If the  view up is
    # close to the 'z' axis, the view plane normal is parallel to the
    # camera which is unacceptable, so we use a different view up.
    if elevation is None or 5. <= abs(elevation) <= 175.:
        view_up = [0, 0, 1]
    else:
        view_up = [0, 1, 0]

    position = [
        distance * np.cos(phi) * np.sin(theta),
        distance * np.sin(phi) * np.sin(theta),
        distance * np.cos(theta)]
    figure._azimuth = _rad2deg(phi)
    figure._elevation = _rad2deg(theta)

    # restore to the original frame
    rigid = np.linalg.inv(rigid)
    position = apply_trans(rigid, position)
    focalpoint = apply_trans(rigid, focalpoint)
    view_up = apply_trans(rigid, view_up, move=False)
    figure.plotter.camera_position = [
        position, focalpoint, view_up]
    # We need to add the requested roll to the roll dictated by the
    # transformed view_up
    if roll is not None:
        figure.plotter.camera.SetRoll(figure.plotter.camera.GetRoll() + roll)

    if update:
        figure.plotter.update()
        _process_events(figure.plotter)
def _set_3d_title(figure, title, size=16):
    """Set (or replace) the title text of a 3D figure and refresh it."""
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=FutureWarning)
        # a fixed name makes repeated calls replace the existing title
        figure.plotter.add_text(title, font_size=size, color='white',
                                name='title')
    figure.plotter.update()
    _process_events(figure.plotter)
def _check_3d_figure(figure):
    """Validate that ``figure`` is a PyVistaFigure instance."""
    _validate_type(figure, PyVistaFigure, 'figure')
def _close_3d_figure(figure):
    """Close a 3D figure's window and free its plotter resources."""
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=FutureWarning)
        # copy the plotter locally because figure.plotter is modified
        plotter = figure.plotter
        # close the window
        plotter.close()  # additional cleaning following signal_close
        _process_events(plotter)
        # free memory and deregister from the scraper
        plotter.deep_clean()  # remove internal references
        _ALL_PLOTTERS.pop(plotter._id_name, None)
        _process_events(plotter)
def _take_3d_screenshot(figure, mode='rgb', filename=None):
    """Grab the plotter framebuffer; 'rgba' keeps a transparent background."""
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=FutureWarning)
        # flush pending GUI events so the rendered state is current
        _process_events(figure.plotter)
        return figure.plotter.screenshot(
            transparent_background=(mode == 'rgba'),
            filename=filename)
def _process_events(plotter):
if hasattr(plotter, 'app'):
with warnings.catch_warnings(record=True):
warnings.filterwarnings('ignore', 'constrained_layout')
plotter.app.processEvents()
def _add_camera_callback(camera, callback):
    """Invoke ``callback`` whenever the camera is modified."""
    camera.AddObserver(vtkCommand.ModifiedEvent, callback)
def _arrow_glyph(grid, factor):
    """Create a 2D-arrow glyph mapper for the vectors stored in ``grid``."""
    glyph = vtkGlyphSource2D()
    glyph.SetGlyphTypeToArrow()
    glyph.FilledOff()
    glyph.Update()

    # fix position
    tr = vtkTransform()
    tr.Translate(0.5, 0., 0.)
    trp = vtkTransformPolyDataFilter()
    trp.SetInputConnection(glyph.GetOutputPort())
    trp.SetTransform(tr)
    trp.Update()

    alg = _glyph(
        grid,
        scale_mode='vector',
        scalars=False,
        orient='vec',
        factor=factor,
        geom=trp.GetOutputPort(),
    )
    mapper = vtkDataSetMapper()
    mapper.SetInputConnection(alg.GetOutputPort())
    return mapper
def _glyph(dataset, scale_mode='scalar', orient=True, scalars=True, factor=1.0,
           geom=None, tolerance=0.0, absolute=False, clamping=False, rng=None):
    """Run vtkGlyph3D over ``dataset``; defaults to arrow geometry.

    ``tolerance`` and ``absolute`` are accepted for API compatibility but
    are currently unused. Returns the updated vtkGlyph3D algorithm.
    """
    if geom is None:
        arrow = vtkArrowSource()
        arrow.Update()
        geom = arrow.GetOutputPort()
    alg = vtkGlyph3D()
    alg.SetSourceConnection(geom)
    if isinstance(scalars, str):
        dataset.active_scalars_name = scalars
    if isinstance(orient, str):
        dataset.active_vectors_name = orient
        orient = True
    if scale_mode == 'scalar':
        alg.SetScaleModeToScaleByScalar()
    elif scale_mode == 'vector':
        alg.SetScaleModeToScaleByVector()
    else:
        alg.SetScaleModeToDataScalingOff()
    if rng is not None:
        alg.SetRange(rng)
    alg.SetOrient(orient)
    alg.SetInputData(dataset)
    alg.SetScaleFactor(factor)
    alg.SetClamping(clamping)
    alg.Update()
    return alg
@contextmanager
def _disabled_depth_peeling():
    """Temporarily disable PyVista's global depth-peeling setting."""
    try:
        from pyvista import global_theme
    except Exception:  # workaround for older PyVista
        from pyvista import rcParams
        depth_peeling = rcParams['depth_peeling']
    else:
        depth_peeling = global_theme.depth_peeling
    depth_peeling_enabled = depth_peeling["enabled"]
    depth_peeling["enabled"] = False
    try:
        yield
    finally:
        # restore the user's original setting
        depth_peeling["enabled"] = depth_peeling_enabled
def _is_mesa(plotter):
    """Return True if the OpenGL renderer string reports Mesa."""
    # MESA (could use GPUInfo / _get_gpu_info here, but it takes
    # > 700 ms to make a new window + report capabilities!)
    # CircleCI's is: "Mesa 20.0.8 via llvmpipe (LLVM 10.0.0, 256 bits)"
    gpu_info_full = plotter.ren_win.ReportCapabilities()
    gpu_info = re.findall("OpenGL renderer string:(.+)\n", gpu_info_full)
    gpu_info = ' '.join(gpu_info).lower()
    is_mesa = 'mesa' in gpu_info.split()
    if is_mesa:
        # Try to warn if it's ancient
        version = re.findall("mesa ([0-9.]+) .*", gpu_info) or \
            re.findall("OpenGL version string: .* Mesa ([0-9.]+)\n",
                       gpu_info_full)
        if version:
            version = version[0]
            if _compare_version(version, '<', '18.3.6'):
                warn(f'Mesa version {version} is too old for translucent 3D '
                     'surface rendering, consider upgrading to 18.3.6 or '
                     'later.')
        else:
            # NOTE(review): a Mesa renderer whose version cannot be parsed
            # is treated as fatal here — presumably unreachable in practice;
            # confirm whether a warning would be more appropriate.
            raise RuntimeError
    return is_mesa
| bsd-3-clause | 4038c0f13df4a226bfbaab566cb7efc3 | 37.140523 | 79 | 0.577628 | 3.983276 | false | false | false | false |
mne-tools/mne-python | mne/preprocessing/ica.py | 1 | 125844 | # -*- coding: utf-8 -*-
#
# Authors: Denis A. Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Juergen Dammers <j.dammers@fz-juelich.de>
#
# License: BSD-3-Clause
from inspect import isfunction, signature, Parameter
from collections import namedtuple
from collections.abc import Sequence
from copy import deepcopy
from numbers import Integral
from time import time
from dataclasses import dataclass
from typing import Optional, List
import warnings
import math
import json
import numpy as np
from .ecg import (qrs_detector, _get_ecg_channel_index, _make_ecg,
create_ecg_epochs)
from .eog import _find_eog_events, _get_eog_channel_index
from .infomax_ import infomax
from ..cov import compute_whitener
from .. import Covariance, Evoked
from ..defaults import (_BORDER_DEFAULT, _EXTRAPOLATE_DEFAULT,
_INTERPOLATION_DEFAULT)
from ..io.pick import (pick_types, pick_channels, pick_info,
_picks_to_idx, _get_channel_types, _DATA_CH_TYPES_SPLIT)
from ..io.proj import make_projector
from ..io.write import (write_double_matrix, write_string,
write_name_list, write_int, start_block,
end_block)
from ..io.tree import dir_tree_find
from ..io.open import fiff_open
from ..io.tag import read_tag
from ..io.meas_info import write_meas_info, read_meas_info, ContainsMixin
from ..io.constants import FIFF
from ..io.base import BaseRaw
from ..io.eeglab.eeglab import _get_info, _check_load_mat
from ..epochs import BaseEpochs
from ..viz import (plot_ica_components, plot_ica_scores,
plot_ica_sources, plot_ica_overlay)
from ..viz.ica import plot_ica_properties
from ..viz.topomap import _plot_corrmap
from ..channels.channels import _contains_ch_type
from ..channels.layout import _find_topomap_coords
from ..io.write import start_and_end_file, write_id
from ..utils import (logger, check_fname, _check_fname, verbose,
_reject_data_segments, check_random_state, _validate_type,
compute_corr, _get_inst_data, _ensure_int, repr_html,
copy_function_doc_to_method_doc, _pl, warn, Bunch,
_check_preload, _check_compensation_grade, fill_doc,
_check_option, _PCA, int_like, _require_version,
_check_all_same_channel_names, _check_on_missing,
_on_missing)
from ..fixes import _safe_svd
from ..filter import filter_data
from .bads import _find_outliers
from .ctps_ import ctps
from ..io.pick import pick_channels_regexp, _picks_by_type
# Public API of this module.
__all__ = ('ICA', 'ica_find_ecg_events', 'ica_find_eog_events',
           'get_score_funcs', 'read_ica', 'read_ica_eeglab')
def _make_xy_sfunc(func, ndim_output=False):
"""Aux function."""
if ndim_output:
def sfunc(x, y):
return np.array([func(a, y.ravel()) for a in x])[:, 0]
else:
def sfunc(x, y):
return np.array([func(a, y.ravel()) for a in x])
sfunc.__name__ = '.'.join(['score_func', func.__module__, func.__name__])
sfunc.__doc__ = func.__doc__
return sfunc
# Violate our assumption that the output is 1D so can't be used.
# Could eventually be added but probably not worth the effort unless someone
# requests it.
_BLOCKLIST = {'somersd'}  # score functions excluded in get_score_funcs()
# makes score funcs attr accessible for users
def get_score_funcs():
    """Get the score functions.

    Returns
    -------
    score_funcs : dict
        The score functions.
    """
    from scipy import stats
    from scipy.spatial import distance
    score_funcs = Bunch()
    xy_arg_dist_funcs = [(n, f) for n, f in vars(distance).items()
                         if isfunction(f) and not n.startswith('_') and
                         n not in _BLOCKLIST]
    xy_arg_stats_funcs = [(n, f) for n, f in vars(stats).items()
                          if isfunction(f) and not n.startswith('_') and
                          n not in _BLOCKLIST]
    # Bugfix: ``signature(f).parameters`` is a mapping (name -> Parameter),
    # so comparing it directly to the *list* ['u', 'v'] was always False and
    # silently excluded every distance-based score function. Compare the
    # parameter *names* instead.
    score_funcs.update({n: _make_xy_sfunc(f)
                        for n, f in xy_arg_dist_funcs
                        if list(signature(f).parameters) == ['u', 'v']})
    # In SciPy 1.9+, pearsonr has (x, y, *, alternative='two-sided'), so we
    # should just look at the positional_only and positional_or_keyword entries
    for n, f in xy_arg_stats_funcs:
        params = [name for name, param in signature(f).parameters.items()
                  if param.kind in
                  (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)]
        if params == ['x', 'y']:
            score_funcs.update({n: _make_xy_sfunc(f, ndim_output=True)})
    assert 'pearsonr' in score_funcs
    return score_funcs
def _check_for_unsupported_ica_channels(picks, info, allow_ref_meg=False):
    """Check for channels in picks that are not considered valid channels.

    Accepted channels are the data channels
    ('seeg', 'dbs', 'ecog', 'eeg', 'hbo', 'hbr', 'mag', and 'grad'), 'eog'
    and 'ref_meg'.

    This prevents the program from crashing without
    feedback when a bad channel is provided to ICA whitening.

    Raises
    ------
    ValueError
        If any picked channel has an unsupported type.
    """
    types = _DATA_CH_TYPES_SPLIT + ('eog',)
    types += ('ref_meg',) if allow_ref_meg else ()
    chs = _get_channel_types(info, picks, unique=True, only_data_chs=False)
    # generator avoids building an intermediate list just for all()
    check = all(ch in types for ch in chs)
    if not check:
        # Fixed: the two concatenated message fragments were missing a
        # separating space ("...: %s.Only the following...").
        raise ValueError('Invalid channel type%s passed for ICA: %s. '
                         'Only the following types are supported: %s'
                         % (_pl(chs), chs, types))
# ICA algorithms accepted by the ``method`` parameter of ICA.
_KNOWN_ICA_METHODS = ('fastica', 'infomax', 'picard')
@fill_doc
class ICA(ContainsMixin):
u"""Data decomposition using Independent Component Analysis (ICA).
This object estimates independent components from :class:`mne.io.Raw`,
:class:`mne.Epochs`, or :class:`mne.Evoked` objects. Components can
optionally be removed (for artifact repair) prior to signal reconstruction.
.. warning:: ICA is sensitive to low-frequency drifts and therefore
requires the data to be high-pass filtered prior to fitting.
Typically, a cutoff frequency of 1 Hz is recommended.
Parameters
----------
n_components : int | float | None
Number of principal components (from the pre-whitening PCA step) that
are passed to the ICA algorithm during fitting:
- :class:`int`
Must be greater than 1 and less than or equal to the number of
channels.
- :class:`float` between 0 and 1 (exclusive)
Will select the smallest number of components required to explain
the cumulative variance of the data greater than ``n_components``.
Consider this hypothetical example: we have 3 components, the first
explaining 70%%, the second 20%%, and the third the remaining 10%%
of the variance. Passing 0.8 here (corresponding to 80%% of
explained variance) would yield the first two components,
explaining 90%% of the variance: only by using both components the
requested threshold of 80%% explained variance can be exceeded. The
third component, on the other hand, would be excluded.
- ``None``
``0.999999`` will be used. This is done to avoid numerical
stability problems when whitening, particularly when working with
rank-deficient data.
Defaults to ``None``. The actual number used when executing the
:meth:`ICA.fit` method will be stored in the attribute
``n_components_`` (note the trailing underscore).
.. versionchanged:: 0.22
For a :class:`python:float`, the number of components will account
for *greater than* the given variance level instead of *less than or
equal to* it. The default (None) will also take into account the
rank deficiency of the data.
noise_cov : None | instance of Covariance
Noise covariance used for pre-whitening. If None (default), channels
are scaled to unit variance ("z-standardized") as a group by channel
type prior to the whitening by PCA.
%(random_state)s
method : 'fastica' | 'infomax' | 'picard'
The ICA method to use in the fit method. Use the ``fit_params`` argument
to set additional parameters. Specifically, if you want Extended
Infomax, set ``method='infomax'`` and ``fit_params=dict(extended=True)``
(this also works for ``method='picard'``). Defaults to ``'fastica'``.
For reference, see :footcite:`Hyvarinen1999,BellSejnowski1995,LeeEtAl1999,AblinEtAl2018`.
fit_params : dict | None
Additional parameters passed to the ICA estimator as specified by
``method``. Allowed entries are determined by the various algorithm
implementations: see :class:`~sklearn.decomposition.FastICA`,
:func:`~picard.picard`, :func:`~mne.preprocessing.infomax`.
max_iter : int | 'auto'
Maximum number of iterations during fit. If ``'auto'``, it
will set maximum iterations to ``1000`` for ``'fastica'``
and to ``500`` for ``'infomax'`` or ``'picard'``. The actual number of
iterations it took :meth:`ICA.fit` to complete will be stored in the
``n_iter_`` attribute.
allow_ref_meg : bool
Allow ICA on MEG reference channels. Defaults to False.
.. versionadded:: 0.18
%(verbose)s
Attributes
----------
current_fit : 'unfitted' | 'raw' | 'epochs'
Which data type was used for the fit.
ch_names : list-like
Channel names resulting from initial picking.
n_components_ : int
If fit, the actual number of PCA components used for ICA decomposition.
pre_whitener_ : ndarray, shape (n_channels, 1) or (n_channels, n_channels)
If fit, array used to pre-whiten the data prior to PCA.
pca_components_ : ndarray, shape ``(n_channels, n_channels)``
If fit, the PCA components.
pca_mean_ : ndarray, shape (n_channels,)
If fit, the mean vector used to center the data before doing the PCA.
pca_explained_variance_ : ndarray, shape ``(n_channels,)``
If fit, the variance explained by each PCA component.
mixing_matrix_ : ndarray, shape ``(n_components_, n_components_)``
If fit, the whitened mixing matrix to go back from ICA space to PCA
space.
It is, in combination with the ``pca_components_``, used by
:meth:`ICA.apply` and :meth:`ICA.get_components` to re-mix/project
a subset of the ICA components into the observed channel space.
The former method also removes the pre-whitening (z-scaling) and the
de-meaning.
unmixing_matrix_ : ndarray, shape ``(n_components_, n_components_)``
If fit, the whitened matrix to go from PCA space to ICA space.
Used, in combination with the ``pca_components_``, by the methods
:meth:`ICA.get_sources` and :meth:`ICA.apply` to unmix the observed
data.
exclude : array-like of int
List or np.array of sources indices to exclude when re-mixing the data
in the :meth:`ICA.apply` method, i.e. artifactual ICA components.
The components identified manually and by the various automatic
artifact detection methods should be (manually) appended
(e.g. ``ica.exclude.extend(eog_inds)``).
(There is also an ``exclude`` parameter in the :meth:`ICA.apply`
method.) To scrap all marked components, set this attribute to an empty
list.
%(info)s
n_samples_ : int
The number of samples used on fit.
labels_ : dict
A dictionary of independent component indices, grouped by types of
independent components. This attribute is set by some of the artifact
detection functions.
n_iter_ : int
If fit, the number of iterations required to complete ICA.
Notes
-----
.. versionchanged:: 0.23
Version 0.23 introduced the ``max_iter='auto'`` settings for maximum
iterations. With version 0.24 ``'auto'`` will be the new
default, replacing the current ``max_iter=200``.
.. versionchanged:: 0.23
Warn if `~mne.Epochs` were baseline-corrected.
.. note:: If you intend to fit ICA on `~mne.Epochs`, it is recommended to
high-pass filter, but **not** baseline correct the data for good
ICA performance. A warning will be emitted otherwise.
A trailing ``_`` in an attribute name signifies that the attribute was
added to the object during fitting, consistent with standard scikit-learn
practice.
ICA :meth:`fit` in MNE proceeds in two steps:
1. :term:`Whitening <whitening>` the data by means of a pre-whitening step
(using ``noise_cov`` if provided, or the standard deviation of each
channel type) and then principal component analysis (PCA).
2. Passing the ``n_components`` largest-variance components to the ICA
algorithm to obtain the unmixing matrix (and by pseudoinversion, the
mixing matrix).
ICA :meth:`apply` then:
1. Unmixes the data with the ``unmixing_matrix_``.
2. Includes ICA components based on ``ica.include`` and ``ica.exclude``.
3. Re-mixes the data with ``mixing_matrix_``.
4. Restores any data not passed to the ICA algorithm, i.e., the PCA
components between ``n_components`` and ``n_pca_components``.
``n_pca_components`` determines how many PCA components will be kept when
reconstructing the data when calling :meth:`apply`. This parameter can be
used for dimensionality reduction of the data, or dealing with low-rank
data (such as those with projections, or MEG data processed by SSS). It is
important to remove any numerically-zero-variance components in the data,
otherwise numerical instability causes problems when computing the mixing
matrix. Alternatively, using ``n_components`` as a float will also avoid
numerical stability problems.
The ``n_components`` parameter determines how many components out of
the ``n_channels`` PCA components the ICA algorithm will actually fit.
This is not typically used for EEG data, but for MEG data, it's common to
use ``n_components < n_channels``. For example, full-rank
306-channel MEG data might use ``n_components=40`` to find (and
later exclude) only large, dominating artifacts in the data, but still
reconstruct the data using all 306 PCA components. Setting
``n_pca_components=40``, on the other hand, would actually reduce the
rank of the reconstructed data to 40, which is typically undesirable.
If you are migrating from EEGLAB and intend to reduce dimensionality via
PCA, similarly to EEGLAB's ``runica(..., 'pca', n)`` functionality,
pass ``n_components=n`` during initialization and then
``n_pca_components=n`` during :meth:`apply`. The resulting reconstructed
data after :meth:`apply` will have rank ``n``.
.. note:: Commonly used for reasons of i) computational efficiency and
ii) additional noise reduction, it is a matter of current debate
whether pre-ICA dimensionality reduction could decrease the
reliability and stability of the ICA, at least for EEG data and
especially during preprocessing :footcite:`ArtoniEtAl2018`.
(But see also :footcite:`Montoya-MartinezEtAl2017` for a
possibly confounding effect of the different whitening/sphering
methods used in this paper (ZCA vs. PCA).)
On the other hand, for rank-deficient data such as EEG data after
average reference or interpolation, it is recommended to reduce
the dimensionality (by 1 for average reference and 1 for each
interpolated channel) for optimal ICA performance (see the
`EEGLAB wiki <eeglab_wiki_>`_).
Caveat! If supplying a noise covariance, keep track of the projections
available in the cov or in the raw object. For example, if you are
interested in EOG or ECG artifacts, EOG and ECG projections should be
temporarily removed before fitting ICA, for example::
>> projs, raw.info['projs'] = raw.info['projs'], []
>> ica.fit(raw)
>> raw.info['projs'] = projs
Methods currently implemented are FastICA (default), Infomax, and Picard.
Standard Infomax can be quite sensitive to differences in floating point
arithmetic. Extended Infomax seems to be more stable in this respect,
enhancing reproducibility and stability of results; use Extended Infomax
via ``method='infomax', fit_params=dict(extended=True)``. Allowed entries
in ``fit_params`` are determined by the various algorithm implementations:
see :class:`~sklearn.decomposition.FastICA`, :func:`~picard.picard`,
:func:`~mne.preprocessing.infomax`.
.. note:: Picard can be used to solve the same problems as FastICA,
Infomax, and extended Infomax, but typically converges faster
than either of those methods. To make use of Picard's speed while
still obtaining the same solution as with other algorithms, you
need to specify ``method='picard'`` and ``fit_params`` as a
dictionary with the following combination of keys:
- ``dict(ortho=False, extended=False)`` for Infomax
- ``dict(ortho=False, extended=True)`` for extended Infomax
- ``dict(ortho=True, extended=True)`` for FastICA
Reducing the tolerance (set in ``fit_params``) speeds up estimation at the
cost of consistency of the obtained results. It is difficult to directly
compare tolerance levels between Infomax and Picard, but for Picard and
FastICA a good rule of thumb is ``tol_fastica == tol_picard ** 2``.
.. _eeglab_wiki: https://eeglab.org/tutorials/06_RejectArtifacts/RunICA.html#how-to-deal-with-corrupted-ica-decompositions
References
----------
.. footbibliography::
""" # noqa: E501
    @verbose
    def __init__(self, n_components=None, *, noise_cov=None,
                 random_state=None, method='fastica', fit_params=None,
                 max_iter='auto', allow_ref_meg=False,
                 verbose=None):  # noqa: D102
        # Parameters and attributes are documented in the class docstring.
        _validate_type(method, str, 'method')
        _validate_type(n_components, (float, 'int-like', None))
        if method != 'imported_eeglab':  # internal use only
            _check_option('method', method, _KNOWN_ICA_METHODS)
        self.noise_cov = noise_cov
        # Reject invalid n_components early: floats are interpreted as an
        # explained-variance threshold and must lie strictly in (0, 1);
        # selecting exactly one component is not supported.
        for (kind, val) in [('n_components', n_components)]:
            if isinstance(val, float) and not 0 < val < 1:
                raise ValueError('Selecting ICA components by explained '
                                 'variance needs values between 0.0 and 1.0 '
                                 f'(exclusive), got {kind}={val}')
            if isinstance(val, int_like) and val == 1:
                raise ValueError(
                    f'Selecting one component with {kind}={val} is not '
                    'supported')
        self.current_fit = 'unfitted'
        self.n_components = n_components
        # In newer ICAs this should always be None, but keep it for
        # backward compat with older versions of MNE that used it
        self._max_pca_components = None
        self.n_pca_components = None
        self.ch_names = None
        self.random_state = random_state
        if fit_params is None:
            fit_params = {}
        fit_params = deepcopy(fit_params)  # avoid side effects
        if method == 'fastica':
            # Fill in FastICA defaults without overriding user-specified keys.
            update = {'algorithm': 'parallel', 'fun': 'logcosh',
                      'fun_args': None}
            fit_params.update({k: v for k, v in update.items() if k
                               not in fit_params})
        elif method == 'infomax':
            # extended=True is default in underlying function, but we want
            # default False here unless user specified True:
            fit_params.setdefault('extended', False)
        _validate_type(max_iter, (str, 'int-like'), 'max_iter')
        if isinstance(max_iter, str):
            _check_option('max_iter', max_iter, ('auto',), 'when str')
            # Resolve 'auto' to a method-specific iteration budget.
            if method == 'fastica':
                max_iter = 1000
            elif method in ['infomax', 'picard']:
                max_iter = 500
        fit_params.setdefault('max_iter', max_iter)
        self.max_iter = max_iter
        self.fit_params = fit_params
        self.exclude = []
        self.info = None
        self.method = method
        self.labels_ = dict()
        self.allow_ref_meg = allow_ref_meg
def _get_infos_for_repr(self):
@dataclass
class _InfosForRepr:
# XXX replace with Optional[Literal['raw data', 'epochs'] once we
# drop support for Py 3.7
fit_on: Optional[str]
# XXX replace with fit_method: Literal['fastica', 'infomax',
# 'extended-infomax', 'picard'] once we drop support for Py 3.7
fit_method: str
fit_n_iter: Optional[int]
fit_n_samples: Optional[int]
fit_n_components: Optional[int]
fit_n_pca_components: Optional[int]
ch_types: List[str]
excludes: List[str]
if self.current_fit == 'unfitted':
fit_on = None
elif self.current_fit == 'raw':
fit_on = 'raw data'
else:
fit_on = 'epochs'
fit_method = self.method
fit_n_iter = getattr(self, 'n_iter_', None)
fit_n_samples = getattr(self, 'n_samples_', None)
fit_n_components = getattr(self, 'n_components_', None)
fit_n_pca_components = getattr(self, 'pca_components_', None)
if fit_n_pca_components is not None:
fit_n_pca_components = len(self.pca_components_)
if self.info is not None:
ch_types = [c for c in _DATA_CH_TYPES_SPLIT if c in self]
else:
ch_types = []
if self.exclude:
excludes = [self._ica_names[i] for i in self.exclude]
else:
excludes = []
infos_for_repr = _InfosForRepr(
fit_on=fit_on,
fit_method=fit_method,
fit_n_iter=fit_n_iter,
fit_n_samples=fit_n_samples,
fit_n_components=fit_n_components,
fit_n_pca_components=fit_n_pca_components,
ch_types=ch_types,
excludes=excludes
)
return infos_for_repr
def __repr__(self):
"""ICA fit information."""
infos = self._get_infos_for_repr()
s = (f'{infos.fit_on or "no"} decomposition, '
f'method: {infos.fit_method}')
if infos.fit_on is not None:
s += (
f' (fit in {infos.fit_n_iter} iterations on '
f'{infos.fit_n_samples} samples), '
f'{infos.fit_n_components} ICA components '
f'({infos.fit_n_pca_components} PCA components available), '
f'channel types: {", ".join(infos.ch_types)}, '
f'{len(infos.excludes) or "no"} sources marked for exclusion'
)
return f'<ICA | {s}>'
@repr_html
def _repr_html_(self):
from ..html_templates import repr_templates_env
infos = self._get_infos_for_repr()
t = repr_templates_env.get_template('ica.html.jinja')
html = t.render(
fit_on=infos.fit_on,
method=infos.fit_method,
n_iter=infos.fit_n_iter,
n_samples=infos.fit_n_samples,
n_components=infos.fit_n_components,
n_pca_components=infos.fit_n_pca_components,
ch_types=infos.ch_types,
excludes=infos.excludes
)
return html
    @verbose
    def fit(self, inst, picks=None, start=None, stop=None, decim=None,
            reject=None, flat=None, tstep=2.0, reject_by_annotation=True,
            verbose=None):
        """Run the ICA decomposition on raw data.

        Caveat! If supplying a noise covariance keep track of the projections
        available in the cov, the raw or the epochs object. For example,
        if you are interested in EOG or ECG artifacts, EOG and ECG projections
        should be temporarily removed before fitting the ICA.

        Parameters
        ----------
        inst : instance of Raw or Epochs
            The data to be decomposed.
        %(picks_good_data_noref)s
            This selection remains throughout the initialized ICA solution.
        start, stop : int | float | None
            First and last sample to include. If float, data will be
            interpreted as time in seconds. If ``None``, data will be used from
            the first sample and to the last sample, respectively.

            .. note:: These parameters only have an effect if ``inst`` is
                      `~mne.io.Raw` data.
        decim : int | None
            Increment for selecting only each n-th sampling point. If ``None``,
            all samples between ``start`` and ``stop`` (inclusive) are used.
        reject, flat : dict | None
            Rejection parameters based on peak-to-peak amplitude (PTP)
            in the continuous data. Signal periods exceeding the thresholds
            in ``reject`` or less than the thresholds in ``flat`` will be
            removed before fitting the ICA.

            .. note:: These parameters only have an effect if ``inst`` is
                      `~mne.io.Raw` data. For `~mne.Epochs`, perform PTP
                      rejection via :meth:`~mne.Epochs.drop_bad`.

            Valid keys are all channel types present in the data. Values must
            be integers or floats.

            If ``None``, no PTP-based rejection will be performed. Example::

                reject = dict(
                    grad=4000e-13,  # T / m (gradiometers)
                    mag=4e-12,  # T (magnetometers)
                    eeg=40e-6,  # V (EEG channels)
                    eog=250e-6  # V (EOG channels)
                )
                flat = None  # no rejection based on flatness
        tstep : float
            Length of data chunks for artifact rejection in seconds.

            .. note:: This parameter only has an effect if ``inst`` is
                      `~mne.io.Raw` data.
        %(reject_by_annotation_raw)s

            .. versionadded:: 0.14.0
        %(verbose)s

        Returns
        -------
        self : instance of ICA
            Returns the modified instance.
        """
        # Verify the optional backend required by the chosen method is
        # importable before doing any work.
        req_map = dict(fastica='sklearn', picard='picard')
        for method, mod in req_map.items():
            if self.method == method:
                _require_version(mod, f'use method={repr(method)}')
        _validate_type(inst, (BaseRaw, BaseEpochs), 'inst', 'Raw or Epochs')
        if np.isclose(inst.info['highpass'], 0.):
            warn('The data has not been high-pass filtered. For good ICA '
                 'performance, it should be high-pass filtered (e.g., with a '
                 '1.0 Hz lower bound) before fitting ICA.')
        if isinstance(inst, BaseEpochs) and inst.baseline is not None:
            warn('The epochs you passed to ICA.fit() were baseline-corrected. '
                 'However, we suggest to fit ICA only on data that has been '
                 'high-pass filtered, but NOT baseline-corrected.')
        if not isinstance(inst, BaseRaw):
            # These parameters are only meaningful for continuous (Raw) data;
            # warn the user rather than silently ignoring them.
            ignored_params = [
                param_name for param_name, param_val in zip(
                    ('start', 'stop', 'reject', 'flat'),
                    (start, stop, reject, flat)
                )
                if param_val is not None
            ]
            if ignored_params:
                warn(f'The following parameters passed to ICA.fit() will be '
                     f'ignored, as they only affect raw data (and it appears '
                     f'you passed epochs): {", ".join(ignored_params)}')
        picks = _picks_to_idx(inst.info, picks, allow_empty=False,
                              with_ref_meg=self.allow_ref_meg)
        _check_for_unsupported_ica_channels(
            picks, inst.info, allow_ref_meg=self.allow_ref_meg)
        # Actually start fitting
        t_start = time()
        if self.current_fit != 'unfitted':
            # Refitting: drop all attributes from the previous fit.
            self._reset()
        logger.info('Fitting ICA to data using %i channels '
                    '(please be patient, this may take a while)' % len(picks))
        # n_components could be float 0 < x < 1, but that's okay here
        if self.n_components is not None and self.n_components > len(picks):
            raise ValueError(
                f'ica.n_components ({self.n_components}) cannot '
                f'be greater than len(picks) ({len(picks)})')
        # filter out all the channels the raw wouldn't have initialized
        self.info = pick_info(inst.info, picks)
        if self.info['comps']:
            with self.info._unlock():
                self.info['comps'] = []
        self.ch_names = self.info['ch_names']
        if isinstance(inst, BaseRaw):
            self._fit_raw(inst, picks, start, stop, decim, reject, flat,
                          tstep, reject_by_annotation, verbose)
        else:
            assert isinstance(inst, BaseEpochs)
            self._fit_epochs(inst, picks, decim, verbose)
        # sort ICA components by explained variance
        var = _ica_explained_variance(self, inst)
        var_ord = var.argsort()[::-1]
        _sort_components(self, var_ord, copy=False)
        t_stop = time()
        logger.info("Fitting ICA took {:.1f}s.".format(t_stop - t_start))
        return self
def _reset(self):
"""Aux method."""
for key in ('pre_whitener_', 'unmixing_matrix_', 'mixing_matrix_',
'n_components_', 'n_samples_', 'pca_components_',
'pca_explained_variance_',
'pca_mean_', 'n_iter_', 'drop_inds_', 'reject_'):
if hasattr(self, key):
delattr(self, key)
self.current_fit = 'unfitted'
    def _fit_raw(self, raw, picks, start, stop, decim, reject, flat, tstep,
                 reject_by_annotation, verbose):
        """Extract (optionally decimated/PTP-rejected) Raw data and fit."""
        start, stop = _check_start_stop(raw, start, stop)
        # Map the boolean flag to the string mode raw.get_data() expects.
        reject_by_annotation = 'omit' if reject_by_annotation else None
        # this will be a copy
        data = raw.get_data(picks, start, stop, reject_by_annotation)
        # this will be a view
        if decim is not None:
            data = data[:, ::decim]
        # this will make a copy
        if (reject is not None) or (flat is not None):
            self.reject_ = reject
            data, self.drop_inds_ = _reject_data_segments(data, reject, flat,
                                                          decim, self.info,
                                                          tstep)
        else:
            self.reject_ = None
        # Record how many samples actually entered the decomposition.
        self.n_samples_ = data.shape[1]
        self._fit(data, 'raw')
        return self
    def _fit_epochs(self, epochs, picks, decim, verbose):
        """Concatenate (optionally decimated) epochs data and fit."""
        if epochs.events.size == 0:
            raise RuntimeError('Tried to fit ICA with epochs, but none were '
                               'found: epochs.events is "{}".'
                               .format(epochs.events))
        # this should be a copy (picks a list of int)
        data = epochs.get_data()[:, picks]
        # this will be a view
        if decim is not None:
            data = data[:, :, ::decim]
        # Total samples = n_epochs * n_times (after decimation).
        self.n_samples_ = data.shape[0] * data.shape[2]
        # This will make at least one copy (one from hstack, maybe one
        # more from _pre_whiten)
        data = np.hstack(data)
        self._fit(data, 'epochs')
        # Remember the epochs' own PTP rejection settings for bookkeeping.
        self.reject_ = deepcopy(epochs.reject)
        return self
    def _compute_pre_whitener(self, data):
        """Compute and store the pre-whitener (scaling vector or matrix)."""
        data = self._do_proj(data, log_suffix='(pre-whitener computation)')
        if self.noise_cov is None:
            # use standardization as whitener
            # Scale (z-score) the data by channel type
            info = self.info
            pre_whitener = np.empty([len(data), 1])
            for _, picks_ in _picks_by_type(info, ref_meg=False, exclude=[]):
                pre_whitener[picks_] = np.std(data[picks_])
            # Reference-MEG and EOG channels are not covered by
            # _picks_by_type above, so scale them separately.
            if _contains_ch_type(info, "ref_meg"):
                picks_ = pick_types(info, ref_meg=True, exclude=[])
                pre_whitener[picks_] = np.std(data[picks_])
            if _contains_ch_type(info, "eog"):
                picks_ = pick_types(info, eog=True, exclude=[])
                pre_whitener[picks_] = np.std(data[picks_])
        else:
            # Full whitening matrix derived from the noise covariance.
            pre_whitener, _ = compute_whitener(
                self.noise_cov, self.info, picks=self.info.ch_names)
            assert data.shape[0] == pre_whitener.shape[1]
        self.pre_whitener_ = pre_whitener
    def _do_proj(self, data, log_suffix=''):
        """Apply the active SSP projectors to ``data`` (when applicable)."""
        if self.info is not None and self.info['projs']:
            proj, nproj, _ = make_projector(
                [p for p in self.info['projs'] if p['active']],
                self.info['ch_names'], include_active=True)
            if nproj:
                logger.info(
                    f'    Applying projection operator with {nproj} '
                    f'vector{_pl(nproj)}'
                    f'{" " if log_suffix else ""}{log_suffix}')
                if self.noise_cov is None:  # otherwise it's in pre_whitener_
                    data = proj @ data
        return data
def _pre_whiten(self, data):
data = self._do_proj(data, log_suffix='(pre-whitener application)')
if self.noise_cov is None:
data /= self.pre_whitener_
else:
data = self.pre_whitener_ @ data
return data
    def _fit(self, data, fit_type):
        """Run pre-whitening, PCA and the ICA algorithm on 2D channel data.

        ``data`` has shape (n_channels, n_samples); ``fit_type`` is
        'raw' or 'epochs' and is stored in ``current_fit``.
        """
        random_state = check_random_state(self.random_state)
        n_channels, n_samples = data.shape
        self._compute_pre_whitener(data)
        data = self._pre_whiten(data)
        # PCA step: transpose so samples are rows, as _PCA expects.
        pca = _PCA(n_components=self._max_pca_components, whiten=True)
        data = pca.fit_transform(data.T)
        use_ev = pca.explained_variance_ratio_
        n_pca = self.n_pca_components
        if isinstance(n_pca, float):
            # Float n_pca_components: explained-variance threshold.
            n_pca = int(_exp_var_ncomp(use_ev, n_pca)[0])
        elif n_pca is None:
            n_pca = len(use_ev)
        assert isinstance(n_pca, (int, np.int_))
        # If user passed a float, select the PCA components explaining the
        # given cumulative variance. This information will later be used to
        # only submit the corresponding parts of the data to ICA.
        if self.n_components is None:
            # None case: check if n_pca_components or 0.999999 yields smaller
            msg = 'Selecting by non-zero PCA components'
            self.n_components_ = min(
                n_pca, _exp_var_ncomp(use_ev, 0.999999)[0])
        elif isinstance(self.n_components, float):
            self.n_components_, ev = _exp_var_ncomp(use_ev, self.n_components)
            if self.n_components_ == 1:
                raise RuntimeError(
                    'One PCA component captures most of the '
                    f'explained variance ({100 * ev}%), your threshold '
                    'results in 1 component. You should select '
                    'a higher value.')
            msg = 'Selecting by explained variance'
        else:
            msg = 'Selecting by number'
            self.n_components_ = _ensure_int(self.n_components)
        # check to make sure something okay happened
        if self.n_components_ > n_pca:
            ev = np.cumsum(use_ev)
            ev /= ev[-1]
            evs = 100 * ev[[self.n_components_ - 1, n_pca - 1]]
            raise RuntimeError(
                f'n_components={self.n_components} requires '
                f'{self.n_components_} PCA values (EV={evs[0]:0.1f}%) but '
                f'n_pca_components ({self.n_pca_components}) results in '
                f'only {n_pca} components (EV={evs[1]:0.1f}%)')
        logger.info('%s: %s components' % (msg, self.n_components_))
        # the things to store for PCA
        self.pca_mean_ = pca.mean_
        self.pca_components_ = pca.components_
        self.pca_explained_variance_ = pca.explained_variance_
        del pca
        # update number of components
        self._update_ica_names()
        if self.n_pca_components is not None and \
                self.n_pca_components > len(self.pca_components_):
            raise ValueError(
                f'n_pca_components ({self.n_pca_components}) is greater than '
                f'the number of PCA components ({len(self.pca_components_)})')
        # take care of ICA
        sel = slice(0, self.n_components_)
        if self.method == 'fastica':
            from sklearn.decomposition import FastICA
            ica = FastICA(
                whiten=False, random_state=random_state, **self.fit_params)
            ica.fit(data[:, sel])
            self.unmixing_matrix_ = ica.components_
            self.n_iter_ = ica.n_iter_
        elif self.method in ('infomax', 'extended-infomax'):
            unmixing_matrix, n_iter = infomax(
                data[:, sel], random_state=random_state, return_n_iter=True,
                **self.fit_params)
            self.unmixing_matrix_ = unmixing_matrix
            self.n_iter_ = n_iter
            del unmixing_matrix, n_iter
        elif self.method == 'picard':
            from picard import picard
            _, W, _, n_iter = picard(
                data[:, sel].T, whiten=False, return_n_iter=True,
                random_state=random_state, **self.fit_params)
            self.unmixing_matrix_ = W
            self.n_iter_ = n_iter + 1  # picard() starts counting at 0
            del _, n_iter
        assert self.unmixing_matrix_.shape == (self.n_components_,) * 2
        norms = self.pca_explained_variance_
        stable = norms / norms[0] > 1e-6  # to be stable during pinv
        norms = norms[:self.n_components_]
        if not stable[self.n_components_ - 1]:
            # Warn when the retained components include numerically tiny
            # variances that would make the pseudoinverse ill-conditioned.
            max_int = np.where(stable)[0][-1] + 1
            warn(f'Using n_components={self.n_components} (resulting in '
                 f'n_components_={self.n_components_}) may lead to an '
                 f'unstable mixing matrix estimation because the ratio '
                 f'between the largest ({norms[0]:0.2g}) and smallest '
                 f'({norms[-1]:0.2g}) variances is too large (> 1e6); '
                 f'consider setting n_components=0.999999 or an '
                 f'integer <= {max_int}')
        norms = np.sqrt(norms)
        norms[norms == 0] = 1.
        self.unmixing_matrix_ /= norms  # whitening
        self._update_mixing_matrix()
        self.current_fit = fit_type
def _update_mixing_matrix(self):
from scipy import linalg
self.mixing_matrix_ = linalg.pinv(self.unmixing_matrix_)
def _update_ica_names(self):
"""Update ICA names when n_components_ is set."""
self._ica_names = ['ICA%03d' % ii for ii in range(self.n_components_)]
def _transform(self, data):
"""Compute sources from data (operates inplace)."""
data = self._pre_whiten(data)
if self.pca_mean_ is not None:
data -= self.pca_mean_[:, None]
# Apply unmixing
pca_data = np.dot(self.unmixing_matrix_,
self.pca_components_[:self.n_components_])
# Apply PCA
sources = np.dot(pca_data, data)
return sources
def _transform_raw(self, raw, start, stop, reject_by_annotation=False):
"""Transform raw data."""
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please fit ICA.')
start, stop = _check_start_stop(raw, start, stop)
picks = self._get_picks(raw)
reject = 'omit' if reject_by_annotation else None
data = raw.get_data(picks, start, stop, reject)
return self._transform(data)
def _transform_epochs(self, epochs, concatenate):
"""Aux method."""
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please fit ICA.')
picks = self._get_picks(epochs)
data = np.hstack(epochs.get_data()[:, picks])
sources = self._transform(data)
if not concatenate:
# Put the data back in 3D
sources = np.array(np.split(sources, len(epochs.events), 1))
return sources
def _transform_evoked(self, evoked):
"""Aux method."""
if not hasattr(self, 'mixing_matrix_'):
raise RuntimeError('No fit available. Please fit ICA.')
picks = self._get_picks(evoked)
return self._transform(evoked.data[picks])
def _get_picks(self, inst):
"""Pick logic for _transform method."""
picks = _picks_to_idx(
inst.info, self.ch_names, exclude=[], allow_empty=True)
if len(picks) != len(self.ch_names):
if isinstance(inst, BaseRaw):
kind, do = 'Raw', "doesn't"
elif isinstance(inst, BaseEpochs):
kind, do = 'Epochs', "don't"
elif isinstance(inst, Evoked):
kind, do = 'Evoked', "doesn't"
else:
raise ValueError('Data input must be of Raw, Epochs or Evoked '
'type')
raise RuntimeError("%s %s match fitted data: %i channels "
"fitted but %i channels supplied. \nPlease "
"provide %s compatible with ica.ch_names"
% (kind, do, len(self.ch_names), len(picks),
kind))
return picks
def get_components(self):
"""Get ICA topomap for components as numpy arrays.
Returns
-------
components : array, shape (n_channels, n_components)
The ICA components (maps).
"""
return np.dot(self.mixing_matrix_[:, :self.n_components_].T,
self.pca_components_[:self.n_components_]).T
    def get_explained_variance_ratio(
            self, inst, *, components=None, ch_type=None
    ):
        """Get the proportion of data variance explained by ICA components.

        Parameters
        ----------
        inst : mne.io.BaseRaw | mne.BaseEpochs | mne.Evoked
            The uncleaned data.
        components : array-like of int | int | None
            The component(s) for which to do the calculation. If more than one
            component is specified, explained variance will be calculated
            jointly across all supplied components. If ``None`` (default), uses
            all available components.
        ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | array-like of str | None
            The channel type(s) to include in the calculation. If ``None``, all
            available channel types will be used.

        Returns
        -------
        dict (str, float)
            The fraction of variance in ``inst`` that can be explained by the
            ICA components, calculated separately for each channel type.
            Dictionary keys are the channel types, and corresponding explained
            variance ratios are the values.

        Notes
        -----
        A value similar to EEGLAB's ``pvaf`` (percent variance accounted for)
        will be calculated for the specified component(s).

        Since ICA components cannot be assumed to be aligned orthogonally, the
        sum of the proportion of variance explained by all components may not
        be equal to 1. In certain situations, the proportion of variance
        explained by a component may even be negative.

        .. versionadded:: 1.2
        """  # noqa: E501
        if self.current_fit == 'unfitted':
            raise ValueError('ICA must be fitted first.')
        _validate_type(
            item=inst, types=(BaseRaw, BaseEpochs, Evoked),
            item_name='inst'
        )
        _validate_type(
            item=components, types=(None, 'int-like', Sequence, np.ndarray),
            item_name='components', type_name='int, array-like of int, or None'
        )
        if isinstance(components, (Sequence, np.ndarray)):
            # Element-wise validation for array-like component selections.
            for item in components:
                _validate_type(
                    item=item, types='int-like',
                    item_name='Elements of "components"'
                )
        _validate_type(
            item=ch_type, types=(Sequence, np.ndarray, str, None),
            item_name='ch_type', type_name='str, array-like of str, or None'
        )
        # Normalize ch_type into a list of channel-type strings.
        if isinstance(ch_type, str):
            ch_types = [ch_type]
        elif ch_type is None:
            ch_types = inst.get_channel_types(unique=True, only_data_chs=True)
        else:
            assert isinstance(ch_type, (Sequence, np.ndarray))
            ch_types = ch_type
        assert len(ch_types) >= 1
        allowed_ch_types = ('mag', 'grad', 'planar1', 'planar2', 'eeg')
        for ch_type in ch_types:
            if ch_type not in allowed_ch_types:
                raise ValueError(
                    f'You requested operation on the channel type '
                    f'"{ch_type}", but only the following channel types are '
                    f'supported: {", ".join(allowed_ch_types)}'
                )
        del ch_type
        # Input data validation ends here
        if components is None:
            components = range(self.n_components_)
        # One ratio per requested channel type.
        explained_var_ratios = [
            self._get_explained_variance_ratio_one_ch_type(
                inst=inst, components=components, ch_type=ch_type
            ) for ch_type in ch_types
        ]
        result = dict(zip(ch_types, explained_var_ratios))
        return result
    def _get_explained_variance_ratio_one_ch_type(
            self, *, inst, components, ch_type
    ):
        """Compute the explained-variance ratio for a single channel type."""
        # The algorithm implemented below should be equivalent to
        # https://sccn.ucsd.edu/pipermail/eeglablist/2014/009134.html
        #
        # Reconstruct ("back-project") the data using only the specified ICA
        # components. Don't make use of potential "spare" PCA components in
        # this process – we're only interested in the contribution of the ICA
        # components!
        kwargs = dict(
            inst=inst.copy(),
            include=[components],
            exclude=[],
            n_pca_components=0,
            verbose=False,
        )
        if (
            isinstance(inst, (BaseEpochs, Evoked)) and
            inst.baseline is not None
        ):
            # Don't warn if data was baseline-corrected.
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    action='ignore',
                    message='The data.*was baseline-corrected',
                    category=RuntimeWarning
                )
                inst_recon = self.apply(**kwargs)
        else:
            inst_recon = self.apply(**kwargs)
        data_recon = inst_recon.get_data(picks=ch_type)
        data_orig = inst.get_data(picks=ch_type)
        data_diff = data_orig - data_recon
        # To estimate the data variance, we first compute the variance across
        # channels at each time point, and then we average these variances.
        mean_var_diff = data_diff.var(axis=0).mean()
        mean_var_orig = data_orig.var(axis=0).mean()
        # Fraction of original variance NOT left in the residual.
        var_explained_ratio = 1 - mean_var_diff / mean_var_orig
        return var_explained_ratio
def get_sources(self, inst, add_channels=None, start=None, stop=None):
"""Estimate sources given the unmixing matrix.
This method will return the sources in the container format passed.
Typical usecases:
1. pass Raw object to use `raw.plot <mne.io.Raw.plot>` for ICA sources
2. pass Epochs object to compute trial-based statistics in ICA space
3. pass Evoked object to investigate time-locking in ICA space
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Object to compute sources from and to represent sources in.
add_channels : None | list of str
Additional channels to be added. Useful to e.g. compare sources
with some reference. Defaults to None.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, the entire data will be used.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, the entire data will be used.
Returns
-------
sources : instance of Raw, Epochs or Evoked
The ICA sources time series.
"""
if isinstance(inst, BaseRaw):
_check_compensation_grade(self.info, inst.info, 'ICA', 'Raw',
ch_names=self.ch_names)
sources = self._sources_as_raw(inst, add_channels, start, stop)
elif isinstance(inst, BaseEpochs):
_check_compensation_grade(self.info, inst.info, 'ICA', 'Epochs',
ch_names=self.ch_names)
sources = self._sources_as_epochs(inst, add_channels, False)
elif isinstance(inst, Evoked):
_check_compensation_grade(self.info, inst.info, 'ICA', 'Evoked',
ch_names=self.ch_names)
sources = self._sources_as_evoked(inst, add_channels)
else:
raise ValueError('Data input must be of Raw, Epochs or Evoked '
'type')
return sources
    def _sources_as_raw(self, raw, add_channels, start, stop):
        """Return a Raw instance holding the ICA source time courses.

        A cropped copy of ``raw`` is used as the container so that metadata
        (e.g. annotations) stays aligned with the requested span.
        """
        # merge copied instance and picked data with sources
        start, stop = _check_start_stop(raw, start, stop)
        data_ = self._transform_raw(raw, start=start, stop=stop)
        assert data_.shape[1] == stop - start
        preloaded = raw.preload
        if raw.preload:
            # get data and temporarily delete, so the copy below does not
            # duplicate the (possibly large) data array
            data = raw._data
            raw.preload = False
            del raw._data
        # copy and crop here so that things like annotations are adjusted
        try:
            out = raw.copy().crop(
                start / raw.info['sfreq'],
                (stop - 1) / raw.info['sfreq'])
        finally:
            # put the data back (always)
            if preloaded:
                raw.preload = True
                raw._data = data
        # populate copied raw.
        if add_channels is not None and len(add_channels):
            # append the requested extra channels below the sources
            picks = pick_channels(raw.ch_names, add_channels)
            data_ = np.concatenate([
                data_, raw.get_data(picks, start=start, stop=stop)])
        out._data = data_
        # the cropped copy is a single contiguous segment in memory
        out._first_samps = [out.first_samp]
        out._last_samps = [out.last_samp]
        out._filenames = [None]
        out.preload = True
        out._projector = None
        self._export_info(out.info, raw, add_channels)
        return out
def _sources_as_epochs(self, epochs, add_channels, concatenate):
"""Aux method."""
out = epochs.copy()
sources = self._transform_epochs(epochs, concatenate)
if add_channels is not None:
picks = [epochs.ch_names.index(k) for k in add_channels]
else:
picks = []
out._data = np.concatenate([sources, epochs.get_data()[:, picks]],
axis=1) if len(picks) > 0 else sources
self._export_info(out.info, epochs, add_channels)
out.preload = True
out._raw = None
out._projector = None
return out
def _sources_as_evoked(self, evoked, add_channels):
"""Aux method."""
if add_channels is not None:
picks = [evoked.ch_names.index(k) for k in add_channels]
else:
picks = []
sources = self._transform_evoked(evoked)
if len(picks) > 1:
data = np.r_[sources, evoked.data[picks]]
else:
data = sources
out = evoked.copy()
out.data = data
self._export_info(out.info, evoked, add_channels)
return out
def _export_info(self, info, container, add_channels):
"""Aux method."""
# set channel names and info
ch_names = []
ch_info = []
for ii, name in enumerate(self._ica_names):
ch_names.append(name)
ch_info.append(dict(
ch_name=name, cal=1, logno=ii + 1,
coil_type=FIFF.FIFFV_COIL_NONE,
kind=FIFF.FIFFV_MISC_CH,
coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
unit=FIFF.FIFF_UNIT_NONE,
loc=np.zeros(12, dtype='f4'),
range=1.0, scanno=ii + 1, unit_mul=0))
if add_channels is not None:
# re-append additionally picked ch_names
ch_names += add_channels
# re-append additionally picked ch_info
ch_info += [k for k in container.info['chs'] if k['ch_name'] in
add_channels]
with info._unlock(update_redundant=True, check_after=True):
info['chs'] = ch_info
info['bads'] = [ch_names[k] for k in self.exclude]
info['projs'] = [] # make sure projections are removed.
    @verbose
    def score_sources(self, inst, target=None, score_func='pearsonr',
                      start=None, stop=None, l_freq=None, h_freq=None,
                      reject_by_annotation=True, verbose=None):
        """Assign score to components based on statistic or metric.
        Parameters
        ----------
        inst : instance of Raw, Epochs or Evoked
            The object to reconstruct the sources from.
        target : array-like | str | None
            Signal to which the sources shall be compared. It has to be of
            the same shape as the sources. If str, a routine will try to find
            a matching channel name. If None, a score
            function expecting only one input-array argument must be used,
            for instance, scipy.stats.skew (default).
        score_func : callable | str
            Callable taking as arguments either two input arrays
            (e.g. Pearson correlation) or one input
            array (e. g. skewness) and returns a float. For convenience the
            most common score_funcs are available via string labels:
            Currently, all distance metrics from scipy.spatial and All
            functions from scipy.stats taking compatible input arguments are
            supported. These function have been modified to support iteration
            over the rows of a 2D array.
        start : int | float | None
            First sample to include. If float, data will be interpreted as
            time in seconds. If None, data will be used from the first sample.
        stop : int | float | None
            Last sample to not include. If float, data will be interpreted as
            time in seconds. If None, data will be used to the last sample.
        l_freq : float
            Low pass frequency.
        h_freq : float
            High pass frequency.
        %(reject_by_annotation_all)s
        .. versionadded:: 0.14.0
        %(verbose)s
        Returns
        -------
        scores : ndarray
            Scores for each source as returned from score_func.
        """
        # Project the input into ICA space; the transform used depends on
        # the container type (epochs are concatenated along time).
        if isinstance(inst, BaseRaw):
            _check_compensation_grade(self.info, inst.info, 'ICA', 'Raw',
                                      ch_names=self.ch_names)
            sources = self._transform_raw(inst, start, stop,
                                          reject_by_annotation)
        elif isinstance(inst, BaseEpochs):
            _check_compensation_grade(self.info, inst.info, 'ICA', 'Epochs',
                                      ch_names=self.ch_names)
            sources = self._transform_epochs(inst, concatenate=True)
        elif isinstance(inst, Evoked):
            _check_compensation_grade(self.info, inst.info, 'ICA', 'Evoked',
                                      ch_names=self.ch_names)
            sources = self._transform_evoked(inst)
        else:
            raise ValueError('Data input must be of Raw, Epochs or Evoked '
                             'type')
        if target is not None:  # we can have univariate metrics without target
            # Resolve a channel name (or array) into a target time series
            # whose last dimension matches the sources.
            target = self._check_target(target, inst, start, stop,
                                        reject_by_annotation)
            if sources.shape[-1] != target.shape[-1]:
                raise ValueError('Sources and target do not have the same '
                                 'number of time slices.')
            # auto target selection
            if isinstance(inst, BaseRaw):
                # We pass inst, not self, because the sfreq of the data we
                # use for scoring components can be different:
                sources, target = _band_pass_filter(inst, sources, target,
                                                    l_freq, h_freq)
        scores = _find_sources(sources, target, score_func)
        return scores
def _check_target(self, target, inst, start, stop,
reject_by_annotation=False):
"""Aux Method."""
if isinstance(inst, BaseRaw):
reject_by_annotation = 'omit' if reject_by_annotation else None
start, stop = _check_start_stop(inst, start, stop)
if hasattr(target, 'ndim'):
if target.ndim < 2:
target = target.reshape(1, target.shape[-1])
if isinstance(target, str):
pick = _get_target_ch(inst, target)
target = inst.get_data(pick, start, stop, reject_by_annotation)
elif isinstance(inst, BaseEpochs):
if isinstance(target, str):
pick = _get_target_ch(inst, target)
target = inst.get_data()[:, pick]
if hasattr(target, 'ndim'):
if target.ndim == 3 and min(target.shape) == 1:
target = target.ravel()
elif isinstance(inst, Evoked):
if isinstance(target, str):
pick = _get_target_ch(inst, target)
target = inst.data[pick]
return target
    def _find_bads_ch(self, inst, chs, threshold=3.0, start=None,
                      stop=None, l_freq=None, h_freq=None,
                      reject_by_annotation=True, prefix='chs',
                      measure='zscore'):
        """Compute ExG/ref components.
        See find_bads_ecg, find_bads_eog, and find_bads_ref for details.
        """
        scores, idx = [], []
        # some magic we need inevitably ...
        # get targets before equalizing
        targets = [self._check_target(
            ch, inst, start, stop, reject_by_annotation) for ch in chs]
        # assign names, if targets are arrays instead of strings
        target_names = []
        for ch in chs:
            if not isinstance(ch, str):
                if prefix == "ecg":
                    target_names.append('ECG-MAG')
                else:
                    target_names.append(prefix)
            else:
                target_names.append(ch)
        for ii, (ch, target) in enumerate(zip(target_names, targets)):
            # correlate each ICA source with the current target channel
            scores += [self.score_sources(
                inst, target=target, score_func='pearsonr', start=start,
                stop=stop, l_freq=l_freq, h_freq=h_freq,
                reject_by_annotation=reject_by_annotation)]
            # pick last scores
            if measure == "zscore":
                # iterative z-scoring of the correlation values
                this_idx = _find_outliers(scores[-1], threshold=threshold)
            elif measure == "correlation":
                # absolute correlation threshold
                this_idx = np.where(abs(scores[-1]) > threshold)[0]
            else:
                raise ValueError("Unknown measure {}".format(measure))
            idx += [this_idx]
            # per-channel label, e.g. 'ecg/0/ECG 001'
            self.labels_['%s/%i/' % (prefix, ii) + ch] = list(this_idx)
        # remove duplicates but keep order by score, even across multiple
        # ref channels
        scores_ = np.concatenate([scores[ii][inds]
                                  for ii, inds in enumerate(idx)])
        idx_ = np.concatenate(idx)[np.abs(scores_).argsort()[::-1]]
        # keep only the first (highest-scoring) occurrence of each component
        idx_unique = list(np.unique(idx_))
        idx = []
        for i in idx_:
            if i in idx_unique:
                idx.append(i)
                idx_unique.remove(i)
        if len(scores) == 1:
            # single target channel: return the score array directly
            scores = scores[0]
        labels = list(idx)
        return labels, scores
def _get_ctps_threshold(self, pk_threshold=20):
"""Automatically decide the threshold of Kuiper index for CTPS method.
This function finds the threshold of Kuiper index based on the
threshold of pk. Kuiper statistic that minimizes the difference between
pk and the pk threshold (defaults to 20 :footcite:`DammersEtAl2008`)
is returned. It is assumed that the data are appropriately filtered and
bad data are rejected at least based on peak-to-peak amplitude
when/before running the ICA decomposition on data.
References
----------
.. footbibliography::
"""
N = self.info['sfreq']
Vs = np.arange(1, 100) / 100
C = math.sqrt(N) + 0.155 + 0.24 / math.sqrt(N)
# in formula (13), when k gets large, only k=1 matters for the
# summation. k*V*C thus becomes V*C
Pks = 2 * (4 * (Vs * C)**2 - 1) * (np.exp(-2 * (Vs * C)**2))
# NOTE: the threshold of pk is transformed to Pk for comparison
# pk = -log10(Pk)
return Vs[np.argmin(np.abs(Pks - 10**(-pk_threshold)))]
    @verbose
    def find_bads_ecg(self, inst, ch_name=None, threshold='auto', start=None,
                      stop=None, l_freq=8, h_freq=16, method='ctps',
                      reject_by_annotation=True, measure='zscore',
                      verbose=None):
        """Detect ECG related components.
        Cross-trial phase statistics :footcite:`DammersEtAl2008` or Pearson
        correlation can be used for detection.
        .. note:: If no ECG channel is available, routine attempts to create
                  an artificial ECG based on cross-channel averaging.
        Parameters
        ----------
        inst : instance of Raw, Epochs or Evoked
            Object to compute sources from.
        ch_name : str
            The name of the channel to use for ECG peak detection.
            The argument is mandatory if the dataset contains no ECG
            channels.
        threshold : float | 'auto'
            Value above which a feature is classified as outlier. See Notes.
            .. versionchanged:: 0.21
        start : int | float | None
            First sample to include. If float, data will be interpreted as
            time in seconds. If None, data will be used from the first sample.
            When working with Epochs or Evoked objects, must be float or None.
        stop : int | float | None
            Last sample to not include. If float, data will be interpreted as
            time in seconds. If None, data will be used to the last sample.
            When working with Epochs or Evoked objects, must be float or None.
        l_freq : float
            Low pass frequency.
        h_freq : float
            High pass frequency.
        method : 'ctps' | 'correlation'
            The method used for detection. If ``'ctps'``, cross-trial phase
            statistics :footcite:`DammersEtAl2008` are used to detect
            ECG-related components. See Notes.
        %(reject_by_annotation_all)s
            .. versionadded:: 0.14.0
        %(measure)s
        %(verbose)s
        Returns
        -------
        ecg_idx : list of int
            The indices of ECG-related components.
        scores : np.ndarray of float, shape (``n_components_``)
            If method is 'ctps', the normalized Kuiper index scores. If method
            is 'correlation', the correlation scores.
        See Also
        --------
        find_bads_eog, find_bads_ref, find_bads_muscle
        Notes
        -----
        The ``threshold``, ``method``, and ``measure`` parameters interact in
        the following ways:
        - If ``method='ctps'``, ``threshold`` refers to the significance value
          of a Kuiper statistic, and ``threshold='auto'`` will compute the
          threshold automatically based on the sampling frequency.
        - If ``method='correlation'`` and ``measure='correlation'``,
          ``threshold`` refers to the Pearson correlation value, and
          ``threshold='auto'`` sets the threshold to 0.9.
        - If ``method='correlation'`` and ``measure='zscore'``, ``threshold``
          refers to the z-score value (i.e., standard deviations) used in the
          iterative z-scoring method, and ``threshold='auto'`` sets the
          threshold to 3.0.
        References
        ----------
        .. footbibliography::
        """
        # validate parameters before any data access
        _validate_type(threshold, (str, 'numeric'), 'threshold')
        if isinstance(threshold, str):
            _check_option('threshold', threshold, ('auto',), extra='when str')
        _validate_type(method, str, 'method')
        _check_option('method', method, ('ctps', 'correlation'))
        _validate_type(measure, str, 'measure')
        _check_option('measure', measure, ('zscore', 'correlation'))
        idx_ecg = _get_ecg_channel_index(ch_name, inst)
        if idx_ecg is None:
            # no ECG channel available: synthesize one from the data
            ecg, times = _make_ecg(inst, start, stop,
                                   reject_by_annotation=reject_by_annotation)
        else:
            ecg = inst.ch_names[idx_ecg]
        if method == 'ctps':
            if threshold == 'auto':
                threshold = self._get_ctps_threshold()
                logger.info('Using threshold: %.2f for CTPS ECG detection'
                            % threshold)
            if isinstance(inst, BaseRaw):
                # epoch the raw data around detected heartbeats first
                sources = self.get_sources(create_ecg_epochs(
                    inst, ch_name, l_freq=l_freq, h_freq=h_freq,
                    keep_ecg=False,
                    reject_by_annotation=reject_by_annotation)).get_data()
                if sources.shape[0] == 0:
                    warn('No ECG activity detected. Consider changing '
                         'the input parameters.')
            elif isinstance(inst, BaseEpochs):
                sources = self.get_sources(inst).get_data()
            else:
                raise ValueError('With `ctps` only Raw and Epochs input is '
                                 'supported')
            _, p_vals, _ = ctps(sources)
            # per-component score: max across time of the CTPS statistic
            scores = p_vals.max(-1)
            ecg_idx = np.where(scores >= threshold)[0]
            # sort indices by scores
            ecg_idx = ecg_idx[np.abs(scores[ecg_idx]).argsort()[::-1]]
            self.labels_['ecg'] = list(ecg_idx)
            if ch_name is None:
                ch_name = 'ECG-MAG'
            self.labels_['ecg/%s' % ch_name] = list(ecg_idx)
        elif method == 'correlation':
            # resolve 'auto' according to the selected measure
            if threshold == 'auto' and measure == 'zscore':
                threshold = 3.0
            elif threshold == 'auto' and measure == 'correlation':
                threshold = 0.9
            self.labels_['ecg'], scores = self._find_bads_ch(
                inst, [ecg], threshold=threshold, start=start, stop=stop,
                l_freq=l_freq, h_freq=h_freq, prefix="ecg",
                reject_by_annotation=reject_by_annotation, measure=measure)
        return self.labels_['ecg'], scores
    @verbose
    def find_bads_ref(self, inst, ch_name=None, threshold=3.0, start=None,
                      stop=None, l_freq=None, h_freq=None,
                      reject_by_annotation=True, method='together',
                      measure="zscore", verbose=None):
        """Detect MEG reference related components using correlation.
        Parameters
        ----------
        inst : instance of Raw, Epochs or Evoked
            Object to compute sources from. Should contain at least one channel
            i.e. component derived from MEG reference channels.
        ch_name : list of str
            Which MEG reference components to use. If None, then all channels
            that begin with REF_ICA.
        threshold : float | str
            Value above which a feature is classified as outlier.
            - If ``measure`` is ``'zscore'``, defines the threshold on the
              z-score used in the iterative z-scoring method.
            - If ``measure`` is ``'correlation'``, defines the absolute
              threshold on the correlation between 0 and 1.
            - If ``'auto'``, defaults to 3.0 if ``measure`` is ``'zscore'`` and
              0.9 if ``measure`` is ``'correlation'``.
             .. warning::
                 If ``method`` is ``'together'``, the iterative z-score method
                 is always used.
        start : int | float | None
            First sample to include. If float, data will be interpreted as
            time in seconds. If None, data will be used from the first sample.
        stop : int | float | None
            Last sample to not include. If float, data will be interpreted as
            time in seconds. If None, data will be used to the last sample.
        l_freq : float
            Low pass frequency.
        h_freq : float
            High pass frequency.
        %(reject_by_annotation_all)s
        method : 'together' | 'separate'
            Method to use to identify reference channel related components.
            Defaults to ``'together'``. See notes.
            .. versionadded:: 0.21
        %(measure)s
        %(verbose)s
        Returns
        -------
        ref_idx : list of int
            The indices of MEG reference related components, sorted by score.
        scores : np.ndarray of float, shape (``n_components_``) | list of array
            The correlation scores.
        See Also
        --------
        find_bads_ecg, find_bads_eog, find_bads_muscle
        Notes
        -----
        ICA decomposition on MEG reference channels is used to assess external
        magnetic noise and remove it from the MEG. Two methods are supported:
        With the ``'together'`` method, only one ICA fit is used, which
        encompasses both MEG and reference channels together. Components which
        have particularly strong weights on the reference channels may be
        thresholded and marked for removal.
        With ``'separate'`` selected components from a separate ICA
        decomposition on the reference channels are used as a ground truth for
        identifying bad components in an ICA fit done on MEG channels only. The
        logic here is similar to an EOG/ECG, with reference components
        replacing the EOG/ECG channels. Recommended procedure is to perform ICA
        separately on reference channels, extract them using
        :meth:`~mne.preprocessing.ICA.get_sources`, and then append them to the
        inst using :meth:`~mne.io.Raw.add_channels`, preferably with the prefix
        ``REF_ICA`` so that they can be automatically detected.
        With ``'together'``, thresholding is based on adaptative z-scoring.
        With ``'separate'``:
        - If ``measure`` is ``'zscore'``, thresholding is based on adaptative
          z-scoring.
        - If ``measure`` is ``'correlation'``, threshold defines the absolute
          threshold on the correlation between 0 and 1.
        Validation and further documentation for this technique can be found
        in :footcite:`HannaEtAl2020`.
        .. versionadded:: 0.18
        References
        ----------
        .. footbibliography::
        """
        # validate parameters before any data access
        _validate_type(threshold, (str, 'numeric'), 'threshold')
        if isinstance(threshold, str):
            _check_option('threshold', threshold, ('auto',), extra='when str')
        _validate_type(method, str, 'method')
        _check_option('method', method, ('together', 'separate'))
        _validate_type(measure, str, 'measure')
        _check_option('measure', measure, ('zscore', 'correlation'))
        if method == "separate":
            # resolve 'auto' according to the selected measure
            if threshold == 'auto' and measure == 'zscore':
                threshold = 3.0
            elif threshold == 'auto' and measure == 'correlation':
                threshold = 0.9
            if not ch_name:
                # default: all channels whose name begins with REF_ICA
                inds = pick_channels_regexp(inst.ch_names, 'REF_ICA*')
            else:
                inds = pick_channels(inst.ch_names, ch_name)
            # regexp returns list, pick_channels returns numpy
            inds = list(inds)
            if not inds:
                raise ValueError('No valid channels available.')
            ref_chs = [inst.ch_names[k] for k in inds]
            self.labels_['ref_meg'], scores = self._find_bads_ch(
                inst, ref_chs, threshold=threshold, start=start, stop=stop,
                l_freq=l_freq, h_freq=h_freq, prefix='ref_meg',
                reject_by_annotation=reject_by_annotation,
                measure=measure)
        elif method == 'together':
            # 'together' always uses adaptive z-scoring on weight-norm ratios
            if threshold == 'auto':
                threshold = 3.0
            if measure != 'zscore':
                # NOTE(review): message is missing a space between "is" and
                # "supported"
                logger.info(
                    "With method 'together', only 'zscore' measure is"
                    f"supported. Using 'zscore' instead of '{measure}'.")
            meg_picks = pick_types(self.info, meg=True, ref_meg=False)
            ref_picks = pick_types(self.info, meg=False, ref_meg=True)
            if not any(meg_picks) or not any(ref_picks):
                raise ValueError('ICA solution must contain both reference and'
                                 ' MEG channels.')
            weights = self.get_components()
            # take norm of component weights on reference channels for each
            # component, divide them by the norm on the standard channels,
            # log transform to approximate normal distribution
            normrats = np.linalg.norm(weights[ref_picks], axis=0) \
                / np.linalg.norm(weights[meg_picks], axis=0)
            scores = np.log(normrats)
            # one-tailed: only large positive log-ratios indicate reference
            # contamination
            self.labels_['ref_meg'] = list(_find_outliers(
                scores, threshold=threshold, tail=1))
        return self.labels_['ref_meg'], scores
    @verbose
    def find_bads_muscle(self, inst, threshold=0.5, start=None,
                         stop=None, l_freq=7, h_freq=45, sphere=None,
                         verbose=None):
        """Detect muscle related components.
        Detection is based on :footcite:`DharmapraniEtAl2016` which uses
        data from a subject who has been temporarily paralyzed
        :footcite:`WhithamEtAl2007`. The criteria are threefold:
        1) Positive log-log spectral slope from 7 to 45 Hz
        2) Peripheral component power (farthest away from the vertex)
        3) A single focal point measured by low spatial smoothness
        The threshold is relative to the slope, focal point and smoothness
        of a typical muscle-related ICA component. Note the high frequency
        of the power spectral density slope was 75 Hz in the reference but
        has been modified to 45 Hz as a default based on the criteria being
        more accurate in practice.
        Parameters
        ----------
        inst : instance of Raw, Epochs or Evoked
            Object to compute sources from.
        threshold : float | str
            Value above which a component should be marked as muscle-related,
            relative to a typical muscle component.
        start : int | float | None
            First sample to include. If float, data will be interpreted as
            time in seconds. If None, data will be used from the first sample.
        stop : int | float | None
            Last sample to not include. If float, data will be interpreted as
            time in seconds. If None, data will be used to the last sample.
        l_freq : float
            Low frequency for muscle-related power.
        h_freq : float
            High frequency for msucle related power.
        %(sphere_topomap_auto)s
        %(verbose)s
        Returns
        -------
        muscle_idx : list of int
            The indices of EOG related components, sorted by score.
        scores : np.ndarray of float, shape (``n_components_``) | list of array
            The correlation scores.
        See Also
        --------
        find_bads_ecg, find_bads_eog, find_bads_ref
        Notes
        -----
        .. versionadded:: 1.1
        """
        from scipy.spatial.distance import pdist, squareform
        _validate_type(threshold, 'numeric', 'threshold')
        sources = self.get_sources(inst, start=start, stop=stop)
        components = self.get_components()
        # compute metric #1: slope of the log-log psd
        spectrum = sources.compute_psd(fmin=l_freq, fmax=h_freq, picks='misc')
        psds, freqs = spectrum.get_data(return_freqs=True)
        if psds.ndim > 2:
            # average PSDs over epochs
            psds = psds.mean(axis=0)
        slopes = np.polyfit(np.log10(freqs), np.log10(psds).T, 1)[0]
        # compute metric #2: distance from the vertex of focus
        components_norm = abs(components) / np.max(abs(components), axis=0)
        # we need to retrieve the position from the channels that were used to
        # fit the ICA. N.B: picks in _find_topomap_coords includes bad channels
        # even if they are not provided explicitly.
        pos = _find_topomap_coords(
            inst.info, picks=self.ch_names, sphere=sphere, ignore_overlap=True
        )
        assert pos.shape[0] == components.shape[0]  # pos for each sensor
        pos -= pos.mean(axis=0)  # center
        dists = np.linalg.norm(pos, axis=1)
        dists /= dists.max()
        # weighted average sensor distance per component
        focus_dists = np.dot(dists, components_norm)
        # compute metric #3: smoothness
        smoothnesses = np.zeros((components.shape[1],))
        dists = squareform(pdist(pos))
        dists = 1 - (dists / dists.max())  # invert
        for idx, comp in enumerate(components.T):
            comp_dists = squareform(pdist(comp[:, np.newaxis]))
            comp_dists /= comp_dists.max()
            smoothnesses[idx] = np.multiply(dists, comp_dists).sum()
        # typical muscle slope is ~0.15, non-muscle components negative
        # so logistic with shift -0.5 and slope 0.25 so -0.5 -> 0.5 and 0->1
        # focus distance is ~65% of max electrode distance with 10% slope
        # (assumes typical head size)
        # smoothnessness is around 150 for muscle and 450 otherwise
        # so use reversed logistic centered at 300 with 100 slope
        # multiply so that all three components must be present
        scores = (1 / (1 + np.exp(-(slopes + 0.5) / 0.25))) * \
            (1 / (1 + np.exp(-(focus_dists - 0.65) / 0.1))) * \
            (1 - (1 / (1 + np.exp(-(smoothnesses - 300) / 100))))
        # scale the threshold by the use of three metrics
        self.labels_['muscle'] = [idx for idx, score in enumerate(scores)
                                  if score > threshold**3]
        return self.labels_['muscle'], scores
@verbose
def find_bads_eog(self, inst, ch_name=None, threshold=3.0, start=None,
stop=None, l_freq=1, h_freq=10,
reject_by_annotation=True, measure='zscore',
verbose=None):
"""Detect EOG related components using correlation.
Detection is based on Pearson correlation between the
filtered data and the filtered EOG channel.
Thresholding is based on adaptive z-scoring. The above threshold
components will be masked and the z-score will be recomputed
until no supra-threshold component remains.
Parameters
----------
inst : instance of Raw, Epochs or Evoked
Object to compute sources from.
ch_name : str
The name of the channel to use for EOG peak detection.
The argument is mandatory if the dataset contains no EOG
channels.
threshold : float | str
Value above which a feature is classified as outlier.
- If ``measure`` is ``'zscore'``, defines the threshold on the
z-score used in the iterative z-scoring method.
- If ``measure`` is ``'correlation'``, defines the absolute
threshold on the correlation between 0 and 1.
- If ``'auto'``, defaults to 3.0 if ``measure`` is ``'zscore'`` and
0.9 if ``measure`` is ``'correlation'``.
start : int | float | None
First sample to include. If float, data will be interpreted as
time in seconds. If None, data will be used from the first sample.
stop : int | float | None
Last sample to not include. If float, data will be interpreted as
time in seconds. If None, data will be used to the last sample.
l_freq : float
Low pass frequency.
h_freq : float
High pass frequency.
%(reject_by_annotation_all)s
.. versionadded:: 0.14.0
%(measure)s
%(verbose)s
Returns
-------
eog_idx : list of int
The indices of EOG related components, sorted by score.
scores : np.ndarray of float, shape (``n_components_``) | list of array
The correlation scores.
See Also
--------
find_bads_ecg, find_bads_ref
"""
_validate_type(threshold, (str, 'numeric'), 'threshold')
if isinstance(threshold, str):
_check_option('threshold', threshold, ('auto',), extra='when str')
_validate_type(measure, str, 'measure')
_check_option('measure', measure, ('zscore', 'correlation'))
eog_inds = _get_eog_channel_index(ch_name, inst)
eog_chs = [inst.ch_names[k] for k in eog_inds]
if threshold == 'auto' and measure == 'zscore':
threshold = 3.0
elif threshold == 'auto' and measure == 'correlation':
threshold = 0.9
self.labels_['eog'], scores = self._find_bads_ch(
inst, eog_chs, threshold=threshold, start=start, stop=stop,
l_freq=l_freq, h_freq=h_freq, prefix="eog",
reject_by_annotation=reject_by_annotation, measure=measure)
return self.labels_['eog'], scores
    @verbose
    def apply(self, inst, include=None, exclude=None, n_pca_components=None,
              start=None, stop=None, *, on_baseline='warn', verbose=None):
        """Remove selected components from the signal.
        Given the unmixing matrix, transform the data,
        zero out all excluded components, and inverse-transform the data.
        This procedure will reconstruct M/EEG signals from which
        the dynamics described by the excluded components is subtracted.
        Parameters
        ----------
        inst : instance of Raw, Epochs or Evoked
            The data to be processed (i.e., cleaned). It will be modified
            in-place.
        include : array_like of int
            The indices referring to columns in the ummixing matrix. The
            components to be kept. If ``None`` (default), all components
            will be included (minus those defined in ``ica.exclude``
            and the ``exclude`` parameter, see below).
        exclude : array_like of int
            The indices referring to columns in the ummixing matrix. The
            components to be zeroed out. If ``None`` (default) or an
            empty list, only components from ``ica.exclude`` will be
            excluded. Else, the union of ``exclude`` and ``ica.exclude``
            will be excluded.
        %(n_pca_components_apply)s
        start : int | float | None
            First sample to include. If float, data will be interpreted as
            time in seconds. If None, data will be used from the first sample.
        stop : int | float | None
            Last sample to not include. If float, data will be interpreted as
            time in seconds. If None, data will be used to the last sample.
        %(on_baseline_ica)s
        %(verbose)s
        Returns
        -------
        out : instance of Raw, Epochs or Evoked
            The processed data.
        Notes
        -----
        .. note:: Applying ICA may introduce a DC shift. If you pass
                  baseline-corrected `~mne.Epochs` or `~mne.Evoked` data,
                  the baseline period of the cleaned data may not be of
                  zero mean anymore. If you require baseline-corrected
                  data, apply baseline correction again after cleaning
                  via ICA. A warning will be emitted to remind you of this
                  fact if you pass baseline-corrected data.
        .. versionchanged:: 0.23
            Warn if instance was baseline-corrected.
        """
        _validate_type(inst, (BaseRaw, BaseEpochs, Evoked), 'inst',
                       'Raw, Epochs, or Evoked')
        # dispatch to the container-specific aux method
        kwargs = dict(include=include, exclude=exclude,
                      n_pca_components=n_pca_components)
        if isinstance(inst, BaseRaw):
            kind, meth = 'Raw', self._apply_raw
            kwargs.update(raw=inst, start=start, stop=stop)
        elif isinstance(inst, BaseEpochs):
            kind, meth = 'Epochs', self._apply_epochs
            kwargs.update(epochs=inst)
        else:  # isinstance(inst, Evoked):
            kind, meth = 'Evoked', self._apply_evoked
            kwargs.update(evoked=inst)
        _check_compensation_grade(self.info, inst.info, 'ICA', kind,
                                  ch_names=self.ch_names)
        _check_on_missing(on_baseline, 'on_baseline', extras=('reapply',))
        # warn/raise/ignore (per `on_baseline`) when the input was
        # baseline-corrected, because ICA can introduce DC shifts
        reapply_baseline = False
        if isinstance(inst, (BaseEpochs, Evoked)):
            if getattr(inst, 'baseline', None) is not None:
                if on_baseline == 'reapply':
                    reapply_baseline = True
                else:
                    msg = (
                        'The data you passed to ICA.apply() was '
                        'baseline-corrected. Please note that ICA can '
                        'introduce DC shifts, therefore you may wish to '
                        'consider baseline-correcting the cleaned data again.'
                    )
                    _on_missing(on_baseline, msg, 'on_baseline')
        logger.info(f'Applying ICA to {kind} instance')
        out = meth(**kwargs)
        if reapply_baseline:
            out.apply_baseline(inst.baseline)
        return out
def _check_exclude(self, exclude):
if exclude is None:
return list(set(self.exclude))
else:
# Allow both self.exclude and exclude to be array-like:
return list(set(self.exclude).union(set(exclude)))
def _apply_raw(self, raw, include, exclude, n_pca_components, start, stop):
"""Aux method."""
_check_preload(raw, "ica.apply")
start, stop = _check_start_stop(raw, start, stop)
picks = pick_types(raw.info, meg=False, include=self.ch_names,
exclude='bads', ref_meg=False)
data = raw[picks, start:stop][0]
data = self._pick_sources(data, include, exclude, n_pca_components)
raw[picks, start:stop] = data
return raw
def _apply_epochs(self, epochs, include, exclude, n_pca_components):
"""Aux method."""
_check_preload(epochs, "ica.apply")
picks = pick_types(epochs.info, meg=False, ref_meg=False,
include=self.ch_names,
exclude='bads')
# special case where epochs come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Epochs don\'t match fitted data: %i channels '
'fitted but %i channels supplied. \nPlease '
'provide Epochs compatible with '
'ica.ch_names' % (len(self.ch_names),
len(picks)))
data = np.hstack(epochs.get_data(picks))
data = self._pick_sources(data, include, exclude, n_pca_components)
# restore epochs, channels, tsl order
epochs._data[:, picks] = np.array(
np.split(data, len(epochs.events), 1))
epochs.preload = True
return epochs
def _apply_evoked(self, evoked, include, exclude, n_pca_components):
"""Aux method."""
picks = pick_types(evoked.info, meg=False, ref_meg=False,
include=self.ch_names,
exclude='bads')
# special case where evoked come picked but fit was 'unpicked'.
if len(picks) != len(self.ch_names):
raise RuntimeError('Evoked does not match fitted data: %i channels'
' fitted but %i channels supplied. \nPlease '
'provide an Evoked object that\'s compatible '
'with ica.ch_names' % (len(self.ch_names),
len(picks)))
data = evoked.data[picks]
data = self._pick_sources(data, include, exclude, n_pca_components)
# restore evoked
evoked.data[picks] = data
return evoked
def _pick_sources(self, data, include, exclude, n_pca_components):
    """Remove unwanted ICA components from ``data``.

    Projects ``data`` (n_channels, n_times) into ICA space, zeroes out
    the components that are not kept, and projects back through the first
    ``n_pca_components`` PCA components.
    """
    if n_pca_components is None:
        n_pca_components = self.n_pca_components
    data = self._pre_whiten(data)
    exclude = self._check_exclude(exclude)
    _n_pca_comp = self._check_n_pca_components(n_pca_components)
    n_ch, _ = data.shape
    max_pca_components = self.pca_components_.shape[0]
    # Reconstruction must use at least the fitted ICA components and at
    # most the total number of stored PCA components.
    if not self.n_components_ <= _n_pca_comp <= max_pca_components:
        raise ValueError(
            f'n_pca_components ({_n_pca_comp}) must be >= '
            f'n_components_ ({self.n_components_}) and <= '
            'the total number of PCA components '
            f'({max_pca_components}).')
    logger.info(f'    Transforming to ICA space ({self.n_components_} '
                f'component{_pl(self.n_components_)})')
    # Apply first PCA
    if self.pca_mean_ is not None:
        data -= self.pca_mean_[:, None]
    # Components to keep: ``include`` wins over ``exclude``; by default
    # every fitted component is kept.
    sel_keep = np.arange(self.n_components_)
    if include not in (None, []):
        sel_keep = np.unique(include)
    elif exclude not in (None, []):
        sel_keep = np.setdiff1d(np.arange(self.n_components_), exclude)
    n_zero = self.n_components_ - len(sel_keep)
    logger.info(f'    Zeroing out {n_zero} ICA component{_pl(n_zero)}')
    # Mixing and unmixing should both be shape (self.n_components_, 2),
    # and we need to put these into the upper left part of larger mixing
    # and unmixing matrices of shape (n_ch, _n_pca_comp)
    pca_components = self.pca_components_[:_n_pca_comp]
    assert pca_components.shape == (_n_pca_comp, n_ch)
    assert self.unmixing_matrix_.shape == \
        self.mixing_matrix_.shape == \
        (self.n_components_,) * 2
    unmixing = np.eye(_n_pca_comp)
    unmixing[:self.n_components_, :self.n_components_] = \
        self.unmixing_matrix_
    unmixing = np.dot(unmixing, pca_components)
    logger.info(f'    Projecting back using {_n_pca_comp} '
                f'PCA component{_pl(_n_pca_comp)}')
    mixing = np.eye(_n_pca_comp)
    mixing[:self.n_components_, :self.n_components_] = \
        self.mixing_matrix_
    mixing = pca_components.T @ mixing
    assert mixing.shape == unmixing.shape[::-1] == (n_ch, _n_pca_comp)
    # keep requested components plus residuals (if any)
    sel_keep = np.concatenate(
        (sel_keep, np.arange(self.n_components_, _n_pca_comp)))
    proj_mat = np.dot(mixing[:, sel_keep], unmixing[sel_keep, :])
    data = np.dot(proj_mat, data)
    assert proj_mat.shape == (n_ch,) * 2
    if self.pca_mean_ is not None:
        data += self.pca_mean_[:, None]
    # restore scaling
    if self.noise_cov is None:  # revert standardization
        data *= self.pre_whitener_
    else:
        data = np.linalg.pinv(self.pre_whitener_, rcond=1e-14) @ data
    return data
@verbose
def save(self, fname, *, overwrite=False, verbose=None):
    """Store ICA solution into a fiff file.

    Parameters
    ----------
    fname : str
        The absolute path of the file name to save the ICA solution into.
        The file name should end with -ica.fif or -ica.fif.gz.
    %(overwrite)s

        .. versionadded:: 1.0
    %(verbose)s

    Returns
    -------
    ica : instance of ICA
        The object.

    See Also
    --------
    read_ica
    """
    # Saving an unfitted ICA would produce a file read_ica cannot restore.
    if self.current_fit == 'unfitted':
        raise RuntimeError('No fit available. Please first fit ICA')
    check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz',
                               '_ica.fif', '_ica.fif.gz'))
    fname = _check_fname(fname, overwrite=overwrite)
    logger.info('Writing ICA solution to %s...' % fname)
    # start_and_end_file handles the FIFF header/trailer bookkeeping.
    with start_and_end_file(fname) as fid:
        _write_ica(fid, self)
    return self
def copy(self):
    """Copy the ICA object.

    Returns
    -------
    ica : instance of ICA
        The copied object.
    """
    # Deep copy so fitted matrices and info are fully independent.
    return deepcopy(self)
@copy_function_doc_to_method_doc(plot_ica_components)
def plot_components(self, picks=None, ch_type=None, res=64,
                    vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
                    colorbar=False, title=None, show=True, outlines='head',
                    contours=6, image_interp=_INTERPOLATION_DEFAULT,
                    inst=None, plot_std=True,
                    topomap_args=None, image_args=None, psd_args=None,
                    reject='auto', sphere=None, verbose=None):
    # Thin delegation; the docstring is copied from plot_ica_components.
    kwargs = dict(
        picks=picks, ch_type=ch_type, res=res, vmin=vmin, vmax=vmax,
        cmap=cmap, sensors=sensors, colorbar=colorbar, title=title,
        show=show, outlines=outlines, contours=contours,
        image_interp=image_interp, inst=inst, plot_std=plot_std,
        topomap_args=topomap_args, image_args=image_args,
        psd_args=psd_args, reject=reject, sphere=sphere, verbose=verbose)
    return plot_ica_components(self, **kwargs)
@copy_function_doc_to_method_doc(plot_ica_properties)
def plot_properties(self, inst, picks=None, axes=None, dB=True,
                    plot_std=True, log_scale=False, topomap_args=None,
                    image_args=None, psd_args=None, figsize=None,
                    show=True, reject='auto', reject_by_annotation=True,
                    *, verbose=None):
    # Thin delegation; the docstring is copied from plot_ica_properties.
    kwargs = dict(
        picks=picks, axes=axes, dB=dB, plot_std=plot_std,
        log_scale=log_scale, topomap_args=topomap_args,
        image_args=image_args, psd_args=psd_args, figsize=figsize,
        show=show, reject=reject,
        reject_by_annotation=reject_by_annotation, verbose=verbose)
    return plot_ica_properties(self, inst, **kwargs)
@copy_function_doc_to_method_doc(plot_ica_sources)
def plot_sources(self, inst, picks=None, start=None,
                 stop=None, title=None, show=True, block=False,
                 show_first_samp=False, show_scrollbars=True,
                 time_format='float', precompute=None,
                 use_opengl=None, *, theme=None, overview_mode=None):
    # Thin delegation; the docstring is copied from plot_ica_sources.
    return plot_ica_sources(self, inst=inst, picks=picks,
                            start=start, stop=stop, title=title, show=show,
                            block=block, show_first_samp=show_first_samp,
                            show_scrollbars=show_scrollbars,
                            time_format=time_format,
                            precompute=precompute, use_opengl=use_opengl,
                            theme=theme, overview_mode=overview_mode)
@copy_function_doc_to_method_doc(plot_ica_scores)
def plot_scores(self, scores, exclude=None, labels=None, axhline=None,
                title='ICA component scores', figsize=None, n_cols=None,
                show=True):
    # Thin delegation; the docstring is copied from plot_ica_scores.
    return plot_ica_scores(
        ica=self, scores=scores, exclude=exclude, labels=labels,
        axhline=axhline, title=title, figsize=figsize, n_cols=n_cols,
        show=show)
@copy_function_doc_to_method_doc(plot_ica_overlay)
def plot_overlay(self, inst, exclude=None, picks=None, start=None,
                 stop=None, title=None, show=True, n_pca_components=None,
                 *, on_baseline='warn', verbose=None):
    # Thin delegation; the docstring is copied from plot_ica_overlay.
    return plot_ica_overlay(self, inst=inst, exclude=exclude, picks=picks,
                            start=start, stop=stop, title=title, show=show,
                            n_pca_components=n_pca_components,
                            on_baseline=on_baseline, verbose=verbose)
@verbose
def _check_n_pca_components(self, _n_pca_comp, verbose=None):
    """Resolve the requested number of PCA components to an integer."""
    if isinstance(_n_pca_comp, float):
        # A float is interpreted as a fraction of explained variance.
        n, ev = _exp_var_ncomp(self.pca_explained_variance_, _n_pca_comp)
        logger.info(f'    Selected {n} PCA components by explained '
                    f'variance ({100 * ev}≥{100 * _n_pca_comp}%)')
        return n
    if _n_pca_comp is None:
        # Fall back to the fit-time maximum, then to all PCA components.
        if self._max_pca_components is not None:
            return self._max_pca_components
        return self.pca_components_.shape[0]
    # Never reconstruct with fewer components than were fitted.
    return max(_n_pca_comp, self.n_components_)
def _exp_var_ncomp(var, n):
cvar = np.asarray(var, dtype=np.float64)
cvar = cvar.cumsum()
cvar /= cvar[-1]
# We allow 1., which would give us N+1
n = min((cvar <= n).sum() + 1, len(cvar))
return n, cvar[n - 1]
def _check_start_stop(raw, start, stop):
"""Aux function."""
out = list()
for st, none_ in ((start, 0), (stop, raw.n_times)):
if st is None:
out.append(none_)
else:
try:
out.append(_ensure_int(st))
except TypeError: # not int-like
out.append(raw.time_as_index(st)[0])
return out
@verbose
def ica_find_ecg_events(raw, ecg_source, event_id=999,
                        tstart=0.0, l_freq=5, h_freq=35, qrs_threshold='auto',
                        verbose=None):
    """Find ECG peaks from one selected ICA source.

    Parameters
    ----------
    raw : instance of Raw
        Raw object to draw sources from.
    ecg_source : ndarray
        ICA source resembling ECG to find peaks from.
    event_id : int
        The index to assign to found events.
    tstart : float
        Start detection after tstart seconds. Useful when beginning
        of run is noisy.
    l_freq : float
        Low pass frequency.
    h_freq : float
        High pass frequency.
    qrs_threshold : float | str
        Between 0 and 1. qrs detection threshold. Can also be "auto" to
        automatically choose the threshold that generates a reasonable
        number of heartbeats (40-160 beats / min).
    %(verbose)s

    Returns
    -------
    ecg_events : array, shape (n_events, 3)
        Events in MNE format: absolute sample index (``raw.first_samp``
        added), a column of zeros, and ``event_id``.
    """
    logger.info('Using ICA source to identify heart beats')
    # detecting QRS and generating event file
    ecg_events = qrs_detector(raw.info['sfreq'], ecg_source.ravel(),
                              tstart=tstart, thresh_value=qrs_threshold,
                              l_freq=l_freq, h_freq=h_freq)
    n_events = len(ecg_events)
    # Shift to absolute sample numbers and build (onset, 0, id) triplets.
    ecg_events = np.c_[ecg_events + raw.first_samp, np.zeros(n_events),
                       event_id * np.ones(n_events)]
    return ecg_events
@verbose
def ica_find_eog_events(raw, eog_source=None, event_id=998, l_freq=1,
                        h_freq=10, verbose=None):
    """Locate EOG artifacts from one selected ICA source.

    Parameters
    ----------
    raw : instance of Raw
        The raw data.
    eog_source : ndarray
        ICA source resembling EOG to find peaks from.
    event_id : int
        The index to assign to found events.
    l_freq : float
        Low cut-off frequency in Hz.
    h_freq : float
        High cut-off frequency in Hz.
    %(verbose)s

    Returns
    -------
    eog_events : array
        Events.
    """
    # _find_eog_events expects a 2D (n_channels, n_times) array.
    return _find_eog_events(eog_source[np.newaxis],
                            sampling_rate=raw.info['sfreq'],
                            first_samp=raw.first_samp,
                            event_id=event_id, l_freq=l_freq, h_freq=h_freq)
def _get_target_ch(container, target):
    """Return the pick indices matching ``target``, excluding references."""
    # auto target selection
    candidates = pick_channels(container.ch_names, include=[target])
    refs = pick_types(container.info, meg=False, eeg=False, ref_meg=True)
    # Reference channels are never valid targets.
    if len(refs) > 0:
        candidates = list(set(candidates) - set(refs))
    if not len(candidates):
        raise ValueError('%s not in channel list (%s)' %
                         (target, container.ch_names))
    return candidates
def _find_sources(sources, target, score_func):
    """Score each source against ``target`` with the given score function."""
    if isinstance(score_func, str):
        # Map a known name to its callable; unknown names fall through
        # unchanged and are rejected below.
        score_func = get_score_funcs().get(score_func, score_func)
    if not callable(score_func):
        raise ValueError('%s is not a valid score_func.' % score_func)
    if target is None:
        return score_func(sources, 1)
    return score_func(sources, target)
def _ica_explained_variance(ica, inst, normalize=False):
    """Check variance accounted for by each component in supplied data.

    This function is only used for sorting the components.

    Parameters
    ----------
    ica : ICA
        Instance of `mne.preprocessing.ICA`.
    inst : Raw | Epochs | Evoked
        Data to explain with ICA. Instance of Raw, Epochs or Evoked.
    normalize : bool
        Whether to normalize the variance.

    Returns
    -------
    var : array
        Variance explained by each component.
    """
    # check if ica is ICA and whether inst is Raw or Epochs
    if not isinstance(ica, ICA):
        raise TypeError('first argument must be an instance of ICA.')
    if not isinstance(inst, (BaseRaw, BaseEpochs, Evoked)):
        raise TypeError('second argument must an instance of either Raw, '
                        'Epochs or Evoked.')
    source_data = _get_inst_data(ica.get_sources(inst))
    # if epochs - reshape to channels x timesamples
    if isinstance(inst, BaseEpochs):
        n_epochs, n_chan, n_samp = source_data.shape
        source_data = source_data.transpose(1, 0, 2).reshape(
            (n_chan, n_epochs * n_samp))
    n_chan, n_samp = source_data.shape
    # Per-component variance: squared mixing weights times source power,
    # averaged over channels and samples.
    var = np.sum(ica.mixing_matrix_ ** 2, axis=0) * np.sum(
        source_data ** 2, axis=1) / (n_chan * n_samp - 1)
    if normalize:
        var /= var.sum()
    return var
def _sort_components(ica, order, copy=True):
"""Change the order of components in ica solution."""
assert ica.n_components_ == len(order)
if copy:
ica = ica.copy()
# reorder components
ica.mixing_matrix_ = ica.mixing_matrix_[:, order]
ica.unmixing_matrix_ = ica.unmixing_matrix_[order, :]
# reorder labels, excludes etc.
if isinstance(order, np.ndarray):
order = list(order)
if ica.exclude:
ica.exclude = [order.index(ic) for ic in ica.exclude]
for k in ica.labels_.keys():
ica.labels_[k] = [order.index(ic) for ic in ica.labels_[k]]
return ica
def _serialize(dict_, outer_sep=';', inner_sep=':'):
    """Serialize a dict as ``key:json;key:json`` entries."""
    entries = []
    for key, value in dict_.items():
        if callable(value):
            value = value.__name__
        elif isinstance(value, Integral):
            value = int(value)
        elif isinstance(value, dict):
            # py35 json does not support numpy int64
            for subkey, subvalue in value.items():
                if (isinstance(subvalue, list) and len(subvalue) > 0 and
                        isinstance(subvalue[0], (int, np.integer))):
                    value[subkey] = [int(i) for i in subvalue]
        # Objects that cannot be serialized are stored by class name only.
        for cls in (np.random.RandomState, Covariance):
            if isinstance(value, cls):
                value = cls.__name__
        entries.append(key + inner_sep + json.dumps(value))
    return outer_sep.join(entries)
def _deserialize(str_, outer_sep=';', inner_sep=':'):
"""Aux Function."""
out = {}
for mapping in str_.split(outer_sep):
k, v = mapping.split(inner_sep, 1)
out[k] = json.loads(v)
return out
def _write_ica(fid, ica):
    """Write an ICA object.

    Parameters
    ----------
    fid : file
        The file descriptor.
    ica : instance of ICA
        The instance of ICA to write.
    """
    # Interface parameters needed to re-instantiate the ICA object.
    ica_init = dict(noise_cov=ica.noise_cov,
                    n_components=ica.n_components,
                    n_pca_components=ica.n_pca_components,
                    max_pca_components=ica._max_pca_components,
                    current_fit=ica.current_fit,
                    allow_ref_meg=ica.allow_ref_meg)
    if ica.info is not None:
        start_block(fid, FIFF.FIFFB_MEAS)
        write_id(fid, FIFF.FIFF_BLOCK_ID)
        if ica.info['meas_id'] is not None:
            write_id(fid, FIFF.FIFF_PARENT_BLOCK_ID, ica.info['meas_id'])
        # Write measurement info
        write_meas_info(fid, ica.info)
        end_block(fid, FIFF.FIFFB_MEAS)
    start_block(fid, FIFF.FIFFB_MNE_ICA)
    # ICA interface params
    write_string(fid, FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS,
                 _serialize(ica_init))
    # Channel names
    if ica.ch_names is not None:
        write_name_list(fid, FIFF.FIFF_MNE_ROW_NAMES, ica.ch_names)
    # samples on fit
    n_samples = getattr(ica, 'n_samples_', None)
    # Optional attributes only exist after fitting; default to None.
    ica_misc = {'n_samples_': (None if n_samples is None else int(n_samples)),
                'labels_': getattr(ica, 'labels_', None),
                'method': getattr(ica, 'method', None),
                'n_iter_': getattr(ica, 'n_iter_', None),
                'fit_params': getattr(ica, 'fit_params', None)}
    # ICA misc params
    write_string(fid, FIFF.FIFF_MNE_ICA_MISC_PARAMS,
                 _serialize(ica_misc))
    # Whitener
    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_WHITENER, ica.pre_whitener_)
    # PCA components_
    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_COMPONENTS,
                        ica.pca_components_)
    # PCA mean_
    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_MEAN, ica.pca_mean_)
    # PCA explained_variance_
    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR,
                        ica.pca_explained_variance_)
    # ICA unmixing
    write_double_matrix(fid, FIFF.FIFF_MNE_ICA_MATRIX, ica.unmixing_matrix_)
    # Write bad components
    write_int(fid, FIFF.FIFF_MNE_ICA_BADS, list(ica.exclude))
    # Write reject_
    if ica.reject_ is not None:
        write_string(fid, FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT,
                     json.dumps(dict(reject=ica.reject_)))
    # Done!
    end_block(fid, FIFF.FIFFB_MNE_ICA)
@verbose
def read_ica(fname, verbose=None):
    """Restore ICA solution from fif file.

    Parameters
    ----------
    fname : str
        Absolute path to fif file containing ICA matrices.
        The file name should end with -ica.fif or -ica.fif.gz.
    %(verbose)s

    Returns
    -------
    ica : instance of ICA
        The ICA estimator.
    """
    check_fname(fname, 'ICA', ('-ica.fif', '-ica.fif.gz',
                               '_ica.fif', '_ica.fif.gz'))
    logger.info('Reading %s ...' % fname)
    fid, tree, _ = fiff_open(fname)
    try:
        # we used to store bads that weren't part of the info...
        info, _ = read_meas_info(fid, tree, clean_bads=True)
    except ValueError:
        logger.info('Could not find the measurement info. \n'
                    'Functionality requiring the info won\'t be'
                    ' available.')
        info = None
    ica_data = dir_tree_find(tree, FIFF.FIFFB_MNE_ICA)
    if len(ica_data) == 0:
        ica_data = dir_tree_find(tree, 123)  # Constant 123 Used before v 0.11
        if len(ica_data) == 0:
            fid.close()
            raise ValueError('Could not find ICA data')
    my_ica_data = ica_data[0]
    ica_reject = None
    # Walk the directory entries and pull out each stored ICA field.
    for d in my_ica_data['directory']:
        kind = d.kind
        pos = d.pos
        if kind == FIFF.FIFF_MNE_ICA_INTERFACE_PARAMS:
            tag = read_tag(fid, pos)
            ica_init = tag.data
        elif kind == FIFF.FIFF_MNE_ROW_NAMES:
            tag = read_tag(fid, pos)
            ch_names = tag.data
        elif kind == FIFF.FIFF_MNE_ICA_WHITENER:
            tag = read_tag(fid, pos)
            pre_whitener = tag.data
        elif kind == FIFF.FIFF_MNE_ICA_PCA_COMPONENTS:
            tag = read_tag(fid, pos)
            pca_components = tag.data
        elif kind == FIFF.FIFF_MNE_ICA_PCA_EXPLAINED_VAR:
            tag = read_tag(fid, pos)
            pca_explained_variance = tag.data
        elif kind == FIFF.FIFF_MNE_ICA_PCA_MEAN:
            tag = read_tag(fid, pos)
            pca_mean = tag.data
        elif kind == FIFF.FIFF_MNE_ICA_MATRIX:
            tag = read_tag(fid, pos)
            unmixing_matrix = tag.data
        elif kind == FIFF.FIFF_MNE_ICA_BADS:
            tag = read_tag(fid, pos)
            exclude = tag.data
        elif kind == FIFF.FIFF_MNE_ICA_MISC_PARAMS:
            tag = read_tag(fid, pos)
            ica_misc = tag.data
        elif kind == FIFF.FIFF_MNE_EPOCHS_REJECT_FLAT:
            tag = read_tag(fid, pos)
            ica_reject = json.loads(tag.data)['reject']
    fid.close()
    ica_init, ica_misc = [_deserialize(k) for k in (ica_init, ica_misc)]
    n_pca_components = ica_init.pop('n_pca_components')
    current_fit = ica_init.pop('current_fit')
    max_pca_components = ica_init.pop('max_pca_components')
    method = ica_misc.get('method', 'fastica')
    if method in _KNOWN_ICA_METHODS:
        ica_init['method'] = method
    if ica_init['noise_cov'] == Covariance.__name__:
        logger.info('Reading whitener drawn from noise covariance ...')
    logger.info('Now restoring ICA solution ...')

    # make sure dtypes are np.float64 to satisfy fast_dot
    def f(x):
        return x.astype(np.float64)

    # Only pass parameters the current ICA constructor still accepts, so
    # files written by older versions remain readable.
    ica_init = {k: v for k, v in ica_init.items()
                if k in signature(ICA.__init__).parameters}
    ica = ICA(**ica_init)
    ica.current_fit = current_fit
    ica.ch_names = ch_names.split(':')
    if n_pca_components is not None and \
            not isinstance(n_pca_components, int_like):
        n_pca_components = np.float64(n_pca_components)
    ica.n_pca_components = n_pca_components
    ica.pre_whitener_ = f(pre_whitener)
    ica.pca_mean_ = f(pca_mean)
    ica.pca_components_ = f(pca_components)
    ica.n_components_ = unmixing_matrix.shape[0]
    ica._max_pca_components = max_pca_components
    ica._update_ica_names()
    ica.pca_explained_variance_ = f(pca_explained_variance)
    ica.unmixing_matrix_ = f(unmixing_matrix)
    ica._update_mixing_matrix()
    ica.exclude = [] if exclude is None else list(exclude)
    ica.info = info
    # Optional fields may be missing in files from older versions.
    if 'n_samples_' in ica_misc:
        ica.n_samples_ = ica_misc['n_samples_']
    if 'labels_' in ica_misc:
        labels_ = ica_misc['labels_']
        if labels_ is not None:
            ica.labels_ = labels_
    if 'method' in ica_misc:
        ica.method = ica_misc['method']
    if 'n_iter_' in ica_misc:
        ica.n_iter_ = ica_misc['n_iter_']
    if 'fit_params' in ica_misc:
        ica.fit_params = ica_misc['fit_params']
    ica.reject_ = ica_reject
    logger.info('Ready.')
    return ica
# Lightweight container describing one artifact-detection configuration.
_ica_node = namedtuple('Node', 'name target score_func criterion')
@verbose
def _band_pass_filter(inst, sources, target, l_freq, h_freq, verbose=None):
    """Optionally band-pass filter the data."""
    if l_freq is None and h_freq is None:
        return sources, target
    if l_freq is None or h_freq is None:
        raise ValueError('Must specify both pass bands')
    logger.info('... filtering ICA sources')
    # use FIR here, steeper is better
    filt_kwargs = dict(phase='zero-double', filter_length='10s',
                       fir_window='hann', l_trans_bandwidth=0.5,
                       h_trans_bandwidth=0.5, fir_design='firwin2')
    sfreq = inst.info['sfreq']
    sources = filter_data(sources, sfreq, l_freq, h_freq, **filt_kwargs)
    logger.info('... filtering target')
    target = filter_data(target, sfreq, l_freq, h_freq, **filt_kwargs)
    return sources, target
# #############################################################################
# CORRMAP
def _find_max_corrs(all_maps, target, threshold):
    """Compute correlations between template and target components."""
    all_corrs = [compute_corr(target, subj.T) for subj in all_maps]
    abs_corrs = [np.abs(a) for a in all_corrs]
    corr_polarities = [np.sign(a) for a in all_corrs]
    # threshold <= 1: direct correlation cutoff; > 1: z-score outlier search.
    if threshold <= 1:
        max_corrs = [list(np.nonzero(s_corr > threshold)[0])
                     for s_corr in abs_corrs]
    else:
        max_corrs = [list(_find_outliers(s_corr, threshold=threshold))
                     for s_corr in abs_corrs]
    am = [l_[i] for l_, i_s in zip(abs_corrs, max_corrs)
          for i in i_s]
    median_corr_with_target = np.median(am) if len(am) > 0 else 0
    polarities = [l_[i] for l_, i_s in zip(corr_polarities, max_corrs)
                  for i in i_s]
    maxmaps = [l_[i] for l_, i_s in zip(all_maps, max_corrs)
               for i in i_s]
    if len(maxmaps) == 0:
        return [], 0, 0, []
    newtarget = np.zeros(maxmaps[0].size)
    std_of_maps = np.std(np.asarray(maxmaps))
    # NOTE(review): despite its name, this computes np.std, not np.mean —
    # looks like a long-standing bug (presumably np.mean was intended);
    # confirm before changing, as fixing it alters corrmap results.
    mean_of_maps = np.std(np.asarray(maxmaps))
    # Build a new template as the polarity-corrected average of the
    # selected maps.
    for maxmap, polarity in zip(maxmaps, polarities):
        newtarget += (maxmap / std_of_maps - mean_of_maps) * polarity
    newtarget /= len(maxmaps)
    newtarget *= std_of_maps
    # Similarity between the input template and the constructed one.
    sim_i_o = np.abs(np.corrcoef(target, newtarget)[1, 0])
    return newtarget, median_corr_with_target, sim_i_o, max_corrs
@verbose
def corrmap(icas, template, threshold="auto", label=None, ch_type="eeg", *,
            sensors=True, show_names=False, contours=6, outlines='head',
            sphere=None, image_interp=_INTERPOLATION_DEFAULT,
            extrapolate=_EXTRAPOLATE_DEFAULT, border=_BORDER_DEFAULT,
            cmap=None, plot=True, show=True, verbose=None):
    """Find similar Independent Components across subjects by map similarity.

    Corrmap :footcite:p:`CamposViolaEtAl2009` identifies the best group
    match to a supplied template. Typically, feed it a list of fitted ICAs and
    a template IC, for example, the blink for the first subject, to identify
    specific ICs across subjects.

    The specific procedure consists of two iterations. In a first step, the
    maps best correlating with the template are identified. In the next step,
    the analysis is repeated with the mean of the maps identified in the first
    stage.

    Run with ``plot`` and ``show`` set to ``True`` and ``label=False`` to find
    good parameters. Then, run with labelling enabled to apply the
    labelling in the IC objects. (Running with both ``plot`` and ``labels``
    off does nothing.)

    Outputs a list of fitted ICAs with the indices of the marked ICs in a
    specified field.

    The original Corrmap website: www.debener.de/corrmap/corrmapplugin1.html

    Parameters
    ----------
    icas : list of mne.preprocessing.ICA
        A list of fitted ICA objects.
    template : tuple | np.ndarray, shape (n_components,)
        Either a tuple with two elements (int, int) representing the list
        indices of the set from which the template should be chosen, and the
        template. E.g., if template=(1, 0), the first IC of the 2nd ICA object
        is used.
        Or a numpy array whose size corresponds to each IC map from the
        supplied maps, in which case this map is chosen as the template.
    threshold : "auto" | list of float | float
        Correlation threshold for identifying ICs
        If "auto", search for the best map by trying all correlations between
        0.6 and 0.95. In the original proposal, lower values are considered,
        but this is not yet implemented.
        If list of floats, search for the best map in the specified range of
        correlation strengths. As correlation values, must be between 0 and 1
        If float > 0, select ICs correlating better than this.
        If float > 1, use z-scoring to identify ICs within subjects (not in
        original Corrmap)
        Defaults to "auto".
    label : None | str
        If not None, categorised ICs are stored in a dictionary ``labels_``
        under the given name. Preexisting entries will be appended to
        (excluding repeats), not overwritten. If None, a dry run is performed
        and the supplied ICs are not changed.
    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg'
        The channel type to plot. Defaults to 'eeg'.
    %(sensors_topomap)s
    %(show_names_topomap)s
    %(contours_topomap)s
    %(outlines_topomap)s
    %(sphere_topomap_auto)s
    %(image_interp_topomap)s

        .. versionadded:: 1.2
    %(extrapolate_topomap)s

        .. versionadded:: 1.2
    %(border_topomap)s

        .. versionadded:: 1.2
    %(cmap_topomap_simple)s
    plot : bool
        Should constructed template and selected maps be plotted? Defaults
        to True.
    %(show)s
    %(verbose)s

    Returns
    -------
    template_fig : Figure
        Figure showing the template.
    labelled_ics : Figure
        Figure showing the labelled ICs in all ICA decompositions.

    References
    ----------
    .. footbibliography::
    """
    if not isinstance(plot, bool):
        raise ValueError("`plot` must be of type `bool`")
    # Corrmap compares maps across subjects, so all ICAs must share the
    # same channel set.
    same_chans = _check_all_same_channel_names(icas)
    if same_chans is False:
        raise ValueError("Not all ICA instances have the same channel names. "
                         "Corrmap requires all instances to have the same "
                         "montage. Consider interpolating bad channels before "
                         "running ICA.")
    threshold_extra = ''
    if threshold == 'auto':
        threshold = np.arange(60, 95, dtype=np.float64) / 100.
        threshold_extra = ' ("auto")'
    all_maps = [ica.get_components().T for ica in icas]
    # check if template is an index to one IC in one ICA object, or an array
    if len(template) == 2:
        target = all_maps[template[0]][template[1]]
        is_subject = True
    elif template.ndim == 1 and len(template) == all_maps[0].shape[1]:
        target = template
        is_subject = False
    else:
        raise ValueError("`template` must be a length-2 tuple or an array the "
                         "size of the ICA maps.")
    template_fig, labelled_ics = None, None
    if plot is True:
        if is_subject:  # plotting from an ICA object
            ttl = 'Template from subj. {}'.format(str(template[0]))
            template_fig = icas[template[0]].plot_components(
                picks=template[1], ch_type=ch_type, title=ttl,
                outlines=outlines, cmap=cmap, contours=contours,
                show=show, topomap_args=dict(sphere=sphere))
        else:  # plotting an array
            template_fig = _plot_corrmap(
                [template], [0], [0], ch_type, icas[0].copy(), "Template",
                outlines=outlines, cmap=cmap, contours=contours,
                image_interp=image_interp, extrapolate=extrapolate,
                border=border, show=show, template=True, sphere=sphere)
        template_fig.subplots_adjust(top=0.8)
        template_fig.canvas.draw()
    # first run: use user-selected map
    threshold = np.atleast_1d(np.array(threshold, float)).ravel()
    threshold_err = ('No component detected using when z-scoring '
                     'threshold%s %s, consider using a more lenient '
                     'threshold' % (threshold_extra, threshold))
    if len(all_maps) == 0:
        raise RuntimeError(threshold_err)
    paths = [_find_max_corrs(all_maps, target, t) for t in threshold]
    # find iteration with highest avg correlation with target
    new_target, _, _, _ = paths[np.argmax([path[2] for path in paths])]
    # second run: use output from first run
    if len(all_maps) == 0 or len(new_target) == 0:
        raise RuntimeError(threshold_err)
    paths = [_find_max_corrs(all_maps, new_target, t) for t in threshold]
    del new_target
    # find iteration with highest avg correlation with target
    _, median_corr, _, max_corrs = paths[
        np.argmax([path[1] for path in paths])]
    allmaps, indices, subjs, nones = [list() for _ in range(4)]
    logger.info('Median correlation with constructed map: %0.3f' % median_corr)
    del median_corr
    if plot is True:
        logger.info('Displaying selected ICs per subject.')
    # Record (and optionally label) the matching component(s) per subject.
    for ii, (ica, max_corr) in enumerate(zip(icas, max_corrs)):
        if len(max_corr) > 0:
            if isinstance(max_corr[0], np.ndarray):
                max_corr = max_corr[0]
            if label is not None:
                ica.labels_[label] = list(set(list(max_corr) +
                                          ica.labels_.get(label, list())))
            if plot is True:
                allmaps.extend(ica.get_components()[:, max_corr].T)
                subjs.extend([ii] * len(max_corr))
                indices.extend(max_corr)
        else:
            if (label is not None) and (label not in ica.labels_):
                ica.labels_[label] = list()
            nones.append(ii)
    if len(nones) == 0:
        logger.info('At least 1 IC detected for each subject.')
    else:
        logger.info(f'No maps selected for subject{_pl(nones)} {nones}, '
                    'consider a more liberal threshold.')
    if plot is True:
        labelled_ics = _plot_corrmap(
            allmaps, subjs, indices, ch_type, ica, label, outlines=outlines,
            cmap=cmap, sensors=sensors, contours=contours, sphere=sphere,
            image_interp=image_interp, extrapolate=extrapolate,
            border=border, show=show, show_names=show_names)
        return template_fig, labelled_ics
    else:
        # NOTE(review): docstring promises two figures, but the no-plot
        # path returns None; callers must not unpack when plot=False.
        return None
@verbose
def read_ica_eeglab(fname, *, verbose=None):
    """Load ICA information saved in an EEGLAB .set file.

    Parameters
    ----------
    fname : str
        Complete path to a .set EEGLAB file that contains an ICA object.
    %(verbose)s

    Returns
    -------
    ica : instance of ICA
        An ICA object based on the information contained in the input file.
    """
    from scipy import linalg
    eeg = _check_load_mat(fname, None)
    info, eeg_montage, _ = _get_info(eeg)
    info.set_montage(eeg_montage)
    # EEGLAB stores 1-based channel indices; restrict info to ICA channels.
    pick_info(info, np.round(eeg['icachansind']).astype(int) - 1, copy=False)
    rank = eeg.icasphere.shape[0]
    n_components = eeg.icaweights.shape[0]
    ica = ICA(method='imported_eeglab', n_components=n_components)
    ica.current_fit = "eeglab"
    ica.ch_names = info["ch_names"]
    ica.n_pca_components = None
    ica.n_components_ = n_components
    n_ch = len(ica.ch_names)
    assert len(eeg.icachansind) == n_ch
    # No whitening/centering information is stored by EEGLAB; use identity
    # scaling and a zero mean.
    ica.pre_whitener_ = np.ones((n_ch, 1))
    ica.pca_mean_ = np.zeros(n_ch)
    assert eeg.icasphere.shape[1] == n_ch
    assert eeg.icaweights.shape == (n_components, rank)
    # When PCA reduction is used in EEGLAB, runica returns
    # weights= weights*sphere*eigenvectors(:,1:ncomps)';
    # sphere = eye(urchans). When PCA reduction is not used, we have:
    #
    #     eeg.icawinv == pinv(eeg.icaweights @ eeg.icasphere)
    #
    # So in either case, we can use SVD to get our square whitened
    # weights matrix (u * s) and our PCA vectors (v) back:
    use = eeg.icaweights @ eeg.icasphere
    use_check = linalg.pinv(eeg.icawinv)
    if not np.allclose(use, use_check, rtol=1e-6):
        warn('Mismatch between icawinv and icaweights @ icasphere from EEGLAB '
             'possibly due to ICA component removal, assuming icawinv is '
             'correct')
        use = use_check
    u, s, v = _safe_svd(use, full_matrices=False)
    ica.unmixing_matrix_ = u * s
    ica.pca_components_ = v
    ica.pca_explained_variance_ = s * s
    ica.info = info
    ica._update_mixing_matrix()
    ica._update_ica_names()
    ica.reject_ = None
    return ica
| bsd-3-clause | 6c4f85073d376df5bb9ea1cd3ce65695 | 40.558785 | 126 | 0.575707 | 3.928694 | false | false | false | false |
mne-tools/mne-python | examples/visualization/montage_sgskip.py | 11 | 2070 | # -*- coding: utf-8 -*-
"""
.. _plot_montage:
Plotting sensor layouts of EEG systems
======================================
This example illustrates how to load all the EEG system montages
shipped in MNE-Python, and display them on the fsaverage template subject.
""" # noqa: D205, D400
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Joan Massich <mailsik@gmail.com>
#
# License: BSD-3-Clause
# %%
import os.path as op
import numpy as np
import mne
from mne.channels.montage import get_builtin_montages
from mne.datasets import fetch_fsaverage
from mne.viz import set_3d_title, set_3d_view
# %%
# Check all montages against a sphere
for current_montage in get_builtin_montages():
    montage = mne.channels.make_standard_montage(current_montage)
    # Build a minimal EEG Info object just to attach the montage positions.
    info = mne.create_info(
        ch_names=montage.ch_names, sfreq=100., ch_types='eeg')
    info.set_montage(montage)
    # Fit a spherical head model to the digitized positions.
    sphere = mne.make_sphere_model(r0='auto', head_radius='auto', info=info)
    fig = mne.viz.plot_alignment(
        # Plot options
        show_axes=True, dig='fiducials', surfaces='head',
        trans=mne.Transform("head", "mri", trans=np.eye(4)),  # identity
        bem=sphere, info=info)
    set_3d_view(figure=fig, azimuth=135, elevation=80)
    set_3d_title(figure=fig, title=current_montage)
# %%
# Check all montages against fsaverage
subjects_dir = op.dirname(fetch_fsaverage())  # download/locate fsaverage
for current_montage in get_builtin_montages():
    montage = mne.channels.make_standard_montage(current_montage)
    # Create dummy info
    info = mne.create_info(
        ch_names=montage.ch_names, sfreq=100., ch_types='eeg')
    info.set_montage(montage)
    fig = mne.viz.plot_alignment(
        # Plot options
        show_axes=True, dig='fiducials', surfaces='head', mri_fiducials=True,
        subject='fsaverage', subjects_dir=subjects_dir, info=info,
        coord_frame='mri',
        trans='fsaverage',  # transform from head coords to fsaverage's MRI
    )
    set_3d_view(figure=fig, azimuth=135, elevation=80)
    set_3d_title(figure=fig, title=current_montage)
| bsd-3-clause | 0b93e0fb1d2b5a65ca0fd9fe14342831 | 31.34375 | 77 | 0.675362 | 2.982709 | false | false | false | false |
mne-tools/mne-python | mne/minimum_norm/_eloreta.py | 11 | 7270 | # Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
from functools import partial
import numpy as np
from ..defaults import _handle_default
from ..fixes import _safe_svd
from ..utils import warn, logger, sqrtm_sym, eigh
# For the reference implementation of eLORETA (force_equal=False),
# 0 < loose <= 1 all produce solutions that are (more or less)
# the same as free orientation (loose=1) and quite different from
# loose=0 (fixed). If we do force_equal=True, we get a visibly smooth
# transition from 0->1. This is probably because this mode behaves more like
# sLORETA and dSPM in that it weights each orientation for a given source
# uniformly (which is not the case for the reference eLORETA implementation).
#
# If we *reapply the orientation prior* after each eLORETA iteration,
# we can preserve the smooth transition without requiring force_equal=True,
# which is probably more representative of what eLORETA should do. But this
# does not produce results that pass the eye test.
def _compute_eloreta(inv, lambda2, options):
    """Compute the eLORETA solution.

    Parameters
    ----------
    inv : dict
        The inverse operator. Modified in place: the eigen-field /
        eigen-lead decomposition is consumed and replaced with
        *weighted* eigen leads (``eigen_leads_weighted`` is set True).
    lambda2 : float
        The regularization parameter.
    options : dict | None
        eLORETA options (``eps``, ``max_iter``, ``force_equal``);
        missing entries are filled from ``_handle_default``.
    """
    from .inverse import compute_rank_inverse, _compute_reginv
    options = _handle_default('eloreta_options', options)
    eps, max_iter = options['eps'], options['max_iter']
    force_equal = bool(options['force_equal'])  # None means False
    # Reassemble the gain matrix (should be fast enough)
    if inv['eigen_leads_weighted']:
        # We can probably relax this if we ever need to
        raise RuntimeError('eLORETA cannot be computed with weighted eigen '
                           'leads')
    G = np.dot(inv['eigen_fields']['data'].T * inv['sing'],
               inv['eigen_leads']['data'].T)
    del inv['eigen_leads']['data']
    del inv['eigen_fields']['data']
    del inv['sing']
    G = G.astype(np.float64)
    n_nzero = compute_rank_inverse(inv)
    G /= np.sqrt(inv['source_cov']['data'])
    # restore orientation prior
    source_std = np.ones(G.shape[1])
    if inv['orient_prior'] is not None:
        source_std *= np.sqrt(inv['orient_prior']['data'])
    G *= source_std
    # We do not multiply by the depth prior, as eLORETA should compensate for
    # depth bias.
    n_src = inv['nsource']
    n_chan, n_orient = G.shape
    n_orient //= n_src
    assert n_orient in (1, 3)
    logger.info(' Computing optimized source covariance (eLORETA)...')
    if n_orient == 3:
        logger.info(' Using %s orientation weights'
                    % ('uniform' if force_equal else 'independent',))
    # src, sens, 3
    G_3 = _get_G_3(G, n_orient)
    if n_orient != 1 and not force_equal:
        # Outer product
        R_prior = (source_std.reshape(n_src, 1, 3) *
                   source_std.reshape(n_src, 3, 1))
    else:
        R_prior = source_std ** 2
    # The following was adapted under BSD license by permission of Guido Nolte
    if force_equal or n_orient == 1:
        R_shape = (n_src * n_orient,)
        R = np.ones(R_shape)
    else:
        R_shape = (n_src, n_orient, n_orient)
        R = np.empty(R_shape)
        R[:] = np.eye(n_orient)[np.newaxis]
    R *= R_prior
    _this_normalize_R = partial(
        _normalize_R, n_nzero=n_nzero, force_equal=force_equal,
        n_src=n_src, n_orient=n_orient)
    G_R_Gt = _this_normalize_R(G, R, G_3)
    # NOTE: fixed typo in the user-facing message ("make take" -> "may take")
    extra = ' (this may take a while)' if n_orient == 3 else ''
    logger.info(' Fitting up to %d iterations%s...'
                % (max_iter, extra))
    for kk in range(max_iter):
        # 1. Compute inverse of the weights (stabilized) and C
        s, u = eigh(G_R_Gt)
        s = abs(s)
        sidx = np.argsort(s)[::-1][:n_nzero]
        s, u = s[sidx], u[:, sidx]
        with np.errstate(invalid='ignore'):
            s = np.where(s > 0, 1 / (s + lambda2), 0)
        N = np.dot(u * s, u.T)
        del s
        # Update the weights
        R_last = R.copy()
        if n_orient == 1:
            R[:] = 1. / np.sqrt((np.dot(N, G) * G).sum(0))
        else:
            M = np.matmul(np.matmul(G_3, N[np.newaxis]), G_3.swapaxes(-2, -1))
            if force_equal:
                _, s = sqrtm_sym(M, inv=True)
                R[:] = np.repeat(1. / np.mean(s, axis=-1), 3)
            else:
                R[:], _ = sqrtm_sym(M, inv=True)
        R *= R_prior  # reapply our prior, eLORETA undoes it
        G_R_Gt = _this_normalize_R(G, R, G_3)
        # Check for weight convergence
        delta = (np.linalg.norm(R.ravel() - R_last.ravel()) /
                 np.linalg.norm(R_last.ravel()))
        logger.debug(' Iteration %s / %s ...%s (%0.1e)'
                     % (kk + 1, max_iter, extra, delta))
        if delta < eps:
            logger.info(' Converged on iteration %d (%0.2g < %0.2g)'
                        % (kk, delta, eps))
            break
    else:
        warn('eLORETA weight fitting did not converge (>= %s)' % eps)
    del G_R_Gt
    logger.info(' Updating inverse with weighted eigen leads')
    G /= source_std  # undo our biasing
    G_3 = _get_G_3(G, n_orient)
    _this_normalize_R(G, R, G_3)
    del G_3
    if n_orient == 1 or force_equal:
        R_sqrt = np.sqrt(R)
    else:
        R_sqrt = sqrtm_sym(R)[0]
    assert R_sqrt.shape == R_shape
    A = _R_sqrt_mult(G, R_sqrt)
    del R, G  # the rest will be done in terms of R_sqrt and A
    eigen_fields, sing, eigen_leads = _safe_svd(A, full_matrices=False)
    del A
    inv['sing'] = sing
    inv['reginv'] = _compute_reginv(inv, lambda2)
    inv['eigen_leads_weighted'] = True
    inv['eigen_leads']['data'] = _R_sqrt_mult(eigen_leads, R_sqrt).T
    inv['eigen_fields']['data'] = eigen_fields.T
    # XXX in theory we should set inv['source_cov'] properly.
    # For fixed ori (or free ori with force_equal=True), we can as these
    # are diagonal matrices. But for free ori without force_equal, it's a
    # block diagonal 3x3 and we have no efficient way of storing this (and
    # storing a covariance matrix with (20484 * 3) ** 2 elements is not going
    # to work. So let's just set to nan for now.
    # It's not used downstream anyway now that we set
    # eigen_leads_weighted = True.
    inv['source_cov']['data'].fill(np.nan)
    logger.info('[done]')
def _normalize_R(G, R, G_3, n_nzero, force_equal, n_src, n_orient):
"""Normalize R so that lambda2 is consistent."""
if n_orient == 1 or force_equal:
R_Gt = R[:, np.newaxis] * G.T
else:
R_Gt = np.matmul(R, G_3).reshape(n_src * 3, -1)
G_R_Gt = G @ R_Gt
norm = np.trace(G_R_Gt) / n_nzero
G_R_Gt /= norm
R /= norm
return G_R_Gt
def _get_G_3(G, n_orient):
if n_orient == 1:
return None
else:
return G.reshape(G.shape[0], -1, n_orient).transpose(1, 2, 0)
def _R_sqrt_mult(other, R_sqrt):
"""Do other @ R ** 0.5."""
if R_sqrt.ndim == 1:
assert other.shape[1] == R_sqrt.size
out = R_sqrt * other
else:
assert R_sqrt.shape[1:3] == (3, 3)
assert other.shape[1] == np.prod(R_sqrt.shape[:2])
assert other.ndim == 2
n_src = R_sqrt.shape[0]
n_chan = other.shape[0]
out = np.matmul(
R_sqrt, other.reshape(n_chan, n_src, 3).transpose(1, 2, 0)
).reshape(n_src * 3, n_chan).T
return out
| bsd-3-clause | 72001142b8916120b5cc32a72e911959 | 37.670213 | 78 | 0.579505 | 3.136324 | false | false | false | false |
mne-tools/mne-python | mne/viz/_brain/_brain.py | 1 | 153848 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Oleh Kozynets <ok7mailbox@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
# jona-sassenhagen <jona.sassenhagen@gmail.com>
# Joan Massich <mailsik@gmail.com>
#
# License: Simplified BSD
import contextlib
from functools import partial
from io import BytesIO
import os
import os.path as op
import time
import copy
import traceback
import warnings
import weakref
import numpy as np
from .colormap import calculate_lut
from .surface import _Surface
from .view import views_dicts, _lh_views_dict
from .callback import (ShowView, TimeCallBack, SmartCallBack,
UpdateLUT, UpdateColorbarScale)
from ..utils import (_show_help_fig, _get_color_list, concatenate_images,
_generate_default_filename, _save_ndarray_img, safe_event)
from .._3d import (_process_clim, _handle_time, _check_views,
_handle_sensor_types, _plot_sensors, _plot_forward)
from .._3d_overlay import _LayeredMesh
from ...defaults import _handle_default, DEFAULTS
from ..._freesurfer import (vertex_to_mni, read_talxfm, read_freesurfer_lut,
_get_head_surface, _get_skull_surface,
_estimate_talxfm_rigid)
from ...io.pick import pick_types
from ...io.meas_info import Info
from ...surface import (mesh_edges, _mesh_borders, _marching_cubes,
get_meg_helmet_surf)
from ...source_space import SourceSpaces
from ...transforms import (Transform, apply_trans, _frame_to_str,
_get_trans, _get_transforms_to_coord_frame)
from ...utils import (_check_option, logger, verbose, fill_doc, _validate_type,
use_log_level, Bunch, _ReuseCycle, warn,
get_subjects_dir, _check_fname, _to_rgb, _ensure_int)
_ARROW_MOVE = 10 # degrees per press
@fill_doc
class Brain(object):
"""Class for visualizing a brain.
.. warning::
The API for this class is not currently complete. We suggest using
:meth:`mne.viz.plot_source_estimates` with the PyVista backend
enabled to obtain a ``Brain`` instance.
Parameters
----------
subject : str
Subject name in Freesurfer subjects dir.
.. versionchanged:: 1.2
This parameter was renamed from ``subject_id`` to ``subject``.
hemi : str
Hemisphere id (ie 'lh', 'rh', 'both', or 'split'). In the case
of 'both', both hemispheres are shown in the same window.
In the case of 'split' hemispheres are displayed side-by-side
in different viewing panes.
surf : str
FreeSurfer surface mesh name (ie 'white', 'inflated', etc.).
title : str
Title for the window.
cortex : str, list, dict
Specifies how the cortical surface is rendered. Options:
1. The name of one of the preset cortex styles:
``'classic'`` (default), ``'high_contrast'``,
``'low_contrast'``, or ``'bone'``.
2. A single color-like argument to render the cortex as a single
color, e.g. ``'red'`` or ``(0.1, 0.4, 1.)``.
3. A list of two color-like used to render binarized curvature
values for gyral (first) and sulcal (second). regions, e.g.,
``['red', 'blue']`` or ``[(1, 0, 0), (0, 0, 1)]``.
4. A dict containing keys ``'vmin', 'vmax', 'colormap'`` with
values used to render the binarized curvature (where 0 is gyral,
1 is sulcal).
.. versionchanged:: 0.24
Add support for non-string arguments.
alpha : float in [0, 1]
Alpha level to control opacity of the cortical surface.
size : int | array-like, shape (2,)
The size of the window, in pixels. can be one number to specify
a square window, or a length-2 sequence to specify (width, height).
background : tuple(int, int, int)
The color definition of the background: (red, green, blue).
foreground : matplotlib color
Color of the foreground (will be used for colorbars and text).
None (default) will use black or white depending on the value
of ``background``.
figure : list of Figure | None
If None (default), a new window will be created with the appropriate
views.
subjects_dir : str | None
If not None, this directory will be used as the subjects directory
instead of the value set using the SUBJECTS_DIR environment
variable.
%(views)s
offset : bool | str
If True, shifts the right- or left-most x coordinate of the left and
right surfaces, respectively, to be at zero. This is useful for viewing
inflated surface where hemispheres typically overlap. Can be "auto"
(default) use True with inflated surfaces and False otherwise
(Default: 'auto'). Only used when ``hemi='both'``.
.. versionchanged:: 0.23
Default changed to "auto".
offscreen : bool
If True, rendering will be done offscreen (not shown). Useful
mostly for generating images or screenshots, but can be buggy.
Use at your own risk.
interaction : str
Can be "trackball" (default) or "terrain", i.e. a turntable-style
camera.
units : str
Can be 'm' or 'mm' (default).
%(view_layout)s
silhouette : dict | bool
As a dict, it contains the ``color``, ``linewidth``, ``alpha`` opacity
and ``decimate`` (level of decimation between 0 and 1 or None) of the
brain's silhouette to display. If True, the default values are used
and if False, no silhouette will be displayed. Defaults to False.
%(theme_3d)s
show : bool
Display the window as soon as it is ready. Defaults to True.
block : bool
If True, start the Qt application event loop. Default to False.
Attributes
----------
geo : dict
A dictionary of PyVista surface objects for each hemisphere.
overlays : dict
The overlays.
Notes
-----
This table shows the capabilities of each Brain backend ("✓" for full
support, and "-" for partial support):
.. table::
:widths: auto
+-------------------------------------+--------------+---------------+
| 3D function: | surfer.Brain | mne.viz.Brain |
+=====================================+==============+===============+
| :meth:`add_annotation` | ✓ | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`add_data` | ✓ | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`add_dipole` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`add_foci` | ✓ | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`add_forward` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`add_head` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`add_label` | ✓ | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`add_sensors` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`add_skull` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`add_text` | ✓ | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`add_volume_labels` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`close` | ✓ | ✓ |
+-------------------------------------+--------------+---------------+
| data | ✓ | ✓ |
+-------------------------------------+--------------+---------------+
| foci | ✓ | |
+-------------------------------------+--------------+---------------+
| labels | ✓ | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`remove_data` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`remove_dipole` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`remove_forward` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`remove_head` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`remove_labels` | ✓ | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`remove_annotations` | - | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`remove_sensors` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`remove_skull` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`remove_text` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`remove_volume_labels` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`save_image` | ✓ | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`save_movie` | ✓ | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`screenshot` | ✓ | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`show_view` | ✓ | ✓ |
+-------------------------------------+--------------+---------------+
| TimeViewer | ✓ | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`get_picked_points` | | ✓ |
+-------------------------------------+--------------+---------------+
| :meth:`add_data(volume) <add_data>` | | ✓ |
+-------------------------------------+--------------+---------------+
| view_layout | | ✓ |
+-------------------------------------+--------------+---------------+
| flatmaps | | ✓ |
+-------------------------------------+--------------+---------------+
| vertex picking | | ✓ |
+-------------------------------------+--------------+---------------+
| label picking | | ✓ |
+-------------------------------------+--------------+---------------+
"""
    def __init__(self, subject, hemi='both', surf='pial', title=None,
                 cortex="classic", alpha=1.0, size=800, background="black",
                 foreground=None, figure=None, subjects_dir=None,
                 views='auto', *, offset='auto',
                 offscreen=False, interaction='trackball', units='mm',
                 view_layout='vertical', silhouette=False, theme=None,
                 show=True, block=False):
        # Parameters are documented in the class docstring above.
        from ..backends.renderer import backend, _get_renderer
        _validate_type(subject, str, 'subject')
        if hemi is None:
            hemi = 'vol'
        hemi = self._check_hemi(hemi, extras=('both', 'split', 'vol'))
        # resolve the hemisphere spec into the concrete hemis to draw
        if hemi in ('both', 'split'):
            self._hemis = ('lh', 'rh')
        else:
            assert hemi in ('lh', 'rh', 'vol')
            self._hemis = (hemi, )
        self._view_layout = _check_option('view_layout', view_layout,
                                          ('vertical', 'horizontal'))
        if figure is not None and not isinstance(figure, int):
            backend._check_3d_figure(figure)
        if title is None:
            self._title = subject
        else:
            self._title = title
        # placeholder; the real value is applied via ``self.interaction``
        # at the end of __init__
        self._interaction = 'trackball'
        self._bg_color = _to_rgb(background, name='background')
        if foreground is None:
            # black or white, whichever contrasts with the background
            foreground = 'w' if sum(self._bg_color) < 2 else 'k'
        self._fg_color = _to_rgb(foreground, name='foreground')
        del background, foreground
        views = _check_views(surf, views, hemi)
        # subplot grid: 'split' uses one column per hemisphere
        col_dict = dict(lh=1, rh=1, both=1, split=2, vol=1)
        shape = (len(views), col_dict[hemi])
        if self._view_layout == 'horizontal':
            shape = shape[::-1]
        self._subplot_shape = shape
        size = tuple(np.atleast_1d(size).round(0).astype(int).flat)
        if len(size) not in (1, 2):
            raise ValueError('"size" parameter must be an int or length-2 '
                             'sequence of ints.')
        size = size if len(size) == 2 else size * 2  # 1-tuple to 2-tuple
        subjects_dir = get_subjects_dir(subjects_dir)
        self.time_viewer = False
        self._hash = time.time_ns()
        self._block = block
        self._hemi = hemi
        self._units = units
        self._alpha = float(alpha)
        self._subject = subject
        self._subjects_dir = subjects_dir
        self._views = views
        self._times = None
        self._vertex_to_label_id = dict()
        self._annotation_labels = dict()
        self._labels = {'lh': list(), 'rh': list()}
        self._unnamed_label_id = 0  # can only grow
        self._annots = {'lh': list(), 'rh': list()}
        self._layered_meshes = dict()
        self._actors = dict()
        self._elevation_rng = [15, 165]  # range of motion of camera on theta
        self._lut_locked = None
        self._cleaned = False
        # default values for silhouette
        self._silhouette = {
            'color': self._bg_color,
            'line_width': 2,
            'alpha': alpha,
            'decimate': 0.9,
        }
        _validate_type(silhouette, (dict, bool), 'silhouette')
        if isinstance(silhouette, dict):
            self._silhouette.update(silhouette)
            self.silhouette = True
        else:
            self.silhouette = silhouette
        self._scalar_bar = None
        # for now only one time label can be added
        # since it is the same for all figures
        self._time_label_added = False
        # array of data used by TimeViewer
        self._data = {}
        self.geo = {}
        self.set_time_interpolation('nearest')
        geo_kwargs = self._cortex_colormap(cortex)
        # evaluate at the midpoint of the used colormap
        val = -geo_kwargs['vmin'] / (geo_kwargs['vmax'] - geo_kwargs['vmin'])
        self._brain_color = geo_kwargs['colormap'](val)
        # load geometry for one or both hemispheres as necessary
        _validate_type(offset, (str, bool), 'offset')
        if isinstance(offset, str):
            _check_option('offset', offset, ('auto',), extra='when str')
            offset = (surf in ('inflated', 'flat'))
        offset = None if (not offset or hemi != 'both') else 0.0
        logger.debug(f'Hemi offset: {offset}')
        _validate_type(theme, (str, None), 'theme')
        self._renderer = _get_renderer(name=self._title, size=size,
                                       bgcolor=self._bg_color,
                                       shape=shape,
                                       fig=figure)
        self._renderer._window_close_connect(self._clean)
        self._renderer._window_set_theme(theme)
        self.plotter = self._renderer.plotter
        self._setup_canonical_rotation()
        # plot hemis
        for h in ('lh', 'rh'):
            if h not in self._hemis:
                continue  # don't make surface if not chosen
            # Initialize a Surface object as the geometry
            geo = _Surface(self._subject, h, surf, self._subjects_dir,
                           offset, units=self._units, x_dir=self._rigid[0, :3])
            # Load in the geometry and curvature
            geo.load_geometry()
            geo.load_curvature()
            self.geo[h] = geo
            for _, _, v in self._iter_views(h):
                if self._layered_meshes.get(h) is None:
                    mesh = _LayeredMesh(
                        renderer=self._renderer,
                        vertices=self.geo[h].coords,
                        triangles=self.geo[h].faces,
                        normals=self.geo[h].nn,
                    )
                    mesh.map()  # send to GPU
                    # binarized curvature as the base (bottom) overlay
                    mesh.add_overlay(
                        scalars=self.geo[h].bin_curv,
                        colormap=geo_kwargs["colormap"],
                        rng=[geo_kwargs["vmin"], geo_kwargs["vmax"]],
                        opacity=alpha,
                        name='curv',
                    )
                    self._layered_meshes[h] = mesh
                    # add metadata to the mesh for picking
                    mesh._polydata._hemi = h
                else:
                    # mesh already built for this hemi: reuse its actor
                    actor = self._layered_meshes[h]._actor
                    self._renderer.plotter.add_actor(actor, render=False)
                if self.silhouette:
                    mesh = self._layered_meshes[h]
                    self._renderer._silhouette(
                        mesh=mesh._polydata,
                        color=self._silhouette["color"],
                        line_width=self._silhouette["line_width"],
                        alpha=self._silhouette["alpha"],
                        decimate=self._silhouette["decimate"],
                    )
                self._renderer.set_camera(update=False, reset_camera=False,
                                          **views_dicts[h][v])
        self.interaction = interaction
        self._closed = False
        if show:
            self.show()
        # update the views once the geometry is all set
        for h in self._hemis:
            for ri, ci, v in self._iter_views(h):
                self.show_view(v, row=ri, col=ci, hemi=h)
        if surf == 'flat':
            self._renderer.set_interaction("rubber_band_2d")
    def _setup_canonical_rotation(self):
        # Estimate a rigid transform to Talairach space so camera views are
        # comparable across subjects; fall back to identity on failure.
        self._rigid = np.eye(4)
        try:
            xfm = _estimate_talxfm_rigid(self._subject, self._subjects_dir)
        except Exception:
            logger.info('Could not estimate rigid Talairach alignment, '
                        'using identity matrix')
        else:
            self._rigid[:] = xfm
    def setup_time_viewer(self, time_viewer=True, show_traces=True):
        """Configure the time viewer parameters.

        Parameters
        ----------
        time_viewer : bool
            If True, enable widgets interaction. Defaults to True.
        show_traces : bool
            If True, enable visualization of time traces. Defaults to True.

        Notes
        -----
        The keyboard shortcuts are the following:

        '?': Display help window
        'i': Toggle interface
        's': Apply auto-scaling
        'r': Restore original clim
        'c': Clear all traces
        'n': Shift the time forward by the playback speed
        'b': Shift the time backward by the playback speed
        'Space': Start/Pause playback
        'Up': Decrease camera elevation angle
        'Down': Increase camera elevation angle
        'Left': Decrease camera azimuth angle
        'Right': Increase camera azimuth angle
        """
        from ..backends._utils import _qt_app_exec
        if self.time_viewer:
            return  # already configured
        if not self._data:
            raise ValueError("No data to visualize. See ``add_data``.")
        self.time_viewer = time_viewer
        self.orientation = list(_lh_views_dict.keys())
        self.default_smoothing_range = [-1, 15]
        # Default configuration
        self.playback = False
        self.visibility = False
        self.refresh_rate_ms = max(int(round(1000. / 60.)), 1)
        self.default_scaling_range = [0.2, 2.0]
        self.default_playback_speed_range = [0.01, 1]
        self.default_playback_speed_value = 0.01
        self.default_status_bar_msg = "Press ? for help"
        self.default_label_extract_modes = {
            "stc": ["mean", "max"],
            "src": ["mean_flip", "pca_flip", "auto"],
        }
        self.default_trace_modes = ('vertex', 'label')
        self.annot = None
        self.label_extract_mode = None
        all_keys = ('lh', 'rh', 'vol')
        self.act_data_smooth = {key: (None, None) for key in all_keys}
        self.color_list = _get_color_list()
        # remove grey for better contrast on the brain
        self.color_list.remove("#7f7f7f")
        self.color_cycle = _ReuseCycle(self.color_list)
        self.mpl_canvas = None
        self.help_canvas = None
        self.rms = None
        self.picked_patches = {key: list() for key in all_keys}
        self.picked_points = {key: list() for key in all_keys}
        self.pick_table = dict()
        self._spheres = list()
        self._mouse_no_mvt = -1
        self.callbacks = dict()
        self.widgets = dict()
        self.keys = ('fmin', 'fmid', 'fmax')
        # Derived parameters:
        self.playback_speed = self.default_playback_speed_value
        # show_traces may be bool, a mode string, or a fraction in (0, 1)
        _validate_type(show_traces, (bool, str, 'numeric'), 'show_traces')
        self.interactor_fraction = 0.25
        if isinstance(show_traces, str):
            self.show_traces = True
            self.separate_canvas = False
            self.traces_mode = 'vertex'
            if show_traces == 'separate':
                self.separate_canvas = True
            elif show_traces == 'label':
                self.traces_mode = 'label'
            else:
                assert show_traces == 'vertex'  # guaranteed above
        else:
            if isinstance(show_traces, bool):
                self.show_traces = show_traces
            else:
                # numeric value: fraction of the window used by the canvas
                show_traces = float(show_traces)
                if not 0 < show_traces < 1:
                    raise ValueError(
                        'show traces, if numeric, must be between 0 and 1, '
                        f'got {show_traces}')
                self.show_traces = True
                self.interactor_fraction = show_traces
            self.traces_mode = 'vertex'
            self.separate_canvas = False
        del show_traces
        # build all UI pieces
        self._configure_time_label()
        self._configure_scalar_bar()
        self._configure_shortcuts()
        self._configure_picking()
        self._configure_tool_bar()
        self._configure_dock()
        self._configure_menu()
        self._configure_status_bar()
        self._configure_playback()
        self._configure_help()
        # show everything at the end
        self.toggle_interface()
        self._renderer.show()
        # sizes could change, update views
        for hemi in ('lh', 'rh'):
            for ri, ci, v in self._iter_views(hemi):
                self.show_view(view=v, row=ri, col=ci)
        self._renderer._process_events()
        self._renderer._update()
        # finally, show the MplCanvas
        if self.show_traces:
            self.mpl_canvas.show()
        if self._block:
            _qt_app_exec(self._renderer.figure.store["app"])
    @safe_event
    def _clean(self):
        # Tear down on window close: break reference cycles between the
        # Brain, the renderer, and Qt/VTK objects so everything can be GC'd.
        # resolve the reference cycle
        self._renderer._window_close_disconnect()
        self.clear_glyphs()
        self.remove_annotations()
        # clear init actors
        for hemi in self._layered_meshes:
            self._layered_meshes[hemi]._clean()
        self._clear_callbacks()
        self._clear_widgets()
        if getattr(self, 'mpl_canvas', None) is not None:
            self.mpl_canvas.clear()
        if getattr(self, 'act_data_smooth', None) is not None:
            for key in list(self.act_data_smooth.keys()):
                self.act_data_smooth[key] = None
        # XXX this should be done in PyVista
        for renderer in self._renderer._all_renderers:
            renderer.RemoveAllLights()
        # app_window cannot be set to None because it is used in __del__
        for key in ('lighting', 'interactor', '_RenderWindow'):
            setattr(self.plotter, key, None)
        # Qt LeaveEvent requires _Iren so we use _FakeIren instead of None
        # to resolve the ref to vtkGenericRenderWindowInteractor
        self.plotter._Iren = _FakeIren()
        if getattr(self.plotter, 'picker', None) is not None:
            self.plotter.picker = None
        # XXX end PyVista
        for key in ('plotter', 'window', 'dock', 'tool_bar', 'menu_bar',
                    'interactor', 'mpl_canvas', 'time_actor',
                    'picked_renderer', 'act_data_smooth', '_scalar_bar',
                    'actions', 'widgets', 'geo', '_data'):
            setattr(self, key, None)
        self._cleaned = True
    def toggle_interface(self, value=None):
        """Toggle the interface.

        Parameters
        ----------
        value : bool | None
            If True, the widgets are shown and if False, they
            are hidden. If None, the state of the widgets is
            toggled. Defaults to None.
        """
        if value is None:
            self.visibility = not self.visibility
        else:
            self.visibility = value
        # update tool bar and dock
        with self._renderer._window_ensure_minimum_sizes():
            if self.visibility:
                self._renderer._dock_show()
                self._renderer._tool_bar_update_button_icon(
                    name="visibility", icon_name="visibility_on")
            else:
                self._renderer._dock_hide()
                self._renderer._tool_bar_update_button_icon(
                    name="visibility", icon_name="visibility_off")
            self._renderer._update()
    def apply_auto_scaling(self):
        """Detect automatically fitting scaling parameters."""
        self._update_auto_scaling()
    def restore_user_scaling(self):
        """Restore original scaling parameters."""
        self._update_auto_scaling(restore=True)
    def toggle_playback(self, value=None):
        """Toggle time playback.

        Parameters
        ----------
        value : bool | None
            If True, automatic time playback is enabled and if False,
            it's disabled. If None, the state of time playback is toggled.
            Defaults to None.
        """
        if value is None:
            self.playback = not self.playback
        else:
            self.playback = value
        # update tool bar icon
        if self.playback:
            self._renderer._tool_bar_update_button_icon(
                name="play", icon_name="pause")
        else:
            self._renderer._tool_bar_update_button_icon(
                name="play", icon_name="play")
        if self.playback:
            time_data = self._data['time']
            max_time = np.max(time_data)
            if self._current_time == max_time:  # start over
                self.set_time_point(0)  # first index
            self._last_tick = time.time()
    def reset(self):
        """Reset view and time step."""
        self.reset_view()
        max_time = len(self._data['time']) - 1
        if max_time > 0:
            # jump back to the initially displayed time index
            self.callbacks["time"](
                self._data["initial_time_idx"],
                update_widget=True,
            )
        self._renderer._update()
    def set_playback_speed(self, speed):
        """Set the time playback speed.

        Parameters
        ----------
        speed : float
            The speed of the playback.
        """
        self.playback_speed = speed
    @safe_event
    def _play(self):
        # Timer tick: advance one playback step; stop playback if the
        # advance raises so errors are not swallowed by the event loop.
        if self.playback:
            try:
                self._advance()
            except Exception:
                self.toggle_playback(value=False)
                raise
    def _advance(self):
        # Advance the displayed time proportionally to wall-clock time
        # elapsed since the last tick, scaled by the playback speed.
        this_time = time.time()
        delta = this_time - self._last_tick
        # NOTE(review): time.time() is called a second time here, so the
        # interval spent between the two calls is dropped from the next
        # delta; presumably negligible -- confirm if drift matters.
        self._last_tick = time.time()
        time_data = self._data['time']
        times = np.arange(self._n_times)
        time_shift = delta * self.playback_speed
        max_time = np.max(time_data)
        time_point = min(self._current_time + time_shift, max_time)
        # always use linear here -- this does not determine the data
        # interpolation mode, it just finds where we are (in time) in
        # terms of the time indices
        idx = np.interp(time_point, time_data, times)
        self.callbacks["time"](idx, update_widget=True)
        if time_point == max_time:
            self.toggle_playback(value=False)  # reached the end
    def _configure_time_label(self):
        # Center and bold the time-text actor created by ``add_data``
        self.time_actor = self._data.get('time_actor')
        if self.time_actor is not None:
            self.time_actor.SetPosition(0.5, 0.03)
            self.time_actor.GetTextProperty().SetJustificationToCentered()
            self.time_actor.GetTextProperty().BoldOn()
    def _configure_scalar_bar(self):
        # Position the colorbar vertically along the left edge of the view
        if self._scalar_bar is not None:
            self._scalar_bar.SetOrientationToVertical()
            self._scalar_bar.SetHeight(0.6)
            self._scalar_bar.SetWidth(0.05)
            self._scalar_bar.SetPosition(0.02, 0.2)
    def _configure_dock_time_widget(self, layout=None):
        # Add the min / current / max time labels in a horizontal row;
        # no-op when there is only a single time point.
        len_time = len(self._data['time']) - 1
        if len_time < 1:
            return
        layout = self._renderer.dock_layout if layout is None else layout
        hlayout = self._renderer._dock_add_layout(vertical=False)
        # placeholder values are overwritten below
        self.widgets["min_time"] = self._renderer._dock_add_label(
            value="-", layout=hlayout)
        self._renderer._dock_add_stretch(hlayout)
        self.widgets["current_time"] = self._renderer._dock_add_label(
            value="x", layout=hlayout)
        self._renderer._dock_add_stretch(hlayout)
        self.widgets["max_time"] = self._renderer._dock_add_label(
            value="+", layout=hlayout)
        self._renderer._layout_add_widget(layout, hlayout)
        min_time = float(self._data['time'][0])
        max_time = float(self._data['time'][-1])
        self.widgets["min_time"].set_value(f"{min_time: .3f}")
        self.widgets["max_time"].set_value(f"{max_time: .3f}")
        self.widgets["current_time"].set_value(f"{self._current_time: .3f}")
    def _configure_dock_playback_widget(self, name):
        # Build the playback group box: time slider, min/current/max labels,
        # and playback-speed spin box. All are disabled (None) when the data
        # has fewer than two time points.
        layout = self._renderer._dock_add_group_box(name)
        len_time = len(self._data['time']) - 1
        # Time widget
        if len_time < 1:
            self.callbacks["time"] = None
            self.widgets["time"] = None
        else:
            self.callbacks["time"] = TimeCallBack(
                brain=self,
                callback=self.plot_time_line,
            )
            self.widgets["time"] = self._renderer._dock_add_slider(
                name="Time (s)",
                value=self._data['time_idx'],
                rng=[0, len_time],
                double=True,
                callback=self.callbacks["time"],
                compact=False,
                layout=layout,
            )
            self.callbacks["time"].widget = self.widgets["time"]
        # Time labels
        if len_time < 1:
            self.widgets["min_time"] = None
            self.widgets["max_time"] = None
            self.widgets["current_time"] = None
        else:
            self._configure_dock_time_widget(layout)
            self.callbacks["time"].label = self.widgets["current_time"]
        # Playback speed widget
        if len_time < 1:
            self.callbacks["playback_speed"] = None
            self.widgets["playback_speed"] = None
        else:
            self.callbacks["playback_speed"] = SmartCallBack(
                callback=self.set_playback_speed,
            )
            self.widgets["playback_speed"] = self._renderer._dock_add_spin_box(
                name="Speed",
                value=self.default_playback_speed_value,
                rng=self.default_playback_speed_range,
                callback=self.callbacks["playback_speed"],
                layout=layout,
            )
            self.callbacks["playback_speed"].widget = \
                self.widgets["playback_speed"]
        # Time label
        current_time = self._current_time
        assert current_time is not None  # should never be the case, float
        time_label = self._data['time_label']
        if callable(time_label):
            current_time = time_label(current_time)
        else:
            current_time = time_label
        if self.time_actor is not None:
            self.time_actor.SetInput(current_time)
        del current_time
    def _configure_dock_orientation_widget(self, name):
        # Build the orientation group box: an optional renderer selector
        # (when there are multiple subplots) and a camera-view combo box.
        layout = self._renderer._dock_add_group_box(name)
        # Renderer widget
        rends = [str(i) for i in range(len(self._renderer._all_renderers))]
        if len(rends) > 1:
            def select_renderer(idx):
                # switch the active subplot to the chosen renderer
                idx = int(idx)
                loc = self._renderer._index_to_loc(idx)
                self.plotter.subplot(*loc)
            self.callbacks["renderer"] = SmartCallBack(
                callback=select_renderer,
            )
            self.widgets["renderer"] = self._renderer._dock_add_combo_box(
                name="Renderer",
                value="0",
                rng=rends,
                callback=self.callbacks["renderer"],
                layout=layout,
            )
            self.callbacks["renderer"].widget = \
                self.widgets["renderer"]
        # Use 'lh' as a reference for orientation for 'both'
        if self._hemi == 'both':
            hemis_ref = ['lh']
        else:
            hemis_ref = self._hemis
        # one entry per renderer; flat views get no orientation data
        orientation_data = [None] * len(rends)
        for hemi in hemis_ref:
            for ri, ci, v in self._iter_views(hemi):
                idx = self._renderer._loc_to_index((ri, ci))
                if v == 'flat':
                    _data = None
                else:
                    _data = dict(default=v, hemi=hemi, row=ri, col=ci)
                orientation_data[idx] = _data
        self.callbacks["orientation"] = ShowView(
            brain=self,
            data=orientation_data,
        )
        self.widgets["orientation"] = self._renderer._dock_add_combo_box(
            name=None,
            value=self.orientation[0],
            rng=self.orientation,
            callback=self.callbacks["orientation"],
            layout=layout,
        )
    def _configure_dock_colormap_widget(self, name):
        """Add the color-limit (fmin/fmid/fmax) controls to the dock.

        Creates a slider plus spin box per limit key, a reset button,
        and two buttons that rescale the colorbar by a constant factor.
        """
        layout = self._renderer._dock_add_group_box(name)
        self._renderer._dock_add_label(
            value="min / mid / max",
            align=True,
            layout=layout,
        )
        up = UpdateLUT(brain=self)
        for key in self.keys:
            hlayout = self._renderer._dock_add_layout(vertical=False)
            rng = _get_range(self)
            # key=key binds the loop variable at definition time
            self.callbacks[key] = lambda value, key=key: up(**{key: value})
            self.widgets[key] = self._renderer._dock_add_slider(
                name=None,
                value=self._data[key],
                rng=rng,
                callback=self.callbacks[key],
                double=True,
                layout=hlayout,
            )
            self.widgets[f"entry_{key}"] = self._renderer._dock_add_spin_box(
                name=None,
                value=self._data[key],
                callback=self.callbacks[key],
                rng=rng,
                layout=hlayout,
            )
            up.widgets[key] = [self.widgets[key], self.widgets[f"entry_{key}"]]
            self._renderer._layout_add_widget(layout, hlayout)
        # reset / minus / plus
        hlayout = self._renderer._dock_add_layout(vertical=False)
        self._renderer._dock_add_label(
            value="Rescale",
            align=True,
            layout=hlayout,
        )
        self.widgets["reset"] = self._renderer._dock_add_button(
            name="↺",
            callback=self.restore_user_scaling,
            layout=hlayout,
            style='toolbutton',
        )
        for key, char, val in (("fminus", "➖", 1.2 ** -0.25),
                               ("fplus", "➕", 1.2 ** 0.25)):
            self.callbacks[key] = UpdateColorbarScale(
                brain=self,
                factor=val,
            )
            self.widgets[key] = self._renderer._dock_add_button(
                name=char,
                callback=self.callbacks[key],
                layout=hlayout,
                style='toolbutton',
            )
        self._renderer._layout_add_widget(layout, hlayout)
        # register colorbar slider representations
        # NOTE: this loop variable shadows the ``name`` parameter, which
        # is no longer needed at this point
        widgets = {key: self.widgets[key] for key in self.keys}
        for name in ("fmin", "fmid", "fmax", "fminus", "fplus"):
            self.callbacks[name].widgets = widgets
    def _configure_dock_trace_widget(self, name):
        """Add the trace (annotation / label-extraction) section to the dock.

        For volume sources only the vertex time course is configured; for
        surfaces, combo boxes select the annotation file and the label
        extraction mode.
        """
        if not self.show_traces:
            return
        # do not show trace mode for volumes
        if (self._data.get('src', None) is not None and
                self._data['src'].kind == 'volume'):
            self._configure_vertex_time_course()
            return
        layout = self._renderer._dock_add_group_box(name)
        # weakref keeps the widget callbacks from keeping the Brain alive
        weakself = weakref.ref(self)
        # setup candidate annots
        def _set_annot(annot, weakself=weakself):
            self = weakself()
            if self is None:
                return
            self.clear_glyphs()
            self.remove_labels()
            self.remove_annotations()
            self.annot = annot
            if annot == 'None':
                self.traces_mode = 'vertex'
                self._configure_vertex_time_course()
            else:
                self.traces_mode = 'label'
                self._configure_label_time_course()
            self._renderer._update()
        # setup label extraction parameters
        def _set_label_mode(mode, weakself=weakself):
            self = weakself()
            if self is None:
                return
            if self.traces_mode != 'label':
                return
            # re-create the picked patches under the new extraction mode
            glyphs = copy.deepcopy(self.picked_patches)
            self.label_extract_mode = mode
            self.clear_glyphs()
            for hemi in self._hemis:
                for label_id in glyphs[hemi]:
                    label = self._annotation_labels[hemi][label_id]
                    vertex_id = label.vertices[0]
                    self._add_label_glyph(hemi, None, vertex_id)
            self.mpl_canvas.axes.relim()
            self.mpl_canvas.axes.autoscale_view()
            self.mpl_canvas.update_plot()
            self._renderer._update()
        from ...source_estimate import _get_allowed_label_modes
        from ...label import _read_annot_cands
        dir_name = op.join(self._subjects_dir, self._subject, 'label')
        cands = _read_annot_cands(dir_name, raise_error=False)
        cands = cands + ['None']
        self.annot = cands[0]
        stc = self._data["stc"]
        modes = _get_allowed_label_modes(stc)
        if self._data["src"] is None:
            # modes requiring a source space are unavailable
            modes = [m for m in modes if m not in
                     self.default_label_extract_modes["src"]]
        self.label_extract_mode = modes[-1]
        if self.traces_mode == 'vertex':
            _set_annot('None')
        else:
            _set_annot(self.annot)
        self.widgets["annotation"] = self._renderer._dock_add_combo_box(
            name="Annotation",
            value=self.annot,
            rng=cands,
            callback=_set_annot,
            layout=layout,
        )
        self.widgets["extract_mode"] = self._renderer._dock_add_combo_box(
            name="Extract mode",
            value=self.label_extract_mode,
            rng=modes,
            callback=_set_label_mode,
            layout=layout,
        )
def _configure_dock(self):
self._renderer._dock_initialize()
self._configure_dock_playback_widget(name="Playback")
self._configure_dock_orientation_widget(name="Orientation")
self._configure_dock_colormap_widget(name="Color Limits")
self._configure_dock_trace_widget(name="Trace")
# Smoothing widget
self.callbacks["smoothing"] = SmartCallBack(
callback=self.set_data_smoothing,
)
self.widgets["smoothing"] = self._renderer._dock_add_spin_box(
name="Smoothing",
value=self._data['smoothing_steps'],
rng=self.default_smoothing_range,
callback=self.callbacks["smoothing"],
double=False
)
self.callbacks["smoothing"].widget = \
self.widgets["smoothing"]
self._renderer._dock_finalize()
def _configure_playback(self):
self._renderer._playback_initialize(
func=self._play,
timeout=self.refresh_rate_ms,
value=self._data['time_idx'],
rng=[0, len(self._data['time']) - 1],
time_widget=self.widgets["time"],
play_widget=self.widgets["play"],
)
def _configure_mplcanvas(self):
# Get the fractional components for the brain and mpl
self.mpl_canvas = self._renderer._window_get_mplcanvas(
brain=self,
interactor_fraction=self.interactor_fraction,
show_traces=self.show_traces,
separate_canvas=self.separate_canvas
)
xlim = [np.min(self._data['time']),
np.max(self._data['time'])]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
self.mpl_canvas.axes.set(xlim=xlim)
if not self.separate_canvas:
self._renderer._window_adjust_mplcanvas_layout()
self.mpl_canvas.set_color(
bg_color=self._bg_color,
fg_color=self._fg_color,
)
    def _configure_vertex_time_course(self):
        """Set up vertex-trace mode: RMS line, time line, and initial pick.

        Picks the vertex with the maximum absolute activation for each
        hemisphere (at ``initial_time`` if provided, else over all times).
        """
        if not self.show_traces:
            return
        if self.mpl_canvas is None:
            self._configure_mplcanvas()
        else:
            self.clear_glyphs()
        # plot RMS of the activation
        y = np.concatenate(list(v[0] for v in self.act_data_smooth.values()
                                if v[0] is not None))
        rms = np.linalg.norm(y, axis=0) / np.sqrt(len(y))
        del y
        self.rms, = self.mpl_canvas.axes.plot(
            self._data['time'], rms,
            lw=3, label='RMS', zorder=3, color=self._fg_color,
            alpha=0.5, ls=':')
        # now plot the time line
        self.plot_time_line(update=False)
        # then the picked points
        for idx, hemi in enumerate(['lh', 'rh', 'vol']):
            act_data = self.act_data_smooth.get(hemi, [None])[0]
            if act_data is None:
                continue
            hemi_data = self._data[hemi]
            vertices = hemi_data['vertices']
            # simulate a picked renderer
            if self._hemi in ('both', 'rh') or hemi == 'vol':
                idx = 0
            self.picked_renderer = self._renderer._all_renderers[idx]
            # initialize the default point
            if self._data['initial_time'] is not None:
                # pick at that time
                use_data = act_data[
                    :, [np.round(self._data['time_idx']).astype(int)]]
            else:
                use_data = act_data
            # vertex (row) index of the global absolute maximum
            ind = np.unravel_index(np.argmax(np.abs(use_data), axis=None),
                                   use_data.shape)
            if hemi == 'vol':
                mesh = hemi_data['grid']
            else:
                mesh = self._layered_meshes[hemi]._polydata
            vertex_id = vertices[ind[0]]
            self._add_vertex_glyph(hemi, mesh, vertex_id, update=False)
def _configure_picking(self):
# get data for each hemi
from scipy import sparse
for idx, hemi in enumerate(['vol', 'lh', 'rh']):
hemi_data = self._data.get(hemi)
if hemi_data is not None:
act_data = hemi_data['array']
if act_data.ndim == 3:
act_data = np.linalg.norm(act_data, axis=1)
smooth_mat = hemi_data.get('smooth_mat')
vertices = hemi_data['vertices']
if hemi == 'vol':
assert smooth_mat is None
smooth_mat = sparse.csr_matrix(
(np.ones(len(vertices)),
(vertices, np.arange(len(vertices)))))
self.act_data_smooth[hemi] = (act_data, smooth_mat)
self._renderer._update_picking_callback(
self._on_mouse_move,
self._on_button_press,
self._on_button_release,
self._on_pick
)
    def _configure_tool_bar(self):
        """Create the toolbar: screenshot, movie, visibility, playback,
        reset, auto-scale, clear-traces, and help buttons.
        """
        self._renderer._tool_bar_initialize(name="Toolbar")
        # weakref avoids the toolbar closures keeping the Brain alive
        weakself = weakref.ref(self)
        def save_image(filename, weakself=weakself):
            self = weakself()
            if self is None:
                return
            self.save_image(filename)
        self._renderer._tool_bar_add_file_button(
            name="screenshot",
            desc="Take a screenshot",
            func=save_image,
        )
        def save_movie(filename, weakself=weakself):
            self = weakself()
            if self is None:
                return
            self.save_movie(
                filename=filename,
                time_dilation=(1. / self.playback_speed))
        self._renderer._tool_bar_add_file_button(
            name="movie",
            desc="Save movie...",
            func=save_movie,
            shortcut="ctrl+shift+s",
        )
        self._renderer._tool_bar_add_button(
            name="visibility",
            desc="Toggle Controls",
            func=self.toggle_interface,
            icon_name="visibility_on"
        )
        self.widgets["play"] = self._renderer._tool_bar_add_play_button(
            name="play",
            desc="Play/Pause",
            func=self.toggle_playback,
            shortcut=" ",
        )
        self._renderer._tool_bar_add_button(
            name="reset",
            desc="Reset",
            func=self.reset,
        )
        self._renderer._tool_bar_add_button(
            name="scale",
            desc="Auto-Scale",
            func=self.apply_auto_scaling,
        )
        self._renderer._tool_bar_add_button(
            name="clear",
            desc="Clear traces",
            func=self.clear_glyphs,
        )
        self._renderer._tool_bar_add_spacer()
        self._renderer._tool_bar_add_button(
            name="help",
            desc="Help",
            func=self.help,
            shortcut="?",
        )
def _shift_time(self, op):
self.callbacks["time"](
value=(op(self._current_time, self.playback_speed)),
time_as_index=False,
update_widget=True,
)
def _rotate_azimuth(self, value):
azimuth = (self._renderer.figure._azimuth + value) % 360
self._renderer.set_camera(azimuth=azimuth, reset_camera=False)
def _rotate_elevation(self, value):
elevation = np.clip(
self._renderer.figure._elevation + value,
self._elevation_rng[0],
self._elevation_rng[1],
)
self._renderer.set_camera(elevation=elevation, reset_camera=False)
def _configure_shortcuts(self):
# First, we remove the default bindings:
self._clear_callbacks()
# Then, we add our own:
self.plotter.add_key_event("i", self.toggle_interface)
self.plotter.add_key_event("s", self.apply_auto_scaling)
self.plotter.add_key_event("r", self.restore_user_scaling)
self.plotter.add_key_event("c", self.clear_glyphs)
self.plotter.add_key_event("n", partial(self._shift_time,
op=lambda x, y: x + y))
self.plotter.add_key_event("b", partial(self._shift_time,
op=lambda x, y: x - y))
for key, func, sign in (("Left", self._rotate_azimuth, 1),
("Right", self._rotate_azimuth, -1),
("Up", self._rotate_elevation, 1),
("Down", self._rotate_elevation, -1)):
self.plotter.add_key_event(key, partial(func, sign * _ARROW_MOVE))
def _configure_menu(self):
self._renderer._menu_initialize()
self._renderer._menu_add_submenu(
name="help",
desc="Help",
)
self._renderer._menu_add_button(
menu_name="help",
name="help",
desc="Show MNE key bindings\t?",
func=self.help,
)
def _configure_status_bar(self):
self._renderer._status_bar_initialize()
self.status_msg = self._renderer._status_bar_add_label(
self.default_status_bar_msg, stretch=1)
self.status_progress = self._renderer._status_bar_add_progress_bar()
if self.status_progress is not None:
self.status_progress.hide()
def _on_mouse_move(self, vtk_picker, event):
if self._mouse_no_mvt:
self._mouse_no_mvt -= 1
    def _on_button_press(self, vtk_picker, event):
        # arm click detection: tolerate up to two mouse-move events
        # before the release is no longer considered a click
        self._mouse_no_mvt = 2
    def _on_button_release(self, vtk_picker, event):
        # a click is a press followed by (almost) no mouse movement
        if self._mouse_no_mvt > 0:
            x, y = vtk_picker.GetEventPosition()
            # programmatically detect the picked renderer
            try:
                # pyvista<0.30.0
                self.picked_renderer = \
                    self.plotter.iren.FindPokedRenderer(x, y)
            except AttributeError:
                # pyvista>=0.30.0
                self.picked_renderer = \
                    self.plotter.iren.interactor.FindPokedRenderer(x, y)
            # trigger the pick
            self.plotter.picker.Pick(x, y, 0, self.picked_renderer)
        # reset the budget for the next press
        self._mouse_no_mvt = 0
    def _on_pick(self, vtk_picker, event):
        """Handle a completed pick: toggle a glyph or add a new trace."""
        if not self.show_traces:
            return
        # vtk_picker is a vtkCellPicker
        cell_id = vtk_picker.GetCellId()
        mesh = vtk_picker.GetDataSet()
        if mesh is None or cell_id == -1 or not self._mouse_no_mvt:
            return  # don't pick
        # 1) Check to see if there are any spheres along the ray
        if len(self._spheres):
            collection = vtk_picker.GetProp3Ds()
            found_sphere = None
            for ii in range(collection.GetNumberOfItems()):
                actor = collection.GetItemAsObject(ii)
                for sphere in self._spheres:
                    if any(a is actor for a in sphere._actors):
                        found_sphere = sphere
                        break
                if found_sphere is not None:
                    break
            if found_sphere is not None:
                assert found_sphere._is_glyph
                mesh = found_sphere
        # 2) Remove sphere if it's what we have
        if hasattr(mesh, "_is_glyph"):
            self._remove_vertex_glyph(mesh)
            return
        # 3) Otherwise, pick the objects in the scene
        try:
            hemi = mesh._hemi
        except AttributeError:  # volume
            hemi = 'vol'
        else:
            assert hemi in ('lh', 'rh')
        if self.act_data_smooth[hemi][0] is None:  # no data to add for hemi
            return
        pos = np.array(vtk_picker.GetPickPosition())
        if hemi == 'vol':
            # VTK will give us the point closest to the viewer in the vol.
            # We want to pick the point with the maximum value along the
            # camera-to-click array, which fortunately we can get "just"
            # by inspecting the points that are sufficiently close to the
            # ray.
            grid = mesh = self._data[hemi]['grid']
            vertices = self._data[hemi]['vertices']
            coords = self._data[hemi]['grid_coords'][vertices]
            scalars = grid.cell_data['values'][vertices]
            spacing = np.array(grid.GetSpacing())
            max_dist = np.linalg.norm(spacing) / 2.
            origin = vtk_picker.GetRenderer().GetActiveCamera().GetPosition()
            ori = pos - origin
            ori /= np.linalg.norm(ori)
            # the magic formula: distance from a ray to a given point
            dists = np.linalg.norm(np.cross(ori, coords - pos), axis=1)
            assert dists.shape == (len(coords),)
            mask = dists <= max_dist
            idx = np.where(mask)[0]
            if len(idx) == 0:
                return  # weird point on edge of volume?
            # useful for debugging the ray by mapping it into the volume:
            # dists = dists - dists.min()
            # dists = (1. - dists / dists.max()) * self._cmap_range[1]
            # grid.cell_data['values'][vertices] = dists * mask
            # among voxels near the ray, take the strongest activation
            idx = idx[np.argmax(np.abs(scalars[idx]))]
            vertex_id = vertices[idx]
            # Naive way: convert pos directly to idx; i.e., apply mri_src_t
            # shape = self._data[hemi]['grid_shape']
            # taking into account the cell vs point difference (spacing/2)
            # shift = np.array(grid.GetOrigin()) + spacing / 2.
            # ijk = np.round((pos - shift) / spacing).astype(int)
            # vertex_id = np.ravel_multi_index(ijk, shape, order='F')
        else:
            # surface: take the cell's point closest to the pick position
            vtk_cell = mesh.GetCell(cell_id)
            cell = [vtk_cell.GetPointId(point_id) for point_id
                    in range(vtk_cell.GetNumberOfPoints())]
            vertices = mesh.points[cell]
            idx = np.argmin(abs(vertices - pos), axis=0)
            vertex_id = cell[idx[0]]
        if self.traces_mode == 'label':
            self._add_label_glyph(hemi, mesh, vertex_id)
        else:
            self._add_vertex_glyph(hemi, mesh, vertex_id)
def _add_label_glyph(self, hemi, mesh, vertex_id):
if hemi == 'vol':
return
label_id = self._vertex_to_label_id[hemi][vertex_id]
label = self._annotation_labels[hemi][label_id]
# remove the patch if already picked
if label_id in self.picked_patches[hemi]:
self._remove_label_glyph(hemi, label_id)
return
if hemi == label.hemi:
self.add_label(label, borders=True, reset_camera=False)
self.picked_patches[hemi].append(label_id)
def _remove_label_glyph(self, hemi, label_id):
label = self._annotation_labels[hemi][label_id]
label._line.remove()
self.color_cycle.restore(label._color)
self.mpl_canvas.update_plot()
self._layered_meshes[hemi].remove_overlay(label.name)
self.picked_patches[hemi].remove(label_id)
    def _add_vertex_glyph(self, hemi, mesh, vertex_id, update=True):
        """Place a sphere glyph at *vertex_id* and plot its time course.

        Returns the last sphere created (one per view), or None implicitly
        when the vertex is already picked or the hemi has no data.
        """
        if vertex_id in self.picked_points[hemi]:
            return
        # skip if the wrong hemi is selected
        if self.act_data_smooth[hemi][0] is None:
            return
        color = next(self.color_cycle)
        line = self.plot_time_course(hemi, vertex_id, color, update=update)
        if hemi == 'vol':
            # compute the voxel center as the mean of its corner points
            ijk = np.unravel_index(
                vertex_id, np.array(mesh.GetDimensions()) - 1, order='F')
            # should just be GetCentroid(center), but apparently it's VTK9+:
            # center = np.empty(3)
            # voxel.GetCentroid(center)
            voxel = mesh.GetCell(*ijk)
            pts = voxel.GetPoints()
            n_pts = pts.GetNumberOfPoints()
            center = np.empty((n_pts, 3))
            for ii in range(pts.GetNumberOfPoints()):
                pts.GetPoint(ii, center[ii])
            center = np.mean(center, axis=0)
        else:
            center = mesh.GetPoints().GetPoint(vertex_id)
        del mesh
        # from the picked renderer to the subplot coords
        try:
            lst = self._renderer._all_renderers._renderers
        except AttributeError:
            lst = self._renderer._all_renderers
        rindex = lst.index(self.picked_renderer)
        row, col = self._renderer._index_to_loc(rindex)
        actors = list()
        spheres = list()
        for _ in self._iter_views(hemi):
            # Using _sphere() instead of renderer.sphere() for 2 reasons:
            # 1) renderer.sphere() fails on Windows in a scenario where a lot
            # of picking requests are done in a short span of time (could be
            # mitigated with synchronization/delay?)
            # 2) the glyph filter is used in renderer.sphere() but only one
            # sphere is required in this function.
            actor, sphere = self._renderer._sphere(
                center=np.array(center),
                color=color,
                radius=4.0,
            )
            actors.append(actor)
            spheres.append(sphere)
        # add metadata for picking
        for sphere in spheres:
            sphere._is_glyph = True
            sphere._hemi = hemi
            sphere._line = line
            sphere._actors = actors
            sphere._color = color
            sphere._vertex_id = vertex_id
        self.picked_points[hemi].append(vertex_id)
        self._spheres.extend(spheres)
        self.pick_table[vertex_id] = spheres
        return sphere
    def _remove_vertex_glyph(self, mesh, render=True):
        """Remove a picked sphere glyph (from all views) and its trace."""
        vertex_id = mesh._vertex_id
        if vertex_id not in self.pick_table:
            return
        hemi = mesh._hemi
        color = mesh._color
        spheres = self.pick_table[vertex_id]
        # all spheres for this vertex share one trace line
        spheres[0]._line.remove()
        self.mpl_canvas.update_plot()
        self.picked_points[hemi].remove(vertex_id)
        with warnings.catch_warnings(record=True):
            # We intentionally ignore these in case we have traversed the
            # entire color cycle
            warnings.simplefilter('ignore')
            self.color_cycle.restore(color)
        for sphere in spheres:
            # remove all actors
            self.plotter.remove_actor(sphere._actors, render=False)
            sphere._actors = None
            self._spheres.pop(self._spheres.index(sphere))
        if render:
            self._renderer._update()
        self.pick_table.pop(vertex_id)
def clear_glyphs(self):
"""Clear the picking glyphs."""
if not self.time_viewer:
return
for sphere in list(self._spheres): # will remove itself, so copy
self._remove_vertex_glyph(sphere, render=False)
assert sum(len(v) for v in self.picked_points.values()) == 0
assert len(self.pick_table) == 0
assert len(self._spheres) == 0
for hemi in self._hemis:
for label_id in list(self.picked_patches[hemi]):
self._remove_label_glyph(hemi, label_id)
assert sum(len(v) for v in self.picked_patches.values()) == 0
if self.rms is not None:
self.rms.remove()
self.rms = None
self._renderer._update()
    def plot_time_course(self, hemi, vertex_id, color, update=True):
        """Plot the vertex time course.
        Parameters
        ----------
        hemi : str
            The hemisphere id of the vertex.
        vertex_id : int
            The vertex identifier in the mesh.
        color : matplotlib color
            The color of the time course.
        update : bool
            Force an update of the plot. Defaults to True.
        Returns
        -------
        line : matplotlib object
            The time line object.
        """
        if self.mpl_canvas is None:
            return
        time = self._data['time'].copy()  # avoid circular ref
        mni = None
        if hemi == 'vol':
            hemi_str = 'V'
            # MRI voxel -> MNI via the Talairach transform
            xfm = read_talxfm(
                self._subject, self._subjects_dir)
            if self._units == 'mm':
                xfm['trans'][:3, 3] *= 1000.
            ijk = np.unravel_index(
                vertex_id, self._data[hemi]['grid_shape'], order='F')
            src_mri_t = self._data[hemi]['grid_src_mri_t']
            mni = apply_trans(np.dot(xfm['trans'], src_mri_t), ijk)
        else:
            hemi_str = 'L' if hemi == 'lh' else 'R'
            try:
                mni = vertex_to_mni(
                    vertices=vertex_id,
                    hemis=0 if hemi == 'lh' else 1,
                    subject=self._subject,
                    subjects_dir=self._subjects_dir
                )
            except Exception:
                # MNI coordinates are informational only; fall back to none
                mni = None
        if mni is not None:
            mni = ' MNI: ' + ', '.join('%5.1f' % m for m in mni)
        else:
            mni = ''
        label = "{}:{}{}".format(hemi_str, str(vertex_id).ljust(6), mni)
        act_data, smooth = self.act_data_smooth[hemi]
        if smooth is not None:
            # map from source vertices to the displayed surface row
            act_data = smooth[vertex_id].dot(act_data)[0]
        else:
            act_data = act_data[vertex_id].copy()
        line = self.mpl_canvas.plot(
            time,
            act_data,
            label=label,
            lw=1.,
            color=color,
            zorder=4,
            update=update,
        )
        return line
def plot_time_line(self, update=True):
"""Add the time line to the MPL widget.
Parameters
----------
update : bool
Force an update of the plot. Defaults to True.
"""
if self.mpl_canvas is None:
return
if isinstance(self.show_traces, bool) and self.show_traces:
# add time information
current_time = self._current_time
if not hasattr(self, "time_line"):
self.time_line = self.mpl_canvas.plot_time_line(
x=current_time,
label='time',
color=self._fg_color,
lw=1,
update=update,
)
self.time_line.set_xdata(current_time)
if update:
self.mpl_canvas.update_plot()
def _configure_help(self):
pairs = [
('?', 'Display help window'),
('i', 'Toggle interface'),
('s', 'Apply auto-scaling'),
('r', 'Restore original clim'),
('c', 'Clear all traces'),
('n', 'Shift the time forward by the playback speed'),
('b', 'Shift the time backward by the playback speed'),
('Space', 'Start/Pause playback'),
('Up', 'Decrease camera elevation angle'),
('Down', 'Increase camera elevation angle'),
('Left', 'Decrease camera azimuth angle'),
('Right', 'Increase camera azimuth angle'),
]
text1, text2 = zip(*pairs)
text1 = '\n'.join(text1)
text2 = '\n'.join(text2)
self.help_canvas = self._renderer._window_get_simple_canvas(
width=5, height=2, dpi=80)
_show_help_fig(
col1=text1,
col2=text2,
fig_help=self.help_canvas.fig,
ax=self.help_canvas.axes,
show=False,
)
    def help(self):
        """Display the help window."""
        # canvas is created once by _configure_help and only shown here
        self.help_canvas.show()
def _clear_callbacks(self):
if not hasattr(self, 'callbacks'):
return
for callback in self.callbacks.values():
if callback is not None:
for key in ('plotter', 'brain', 'callback',
'widget', 'widgets'):
setattr(callback, key, None)
self.callbacks.clear()
# Remove the default key binding
if getattr(self, "iren", None) is not None:
self.plotter.iren.clear_key_event_callbacks()
def _clear_widgets(self):
if not hasattr(self, 'widgets'):
return
for widget in self.widgets.values():
if widget is not None:
for key in ('triggered', 'floatValueChanged'):
setattr(widget, key, None)
self.widgets.clear()
    @property
    def interaction(self):
        """The interaction style."""
        # backing attribute is written by the setter below
        return self._interaction
    @interaction.setter
    def interaction(self, interaction):
        """Set the interaction style ('trackball' or 'terrain')."""
        _validate_type(interaction, str, 'interaction')
        _check_option('interaction', interaction, ('trackball', 'terrain'))
        # iterating the 'vol' views visits every subplot, so the style
        # is applied to all renderers
        for _ in self._iter_views('vol'):  # will traverse all
            self._renderer.set_interaction(interaction)
def _cortex_colormap(self, cortex):
"""Return the colormap corresponding to the cortex."""
from .._3d import _get_cmap
from matplotlib.colors import ListedColormap
colormap_map = dict(classic=dict(colormap="Greys",
vmin=-1, vmax=2),
high_contrast=dict(colormap="Greys",
vmin=-.1, vmax=1.3),
low_contrast=dict(colormap="Greys",
vmin=-5, vmax=5),
bone=dict(colormap="bone_r",
vmin=-.2, vmax=2),
)
_validate_type(cortex, (str, dict, list, tuple), 'cortex')
if isinstance(cortex, str):
if cortex in colormap_map:
cortex = colormap_map[cortex]
else:
cortex = [cortex] * 2
if isinstance(cortex, (list, tuple)):
_check_option('len(cortex)', len(cortex), (2, 3),
extra='when cortex is a list or tuple')
if len(cortex) == 3:
cortex = [cortex] * 2
cortex = list(cortex)
for ci, c in enumerate(cortex):
cortex[ci] = _to_rgb(c, name='cortex')
cortex = dict(
colormap=ListedColormap(cortex, name='custom binary'),
vmin=0, vmax=1)
cortex = dict(
vmin=float(cortex['vmin']),
vmax=float(cortex['vmax']),
colormap=_get_cmap(cortex['colormap']),
)
return cortex
def _remove(self, item, render=False):
"""Remove actors from the rendered scene."""
if item in self._actors:
logger.debug(
f'Removing {len(self._actors[item])} {item} actor(s)')
for actor in self._actors[item]:
self._renderer.plotter.remove_actor(actor, render=False)
self._actors.pop(item) # remove actor list
if render:
self._renderer._update()
def _add_actor(self, item, actor):
"""Add an actor to the internal register."""
if item in self._actors: # allows adding more than one
self._actors[item].append(actor)
else:
self._actors[item] = [actor]
    @verbose
    def add_data(self, array, fmin=None, fmid=None, fmax=None,
                 thresh=None, center=None, transparent=False, colormap="auto",
                 alpha=1, vertices=None, smoothing_steps=None, time=None,
                 time_label="auto", colorbar=True,
                 hemi=None, remove_existing=None, time_label_size=None,
                 initial_time=None, scale_factor=None, vector_alpha=None,
                 clim=None, src=None, volume_options=0.4, colorbar_kwargs=None,
                 verbose=None):
        """Display data from a numpy array on the surface or volume.
        This provides a similar interface to
        :meth:`surfer.Brain.add_overlay`, but it displays
        it with a single colormap. It offers more flexibility over the
        colormap, and provides a way to display four-dimensional data
        (i.e., a timecourse) or five-dimensional data (i.e., a
        vector-valued timecourse).
        .. note:: ``fmin`` sets the low end of the colormap, and is separate
                  from thresh (this is a different convention from
                  :meth:`surfer.Brain.add_overlay`).
        Parameters
        ----------
        array : numpy array, shape (n_vertices[, 3][, n_times])
            Data array. For the data to be understood as vector-valued
            (3 values per vertex corresponding to X/Y/Z surface RAS),
            then ``array`` must be have all 3 dimensions.
            If vectors with no time dimension are desired, consider using a
            singleton (e.g., ``np.newaxis``) to create a "time" dimension
            and pass ``time_label=None`` (vector values are not supported).
        %(fmin_fmid_fmax)s
        %(thresh)s
        %(center)s
        %(transparent)s
        colormap : str, list of color, or array
            Name of matplotlib colormap to use, a list of matplotlib colors,
            or a custom look up table (an n x 4 array coded with RBGA values
            between 0 and 255), the default "auto" chooses a default divergent
            colormap, if "center" is given (currently "icefire"), otherwise a
            default sequential colormap (currently "rocket").
        alpha : float in [0, 1]
            Alpha level to control opacity of the overlay.
        vertices : numpy array
            Vertices for which the data is defined (needed if
            ``len(data) < nvtx``).
        smoothing_steps : int or None
            Number of smoothing steps (smoothing is used if len(data) < nvtx)
            The value 'nearest' can be used too. None (default) will use as
            many as necessary to fill the surface.
        time : numpy array
            Time points in the data array (if data is 2D or 3D).
        %(time_label)s
        colorbar : bool
            Whether to add a colorbar to the figure. Can also be a tuple
            to give the (row, col) index of where to put the colorbar.
        hemi : str | None
            If None, it is assumed to belong to the hemisphere being
            shown. If two hemispheres are being shown, an error will
            be thrown.
        remove_existing : bool
            Not supported yet.
            Remove surface added by previous "add_data" call. Useful for
            conserving memory when displaying different data in a loop.
        time_label_size : int
            Font size of the time label (default 14).
        initial_time : float | None
            Time initially shown in the plot. ``None`` to use the first time
            sample (default).
        scale_factor : float | None (default)
            The scale factor to use when displaying glyphs for vector-valued
            data.
        vector_alpha : float | None
            Alpha level to control opacity of the arrows. Only used for
            vector-valued data. If None (default), ``alpha`` is used.
        clim : dict
            Original clim arguments.
        %(src_volume_options)s
        colorbar_kwargs : dict | None
            Options to pass to :meth:`pyvista.Plotter.add_scalar_bar`
            (e.g., ``dict(title_font_size=10)``).
        %(verbose)s
        Notes
        -----
        If the data is defined for a subset of vertices (specified
        by the "vertices" parameter), a smoothing method is used to interpolate
        the data onto the high resolution surface. If the data is defined for
        subsampled version of the surface, smoothing_steps can be set to None,
        in which case only as many smoothing steps are applied until the whole
        surface is filled with non-zeros.
        Due to a VTK alpha rendering bug, ``vector_alpha`` is
        clamped to be strictly < 1.
        """
        # --- validate parameters ---
        _validate_type(transparent, bool, 'transparent')
        _validate_type(vector_alpha, ('numeric', None), 'vector_alpha')
        _validate_type(scale_factor, ('numeric', None), 'scale_factor')
        # those parameters are not supported yet, only None is allowed
        _check_option('thresh', thresh, [None])
        _check_option('remove_existing', remove_existing, [None])
        _validate_type(time_label_size, (None, 'numeric'), 'time_label_size')
        if time_label_size is not None:
            time_label_size = float(time_label_size)
            if time_label_size < 0:
                raise ValueError('time_label_size must be positive, got '
                                 f'{time_label_size}')
        hemi = self._check_hemi(hemi, extras=['vol'])
        stc, array, vertices = self._check_stc(hemi, array, vertices)
        array = np.asarray(array)
        vector_alpha = alpha if vector_alpha is None else vector_alpha
        self._data['vector_alpha'] = vector_alpha
        self._data['scale_factor'] = scale_factor
        # Create time array and add label if > 1D
        if array.ndim <= 1:
            time_idx = 0
        else:
            # check time array
            if time is None:
                time = np.arange(array.shape[-1])
            else:
                time = np.asarray(time)
                if time.shape != (array.shape[-1],):
                    raise ValueError('time has shape %s, but need shape %s '
                                     '(array.shape[-1])' %
                                     (time.shape, (array.shape[-1],)))
            self._data["time"] = time
            # times must agree with any data added previously
            if self._n_times is None:
                self._times = time
            elif len(time) != self._n_times:
                raise ValueError("New n_times is different from previous "
                                 "n_times")
            elif not np.array_equal(time, self._times):
                raise ValueError("Not all time values are consistent with "
                                 "previously set times.")
            # initial time
            if initial_time is None:
                time_idx = 0
            else:
                time_idx = self._to_time_index(initial_time)
        # time label
        time_label, _ = _handle_time(time_label, 's', time)
        y_txt = 0.05 + 0.1 * bool(colorbar)
        if array.ndim == 3:
            if array.shape[1] != 3:
                raise ValueError('If array has 3 dimensions, array.shape[1] '
                                 'must equal 3, got %s' % (array.shape[1],))
        fmin, fmid, fmax = _update_limits(
            fmin, fmid, fmax, center, array
        )
        if colormap == 'auto':
            colormap = 'mne' if center is not None else 'hot'
        # normalize smoothing_steps: None -> 7 steps, 'nearest' -> -1
        if smoothing_steps is None:
            smoothing_steps = 7
        elif smoothing_steps == 'nearest':
            smoothing_steps = -1
        elif isinstance(smoothing_steps, int):
            if smoothing_steps < 0:
                raise ValueError('Expected value of `smoothing_steps` is'
                                 ' positive but {} was given.'.format(
                                     smoothing_steps))
        else:
            raise TypeError('Expected type of `smoothing_steps` is int or'
                            ' NoneType but {} was given.'.format(
                                type(smoothing_steps)))
        # --- store shared state ---
        self._data['stc'] = stc
        self._data['src'] = src
        self._data['smoothing_steps'] = smoothing_steps
        self._data['clim'] = clim
        self._data['time'] = time
        self._data['initial_time'] = initial_time
        self._data['time_label'] = time_label
        self._data['initial_time_idx'] = time_idx
        self._data['time_idx'] = time_idx
        self._data['transparent'] = transparent
        # data specific for a hemi
        self._data[hemi] = dict()
        self._data[hemi]['glyph_dataset'] = None
        self._data[hemi]['glyph_mapper'] = None
        self._data[hemi]['glyph_actor'] = None
        self._data[hemi]['array'] = array
        self._data[hemi]['vertices'] = vertices
        self._data['alpha'] = alpha
        self._data['colormap'] = colormap
        self._data['center'] = center
        self._data['fmin'] = fmin
        self._data['fmid'] = fmid
        self._data['fmax'] = fmax
        self.update_lut()
        # 1) add the surfaces first
        actor = None
        for _ in self._iter_views(hemi):
            if hemi in ('lh', 'rh'):
                actor = self._layered_meshes[hemi]._actor
            else:
                src_vol = src[2:] if src.kind == 'mixed' else src
                actor, _ = self._add_volume_data(hemi, src_vol, volume_options)
        assert actor is not None  # should have added one
        self._add_actor('data', actor)
        # 2) update time and smoothing properties
        # set_data_smoothing calls "set_time_point" for us, which will set
        # _current_time
        self.set_time_interpolation(self.time_interpolation)
        self.set_data_smoothing(self._data['smoothing_steps'])
        # 3) add the other actors
        if colorbar is True:
            # bottom left by default
            colorbar = (self._subplot_shape[0] - 1, 0)
        for ri, ci, v in self._iter_views(hemi):
            # Add the time label to the bottommost view
            do = (ri, ci) == colorbar
            if not self._time_label_added and time_label is not None and do:
                time_actor = self._renderer.text2d(
                    x_window=0.95, y_window=y_txt,
                    color=self._fg_color,
                    size=time_label_size,
                    text=time_label(self._current_time),
                    justification='right'
                )
                self._data['time_actor'] = time_actor
                self._time_label_added = True
            if colorbar and self._scalar_bar is None and do:
                kwargs = dict(source=actor, n_labels=8, color=self._fg_color,
                              bgcolor=self._brain_color[:3])
                kwargs.update(colorbar_kwargs or {})
                self._scalar_bar = self._renderer.scalarbar(**kwargs)
            self._renderer.set_camera(
                update=False, reset_camera=False, **views_dicts[hemi][v])
        # 4) update the scalar bar and opacity
        self.update_lut(alpha=alpha)
    def remove_data(self):
        """Remove rendered data from the mesh."""
        # actors were registered under 'data' by add_data
        self._remove('data', render=True)
def _iter_views(self, hemi):
"""Iterate over rows and columns that need to be added to."""
hemi_dict = dict(lh=[0], rh=[0], vol=[0])
if self._hemi == 'split':
hemi_dict.update(rh=[1], vol=[0, 1])
for vi, view in enumerate(self._views):
view_dict = dict(lh=[vi], rh=[vi], vol=[vi])
if self._hemi == 'split':
view_dict.update(vol=[vi, vi])
if self._view_layout == 'vertical':
rows, cols = view_dict, hemi_dict # views are rows, hemis cols
else:
rows, cols = hemi_dict, view_dict # hemis are rows, views cols
for ri, ci in zip(rows[hemi], cols[hemi]):
self._renderer.subplot(ri, ci)
yield ri, ci, view
def remove_labels(self):
"""Remove all the ROI labels from the image."""
for hemi in self._hemis:
mesh = self._layered_meshes[hemi]
for label in self._labels[hemi]:
mesh.remove_overlay(label.name)
self._labels[hemi].clear()
self._renderer._update()
def remove_annotations(self):
"""Remove all annotations from the image."""
for hemi in self._hemis:
if hemi in self._layered_meshes:
mesh = self._layered_meshes[hemi]
mesh.remove_overlay(self._annots[hemi])
if hemi in self._annots:
self._annots[hemi].clear()
self._renderer._update()
    def _add_volume_data(self, hemi, src, volume_options):
        """Render a volume source space as positive/negative volume actors.

        Builds (on first call) a uniform grid from the volume source space,
        caches it in ``self._data['vol']``, then adds the volume actors, an
        outer surface mesh and (optionally) a silhouette outline.

        Returns the ``(positive, negative)`` volume actors; the negative one
        is None when the data has no negative side (e.g. no divergent map).
        """
        from ..backends._pyvista import _hide_testing_actor
        _validate_type(src, SourceSpaces, 'src')
        _check_option('src.kind', src.kind, ('volume',))
        _validate_type(
            volume_options, (dict, 'numeric', None), 'volume_options')
        assert hemi == 'vol'
        # A bare number is shorthand for dict(resolution=number)
        if not isinstance(volume_options, dict):
            volume_options = dict(
                resolution=float(volume_options) if volume_options is not None
                else None)
        volume_options = _handle_default('volume_options', volume_options)
        allowed_types = (
            ['resolution', (None, 'numeric')],
            ['blending', (str,)],
            ['alpha', ('numeric', None)],
            ['surface_alpha', (None, 'numeric')],
            ['silhouette_alpha', (None, 'numeric')],
            ['silhouette_linewidth', ('numeric',)],
        )
        for key, types in allowed_types:
            _validate_type(volume_options[key], types,
                           f'volume_options[{repr(key)}]')
        extra_keys = set(volume_options) - set(a[0] for a in allowed_types)
        if len(extra_keys):
            raise ValueError(
                f'volume_options got unknown keys {sorted(extra_keys)}')
        blending = _check_option('volume_options["blending"]',
                                 volume_options['blending'],
                                 ('composite', 'mip'))
        alpha = volume_options['alpha']
        # Default alpha: translucent for time-varying (3D) arrays, opaque
        # for a single static volume.
        if alpha is None:
            alpha = 0.4 if self._data[hemi]['array'].ndim == 3 else 1.
        alpha = np.clip(float(alpha), 0., 1.)
        resolution = volume_options['resolution']
        surface_alpha = volume_options['surface_alpha']
        if surface_alpha is None:
            surface_alpha = min(alpha / 2., 0.1)
        silhouette_alpha = volume_options['silhouette_alpha']
        if silhouette_alpha is None:
            silhouette_alpha = surface_alpha / 4.
        silhouette_linewidth = volume_options['silhouette_linewidth']
        del volume_options
        # The grid and volume objects are cached; only build them once.
        volume_pos = self._data[hemi].get('grid_volume_pos')
        volume_neg = self._data[hemi].get('grid_volume_neg')
        center = self._data['center']
        if volume_pos is None:
            # First call: construct the uniform grid in MRI (surface RAS)
            # coordinates from the source-space voxel grid.
            xyz = np.meshgrid(
                *[np.arange(s) for s in src[0]['shape']], indexing='ij')
            dimensions = np.array(src[0]['shape'], int)
            mult = 1000 if self._units == 'mm' else 1
            src_mri_t = src[0]['src_mri_t']['trans'].copy()
            src_mri_t[:3] *= mult
            if resolution is not None:
                resolution = resolution * mult / 1000.  # to mm
            del src, mult
            coords = np.array([c.ravel(order='F') for c in xyz]).T
            coords = apply_trans(src_mri_t, coords)
            self.geo[hemi] = Bunch(coords=coords)
            vertices = self._data[hemi]['vertices']
            assert self._data[hemi]['array'].shape[0] == len(vertices)
            # MNE constructs the source space on a uniform grid in MRI space,
            # but mne coreg can change it to be non-uniform, so we need to
            # use all three elements here
            assert np.allclose(
                src_mri_t[:3, :3], np.diag(np.diag(src_mri_t)[:3]))
            spacing = np.diag(src_mri_t)[:3]
            # shift by half a voxel so voxel centers land on the grid points
            origin = src_mri_t[:3, 3] - spacing / 2.
            scalars = np.zeros(np.prod(dimensions))
            scalars[vertices] = 1.  # for the outer mesh
            grid, grid_mesh, volume_pos, volume_neg = \
                self._renderer._volume(dimensions, origin, spacing, scalars,
                                       surface_alpha, resolution, blending,
                                       center)
            self._data[hemi]['alpha'] = alpha  # incorrectly set earlier
            self._data[hemi]['grid'] = grid
            self._data[hemi]['grid_mesh'] = grid_mesh
            self._data[hemi]['grid_coords'] = coords
            self._data[hemi]['grid_src_mri_t'] = src_mri_t
            self._data[hemi]['grid_shape'] = dimensions
            self._data[hemi]['grid_volume_pos'] = volume_pos
            self._data[hemi]['grid_volume_neg'] = volume_neg
        actor_pos, _ = self._renderer.plotter.add_actor(
            volume_pos, reset_camera=False, name=None, culling=False,
            render=False)
        actor_neg = actor_mesh = None
        if volume_neg is not None:
            actor_neg, _ = self._renderer.plotter.add_actor(
                volume_neg, reset_camera=False, name=None, culling=False,
                render=False)
        grid_mesh = self._data[hemi]['grid_mesh']
        if grid_mesh is not None:
            # translucent outer shell of the source space
            actor_mesh, prop = self._renderer.plotter.add_actor(
                grid_mesh, reset_camera=False, name=None, culling=False,
                pickable=False, render=False)
            prop.SetColor(*self._brain_color[:3])
            prop.SetOpacity(surface_alpha)
            if silhouette_alpha > 0 and silhouette_linewidth > 0:
                for _ in self._iter_views('vol'):
                    self._renderer._silhouette(
                        mesh=grid_mesh.GetInput(),
                        color=self._brain_color[:3],
                        line_width=silhouette_linewidth,
                        alpha=silhouette_alpha,
                    )
        for actor in (actor_pos, actor_neg, actor_mesh):
            if actor is not None:
                _hide_testing_actor(actor)
        return actor_pos, actor_neg
    def add_label(self, label, color=None, alpha=1, scalar_thresh=None,
                  borders=False, hemi=None, subdir=None,
                  reset_camera=True):
        """Add an ROI label to the image.

        Parameters
        ----------
        label : str | instance of Label
            Label filepath or name. Can also be an instance of
            an object with attributes "hemi", "vertices", "name", and
            optionally "color" and "values" (if scalar_thresh is not None).
        color : matplotlib-style color | None
            Anything matplotlib accepts: string, RGB, hex, etc. (default
            "crimson").
        alpha : float in [0, 1]
            Alpha level to control opacity.
        scalar_thresh : None | float
            Threshold the label ids using this value in the label
            file's scalar field (i.e. label only vertices with
            scalar >= thresh).
        borders : bool | int
            Show only label borders. If int, specify the number of steps
            (away from the true border) along the cortical mesh to include
            as part of the border definition.
        hemi : str | None
            If None, it is assumed to belong to the hemisphere being
            shown.
        subdir : None | str
            If a label is specified as name, subdir can be used to indicate
            that the label file is in a sub-directory of the subject's
            label directory rather than in the label directory itself (e.g.
            for ``$SUBJECTS_DIR/$SUBJECT/label/aparc/lh.cuneus.label``
            ``brain.add_label('cuneus', subdir='aparc')``).
        reset_camera : bool
            If True, reset the camera view after adding the label. Defaults
            to True.

        Notes
        -----
        To remove previously added labels, run Brain.remove_labels().
        """
        from ...label import read_label
        # Resolve `label` into: hemi, vertex ids, name, and (optionally)
        # per-vertex scalar values.
        if isinstance(label, str):
            if color is None:
                color = "crimson"
            if os.path.isfile(label):
                # full path given, e.g. .../lh.cuneus.label
                filepath = label
                label = read_label(filepath)
                hemi = label.hemi
                label_name = os.path.basename(filepath).split('.')[1]
            else:
                # bare name: build the path from subject dir (+ subdir)
                hemi = self._check_hemi(hemi)
                label_name = label
                label_fname = ".".join([hemi, label_name, 'label'])
                if subdir is None:
                    filepath = op.join(self._subjects_dir, self._subject,
                                       'label', label_fname)
                else:
                    filepath = op.join(self._subjects_dir, self._subject,
                                       'label', subdir, label_fname)
                if not os.path.exists(filepath):
                    raise ValueError('Label file %s does not exist'
                                     % filepath)
                label = read_label(filepath)
            ids = label.vertices
            scalars = label.values
        else:
            # try to extract parameters from label instance
            try:
                hemi = label.hemi
                ids = label.vertices
                if label.name is None:
                    # give anonymous labels a unique generated name
                    label.name = 'unnamed' + str(self._unnamed_label_id)
                    self._unnamed_label_id += 1
                label_name = str(label.name)
                if color is None:
                    if hasattr(label, 'color') and label.color is not None:
                        color = label.color
                    else:
                        color = "crimson"
                if scalar_thresh is not None:
                    scalars = label.values
            except Exception:
                raise ValueError('Label was not a filename (str), and could '
                                 'not be understood as a class. The class '
                                 'must have attributes "hemi", "vertices", '
                                 '"name", and (if scalar_thresh is not None)'
                                 '"values"')
        hemi = self._check_hemi(hemi)
        if scalar_thresh is not None:
            ids = ids[scalars >= scalar_thresh]
        # In label-trace mode, also plot this label's time course and pick
        # the line color from the cycle so mesh and trace match.
        if self.time_viewer and self.show_traces \
                and self.traces_mode == 'label':
            stc = self._data["stc"]
            src = self._data["src"]
            tc = stc.extract_label_time_course(label, src=src,
                                               mode=self.label_extract_mode)
            tc = tc[0] if tc.ndim == 2 else tc[0, 0, :]
            color = next(self.color_cycle)
            line = self.mpl_canvas.plot(
                self._data['time'], tc, label=label_name,
                color=color)
        else:
            line = None
        orig_color = color
        color = _to_rgb(color, alpha, alpha=True)
        # two-entry colormap: transparent background, label color
        cmap = np.array([(0, 0, 0, 0,), color])
        ctable = np.round(cmap * 255).astype(np.uint8)
        # binary mask over all vertices of this hemi
        scalars = np.zeros(self.geo[hemi].coords.shape[0])
        scalars[ids] = 1
        if borders:
            keep_idx = _mesh_borders(self.geo[hemi].faces, scalars)
            show = np.zeros(scalars.size, dtype=np.int64)
            if isinstance(borders, int):
                # dilate the border `borders` steps along the mesh faces
                for _ in range(borders):
                    keep_idx = np.in1d(
                        self.geo[hemi].faces.ravel(), keep_idx)
                    keep_idx.shape = self.geo[hemi].faces.shape
                    keep_idx = self.geo[hemi].faces[np.any(
                        keep_idx, axis=1)]
                    keep_idx = np.unique(keep_idx)
            show[keep_idx] = 1
            scalars *= show
        for _, _, v in self._iter_views(hemi):
            mesh = self._layered_meshes[hemi]
            mesh.add_overlay(
                scalars=scalars,
                colormap=ctable,
                rng=[np.min(scalars), np.max(scalars)],
                opacity=alpha,
                name=label_name,
            )
            if reset_camera:
                self._renderer.set_camera(update=False, **views_dicts[hemi][v])
        if self.time_viewer and self.show_traces \
                and self.traces_mode == 'label':
            # remember the display color/line so the label can be restored
            # and its trace removed later
            label._color = orig_color
            label._line = line
        self._labels[hemi].append(label)
        self._renderer._update()
@fill_doc
def add_forward(self, fwd, trans, alpha=1, scale=None):
"""Add a quiver to render positions of dipoles.
Parameters
----------
%(fwd)s
%(trans_not_none)s
%(alpha)s Default 1.
scale : None | float
The size of the arrow representing the dipoles in
:class:`mne.viz.Brain` units. Default 1.5mm.
Notes
-----
.. versionadded:: 1.0
"""
head_mri_t = _get_trans(trans, 'head', 'mri', allow_none=False)[0]
del trans
if scale is None:
scale = 1.5 if self._units == 'mm' else 1.5e-3
error_msg = ('Unexpected forward model coordinate frame '
'{}, must be "head" or "mri"')
if fwd['coord_frame'] in _frame_to_str:
fwd_frame = _frame_to_str[fwd['coord_frame']]
if fwd_frame == 'mri':
fwd_trans = Transform('mri', 'mri')
elif fwd_frame == 'head':
fwd_trans = head_mri_t
else:
raise RuntimeError(error_msg.format(fwd_frame))
else:
raise RuntimeError(error_msg.format(fwd['coord_frame']))
for actor in _plot_forward(
self._renderer, fwd, fwd_trans,
fwd_scale=1e3 if self._units == 'mm' else 1,
scale=scale, alpha=alpha):
self._add_actor('forward', actor)
self._renderer._update()
    def remove_forward(self):
        """Remove forward sources from the rendered scene."""
        # Drop all actors registered under 'forward' and re-render.
        self._remove('forward', render=True)
@fill_doc
def add_dipole(self, dipole, trans, colors='red', alpha=1, scales=None):
"""Add a quiver to render positions of dipoles.
Parameters
----------
dipole : instance of Dipole
Dipole object containing position, orientation and amplitude of
one or more dipoles or in the forward solution.
%(trans_not_none)s
colors : list | matplotlib-style color | None
A single color or list of anything matplotlib accepts:
string, RGB, hex, etc. Default red.
%(alpha)s Default 1.
scales : list | float | None
The size of the arrow representing the dipole in
:class:`mne.viz.Brain` units. Default 5mm.
Notes
-----
.. versionadded:: 1.0
"""
head_mri_t = _get_trans(trans, 'head', 'mri', allow_none=False)[0]
del trans
n_dipoles = len(dipole)
if not isinstance(colors, (list, tuple)):
colors = [colors] * n_dipoles # make into list
if len(colors) != n_dipoles:
raise ValueError(f'The number of colors ({len(colors)}) '
f'and dipoles ({n_dipoles}) must match')
colors = [_to_rgb(color, name=f'colors[{ci}]')
for ci, color in enumerate(colors)]
if scales is None:
scales = 5 if self._units == 'mm' else 5e-3
if not isinstance(scales, (list, tuple)):
scales = [scales] * n_dipoles # make into list
if len(scales) != n_dipoles:
raise ValueError(f'The number of scales ({len(scales)}) '
f'and dipoles ({n_dipoles}) must match')
pos = apply_trans(head_mri_t, dipole.pos)
pos *= 1e3 if self._units == 'mm' else 1
for _ in self._iter_views('vol'):
for this_pos, this_ori, color, scale in zip(
pos, dipole.ori, colors, scales):
actor, _ = self._renderer.quiver3d(
*this_pos, *this_ori, color=color, opacity=alpha,
mode='arrow', scale=scale)
self._add_actor('dipole', actor)
self._renderer._update()
    def remove_dipole(self):
        """Remove dipole objects from the rendered scene."""
        # Drop all actors registered under 'dipole' and re-render.
        self._remove('dipole', render=True)
@fill_doc
def add_head(self, dense=True, color='gray', alpha=0.5):
"""Add a mesh to render the outer head surface.
Parameters
----------
dense : bool
Whether to plot the dense head (``seghead``) or the less dense head
(``head``).
%(color_matplotlib)s
%(alpha)s
Notes
-----
.. versionadded:: 0.24
"""
# load head
surf = _get_head_surface('seghead' if dense else 'head',
self._subject, self._subjects_dir)
verts, triangles = surf['rr'], surf['tris']
verts *= 1e3 if self._units == 'mm' else 1
color = _to_rgb(color)
for _ in self._iter_views('vol'):
actor, _ = self._renderer.mesh(
*verts.T, triangles=triangles, color=color,
opacity=alpha, reset_camera=False, render=False)
self._add_actor('head', actor)
self._renderer._update()
    def remove_head(self):
        """Remove head objects from the rendered scene."""
        # Drop all actors registered under 'head' and re-render.
        self._remove('head', render=True)
@fill_doc
def add_skull(self, outer=True, color='gray', alpha=0.5):
"""Add a mesh to render the skull surface.
Parameters
----------
outer : bool
Adds the outer skull if ``True``, otherwise adds the inner skull.
%(color_matplotlib)s
%(alpha)s
Notes
-----
.. versionadded:: 0.24
"""
surf = _get_skull_surface('outer' if outer else 'inner',
self._subject, self._subjects_dir)
verts, triangles = surf['rr'], surf['tris']
verts *= 1e3 if self._units == 'mm' else 1
color = _to_rgb(color)
for _ in self._iter_views('vol'):
actor, _ = self._renderer.mesh(
*verts.T, triangles=triangles, color=color,
opacity=alpha, reset_camera=False, render=False)
self._add_actor('skull', actor)
self._renderer._update()
    def remove_skull(self):
        """Remove skull objects from the rendered scene."""
        # Drop all actors registered under 'skull' and re-render.
        self._remove('skull', render=True)
    @fill_doc
    def add_volume_labels(self, aseg='aparc+aseg', labels=None, colors=None,
                          alpha=0.5, smooth=0.9, fill_hole_size=None,
                          legend=None):
        """Add labels to the rendering from an anatomical segmentation.

        Parameters
        ----------
        %(aseg)s
        labels : list
            Labeled regions of interest to plot. See
            :func:`mne.get_montage_volume_labels`
            for one way to determine regions of interest. Regions can also be
            chosen from the :term:`FreeSurfer LUT`.
        colors : list | matplotlib-style color | None
            A list of anything matplotlib accepts: string, RGB, hex, etc.
            (default :term:`FreeSurfer LUT` colors).
        %(alpha)s
        %(smooth)s
        fill_hole_size : int | None
            The size of holes to remove in the mesh in voxels. Default is None,
            no holes are removed. Warning, this dilates the boundaries of the
            surface by ``fill_hole_size`` number of voxels so use the minimal
            size.
        legend : bool | None | dict
            Add a legend displaying the names of the ``labels``. Default (None)
            is ``True`` if the number of ``labels`` is 10 or fewer.
            Can also be a dict of ``kwargs`` to pass to
            :meth:`pyvista.Plotter.add_legend`.

        Notes
        -----
        .. versionadded:: 0.24
        """
        import nibabel as nib
        # load anatomical segmentation image
        if not aseg.endswith('aseg'):
            raise RuntimeError(
                f'`aseg` file path must end with "aseg", got {aseg}')
        aseg = _check_fname(op.join(self._subjects_dir, self._subject,
                                    'mri', aseg + '.mgz'),
                            overwrite='read', must_exist=True)
        aseg_fname = aseg
        aseg = nib.load(aseg_fname)
        aseg_data = np.asarray(aseg.dataobj)
        # voxel -> surface RAS ("tkr") transform, scaled to display units
        vox_mri_t = aseg.header.get_vox2ras_tkr()
        mult = 1e-3 if self._units == 'm' else 1
        vox_mri_t[:3] *= mult
        del aseg
        # read freesurfer lookup table
        lut, fs_colors = read_freesurfer_lut()
        if labels is None:  # assign default ROI labels based on indices
            lut_r = {v: k for k, v in lut.items()}
            labels = [lut_r[idx] for idx in DEFAULTS['volume_label_indices']]
        _validate_type(fill_hole_size, (int, None), 'fill_hole_size')
        _validate_type(legend, (bool, None, dict), 'legend')
        if legend is None:
            # show a legend automatically for 10 labels or fewer
            legend = len(labels) < 11
        if colors is None:
            colors = [fs_colors[label] / 255 for label in labels]
        elif not isinstance(colors, (list, tuple)):
            colors = [colors] * len(labels)  # make into list
        colors = [_to_rgb(color, name=f'colors[{ci}]')
                  for ci, color in enumerate(colors)]
        # extract one triangulated surface per requested label value
        surfs = _marching_cubes(
            aseg_data, [lut[label] for label in labels], smooth=smooth,
            fill_hole_size=fill_hole_size)
        for label, color, (verts, triangles) in zip(labels, colors, surfs):
            if len(verts) == 0:  # not in aseg vals
                warn(f'Value {lut[label]} not found for label '
                     f'{repr(label)} in: {aseg_fname}')
                continue
            verts = apply_trans(vox_mri_t, verts)
            for _ in self._iter_views('vol'):
                actor, _ = self._renderer.mesh(
                    *verts.T, triangles=triangles, color=color,
                    opacity=alpha, reset_camera=False, render=False)
                self._add_actor('volume_labels', actor)
        if legend or isinstance(legend, dict):
            # use empty kwargs for legend = True
            legend = legend if isinstance(legend, dict) else dict()
            self._renderer.plotter.add_legend(
                list(zip(labels, colors)), **legend)
        self._renderer._update()
    def remove_volume_labels(self):
        """Remove the volume labels from the rendered scene."""
        # Drop the label meshes, then the legend that named them.
        self._remove('volume_labels', render=True)
        self._renderer.plotter.remove_legend()
    @fill_doc
    def add_foci(self, coords, coords_as_verts=False, map_surface=None,
                 scale_factor=1, color="white", alpha=1, name=None,
                 hemi=None, resolution=50):
        """Add spherical foci, possibly mapping to displayed surf.

        The foci spheres can be displayed at the coordinates given, or
        mapped through a surface geometry. In other words, coordinates
        from a volume-based analysis in MNI space can be displayed on an
        inflated average surface by finding the closest vertex on the
        white surface and mapping to that vertex on the inflated mesh.

        Parameters
        ----------
        coords : ndarray, shape (n_coords, 3)
            Coordinates in stereotaxic space (default) or array of
            vertex ids (with ``coord_as_verts=True``).
        coords_as_verts : bool
            Whether the coords parameter should be interpreted as vertex ids.
        map_surface : str | None
            Surface to project the coordinates to, or None to use raw coords.
            When set to a surface, each foci is positioned at the closest
            vertex in the mesh.
        scale_factor : float
            Controls the size of the foci spheres (relative to 1cm).
        %(color_matplotlib)s
        %(alpha)s Default is 1.
        name : str
            Internal name to use.
        hemi : str | None
            If None, it is assumed to belong to the hemisphere being
            shown. If two hemispheres are being shown, an error will
            be thrown.
        resolution : int
            The resolution of the spheres.
        """
        hemi = self._check_hemi(hemi, extras=['vol'])
        # Figure out how to interpret the first parameter
        if coords_as_verts:
            coords = self.geo[hemi].coords[coords]
            map_surface = None
        # Possibly map the foci coords through a surface
        if map_surface is not None:
            from scipy.spatial.distance import cdist
            foci_surf = _Surface(self._subject, hemi, map_surface,
                                 self._subjects_dir, offset=0,
                                 units=self._units, x_dir=self._rigid[0, :3])
            foci_surf.load_geometry()
            # nearest vertex on map_surface, then that vertex's position on
            # the displayed geometry
            foci_vtxs = np.argmin(cdist(foci_surf.coords, coords), axis=0)
            coords = self.geo[hemi].coords[foci_vtxs]
        # Convert the color code
        color = _to_rgb(color)
        if self._units == 'm':
            # scale_factor is specified relative to 1 cm
            scale_factor = scale_factor / 1000.
        for _, _, v in self._iter_views(hemi):
            self._renderer.sphere(center=coords, color=color,
                                  scale=(10. * scale_factor),
                                  opacity=alpha, resolution=resolution)
            self._renderer.set_camera(**views_dicts[hemi][v])
        # Store the foci in the Brain._data dictionary, stacking onto any
        # previously added foci for this hemi
        data_foci = coords
        if 'foci' in self._data.get(hemi, []):
            data_foci = np.vstack((self._data[hemi]['foci'], data_foci))
        self._data[hemi] = self._data.get(hemi, dict())  # no data added yet
        self._data[hemi]['foci'] = data_foci
    @verbose
    def add_sensors(self, info, trans, meg=None, eeg='original', fnirs=True,
                    ecog=True, seeg=True, dbs=True, verbose=None):
        """Add mesh objects to represent sensor positions.

        Parameters
        ----------
        %(info_not_none)s
        %(trans_not_none)s
        %(meg)s
        %(eeg)s
        %(fnirs)s
        %(ecog)s
        %(seeg)s
        %(dbs)s
        %(verbose)s

        Notes
        -----
        .. versionadded:: 0.24
        """
        _validate_type(info, Info, 'info')
        # normalize the meg/eeg/fnirs options to sets of modes
        meg, eeg, fnirs, warn_meg = _handle_sensor_types(meg, eeg, fnirs)
        picks = pick_types(info, meg=('sensors' in meg),
                           ref_meg=('ref' in meg), eeg=(len(eeg) > 0),
                           ecog=ecog, seeg=seeg, dbs=dbs,
                           fnirs=(len(fnirs) > 0))
        head_mri_t = _get_trans(trans, 'head', 'mri', allow_none=False)[0]
        del trans
        # get transforms to the "mri" coordinate frame
        to_cf_t = _get_transforms_to_coord_frame(
            info, head_mri_t, coord_frame='mri')
        # the head surface is only needed to project EEG electrodes onto it
        if pick_types(info, eeg=True, exclude=()).size > 0 and \
                'projected' in eeg:
            head_surf = _get_head_surface(
                'seghead', self._subject, self._subjects_dir)
        else:
            head_surf = None
        # Do the main plotting
        for _ in self._iter_views('vol'):
            if picks.size > 0:
                sensors_actors = _plot_sensors(
                    self._renderer, info, to_cf_t, picks, meg, eeg,
                    fnirs, warn_meg, head_surf, self._units)
                # register each actor under its sensor-type key so
                # remove_sensors(kind) can find it
                for item, actors in sensors_actors.items():
                    for actor in actors:
                        self._add_actor(item, actor)
            if 'helmet' in meg and pick_types(info, meg=True).size > 0:
                surf = get_meg_helmet_surf(info, head_mri_t)
                verts = surf['rr'] * (1 if self._units == 'm' else 1e3)
                actor, _ = self._renderer.mesh(
                    *verts.T, surf['tris'],
                    color=DEFAULTS['coreg']['helmet_color'],
                    opacity=0.25, reset_camera=False, render=False)
                self._add_actor('helmet', actor)
        self._renderer._update()
def remove_sensors(self, kind=None):
"""Remove sensors from the rendered scene.
Parameters
----------
kind : str | list | None
If None, removes all sensor-related data including the helmet.
Can be "meg", "eeg", "fnirs", "ecog", "seeg", "dbs" or "helmet"
to remove that item.
"""
all_kinds = ('meg', 'eeg', 'fnirs', 'ecog', 'seeg', 'dbs', 'helmet')
if kind is None:
for item in all_kinds:
self._remove(item, render=False)
else:
if isinstance(kind, str):
kind = [kind]
for this_kind in kind:
_check_option('kind', this_kind, all_kinds)
self._remove(this_kind, render=False)
self._renderer._update()
def add_text(self, x, y, text, name=None, color=None, opacity=1.0,
row=0, col=0, font_size=None, justification=None):
"""Add a text to the visualization.
Parameters
----------
x : float
X coordinate.
y : float
Y coordinate.
text : str
Text to add.
name : str
Name of the text (text label can be updated using update_text()).
color : tuple
Color of the text. Default is the foreground color set during
initialization (default is black or white depending on the
background color).
opacity : float
Opacity of the text (default 1.0).
row : int | None
Row index of which brain to use. Default is the top row.
col : int | None
Column index of which brain to use. Default is the left-most
column.
font_size : float | None
The font size to use.
justification : str | None
The text justification.
"""
_validate_type(name, (str, None), 'name')
name = text if name is None else name
if 'text' in self._actors and name in self._actors['text']:
raise ValueError(f'Text with the name {name} already exists')
for ri, ci, _ in self._iter_views('vol'):
if (row is None or row == ri) and (col is None or col == ci):
actor = self._renderer.text2d(
x_window=x, y_window=y, text=text, color=color,
size=font_size, justification=justification)
if 'text' not in self._actors:
self._actors['text'] = dict()
self._actors['text'][name] = actor
def remove_text(self, name=None):
"""Remove text from the rendered scene.
Parameters
----------
name : str | None
Remove specific text by name. If None, all text will be removed.
"""
_validate_type(name, (str, None), 'name')
if name is None:
for actor in self._actors['text'].values():
self._renderer.plotter.remove_actor(actor, render=False)
self._actors.pop('text')
else:
names = [None]
if 'text' in self._actors:
names += list(self._actors['text'].keys())
_check_option('name', name, names)
self._renderer.plotter.remove_actor(
self._actors['text'][name], render=False)
self._actors['text'].pop(name)
self._renderer._update()
    def _configure_label_time_course(self):
        """Switch the time-course traces to per-label (ROI) mode.

        Shows the annotation overlay, plots the time line, and builds the
        vertex -> label-index lookup used when the user clicks a vertex.
        """
        from ...label import read_labels_from_annot
        if not self.show_traces:
            return
        if self.mpl_canvas is None:
            self._configure_mplcanvas()
        else:
            # reuse the existing canvas; drop any vertex-mode glyphs
            self.clear_glyphs()
        self.traces_mode = 'label'
        self.add_annotation(self.annot, color="w", alpha=0.75)
        # now plot the time line
        self.plot_time_line(update=False)
        self.mpl_canvas.update_plot()
        for hemi in self._hemis:
            labels = read_labels_from_annot(
                subject=self._subject,
                parc=self.annot,
                hemi=hemi,
                subjects_dir=self._subjects_dir
            )
            # map each vertex to the index of its label (-1 = unlabeled)
            self._vertex_to_label_id[hemi] = np.full(
                self.geo[hemi].coords.shape[0], -1)
            self._annotation_labels[hemi] = labels
            for idx, label in enumerate(labels):
                self._vertex_to_label_id[hemi][label.vertices] = idx
@fill_doc
def add_annotation(self, annot, borders=True, alpha=1, hemi=None,
remove_existing=True, color=None):
"""Add an annotation file.
Parameters
----------
annot : str | tuple
Either path to annotation file or annotation name. Alternatively,
the annotation can be specified as a ``(labels, ctab)`` tuple per
hemisphere, i.e. ``annot=(labels, ctab)`` for a single hemisphere
or ``annot=((lh_labels, lh_ctab), (rh_labels, rh_ctab))`` for both
hemispheres. ``labels`` and ``ctab`` should be arrays as returned
by :func:`nibabel.freesurfer.io.read_annot`.
borders : bool | int
Show only label borders. If int, specify the number of steps
(away from the true border) along the cortical mesh to include
as part of the border definition.
%(alpha)s Default is 1.
hemi : str | None
If None, it is assumed to belong to the hemisphere being
shown. If two hemispheres are being shown, data must exist
for both hemispheres.
remove_existing : bool
If True (default), remove old annotations.
color : matplotlib-style color code
If used, show all annotations in the same (specified) color.
Probably useful only when showing annotation borders.
"""
from ...label import _read_annot
hemis = self._check_hemis(hemi)
# Figure out where the data is coming from
if isinstance(annot, str):
if os.path.isfile(annot):
filepath = annot
path = os.path.split(filepath)[0]
file_hemi, annot = os.path.basename(filepath).split('.')[:2]
if len(hemis) > 1:
if annot[:2] == 'lh.':
filepaths = [filepath, op.join(path, 'rh' + annot[2:])]
elif annot[:2] == 'rh.':
filepaths = [op.join(path, 'lh' + annot[2:], filepath)]
else:
raise RuntimeError('To add both hemispheres '
'simultaneously, filename must '
'begin with "lh." or "rh."')
else:
filepaths = [filepath]
else:
filepaths = []
for hemi in hemis:
filepath = op.join(self._subjects_dir,
self._subject,
'label',
".".join([hemi, annot, 'annot']))
if not os.path.exists(filepath):
raise ValueError('Annotation file %s does not exist'
% filepath)
filepaths += [filepath]
annots = []
for hemi, filepath in zip(hemis, filepaths):
# Read in the data
labels, cmap, _ = _read_annot(filepath)
annots.append((labels, cmap))
else:
annots = [annot] if len(hemis) == 1 else annot
annot = 'annotation'
for hemi, (labels, cmap) in zip(hemis, annots):
# Maybe zero-out the non-border vertices
self._to_borders(labels, hemi, borders)
# Handle null labels properly
cmap[:, 3] = 255
bgcolor = np.round(np.array(self._brain_color) * 255).astype(int)
bgcolor[-1] = 0
cmap[cmap[:, 4] < 0, 4] += 2 ** 24 # wrap to positive
cmap[cmap[:, 4] <= 0, :4] = bgcolor
if np.any(labels == 0) and not np.any(cmap[:, -1] <= 0):
cmap = np.vstack((cmap, np.concatenate([bgcolor, [0]])))
# Set label ids sensibly
order = np.argsort(cmap[:, -1])
cmap = cmap[order]
ids = np.searchsorted(cmap[:, -1], labels)
cmap = cmap[:, :4]
# Set the alpha level
alpha_vec = cmap[:, 3]
alpha_vec[alpha_vec > 0] = alpha * 255
# Override the cmap when a single color is used
if color is not None:
rgb = np.round(np.multiply(_to_rgb(color), 255))
cmap[:, :3] = rgb.astype(cmap.dtype)
ctable = cmap.astype(np.float64)
for _ in self._iter_views(hemi):
mesh = self._layered_meshes[hemi]
mesh.add_overlay(
scalars=ids,
colormap=ctable,
rng=[np.min(ids), np.max(ids)],
opacity=alpha,
name=annot,
)
self._annots[hemi].append(annot)
if not self.time_viewer or self.traces_mode == 'vertex':
self._renderer._set_colormap_range(
mesh._actor, cmap.astype(np.uint8), None)
self._renderer._update()
    def close(self):
        """Close all figures and cleanup data structure."""
        # Flag first so callbacks that fire during teardown can bail out.
        self._closed = True
        self._renderer.close()
    def show(self):
        """Display the window."""
        from ..backends._utils import _qt_app_exec
        self._renderer.show()
        # When created with block=True, run the Qt event loop until the
        # window is closed.
        if self._block:
            _qt_app_exec(self._renderer.figure.store["app"])
@fill_doc
def get_view(self, row=0, col=0):
"""Get the camera orientation for a given subplot display.
Parameters
----------
row : int
The row to use, default is the first one.
col : int
The column to check, the default is the first one.
Returns
-------
%(roll)s
%(distance)s
%(azimuth)s
%(elevation)s
%(focalpoint)s
"""
row = _ensure_int(row, 'row')
col = _ensure_int(col, 'col')
for h in self._hemis:
for ri, ci, _ in self._iter_views(h):
if (row == ri) and (col == ci):
return self._renderer.get_camera()
return (None,) * 5
    @fill_doc
    def show_view(self, view=None, roll=None, distance=None, *,
                  row=None, col=None, hemi=None, align=True,
                  azimuth=None, elevation=None, focalpoint=None):
        """Orient camera to display view.

        Parameters
        ----------
        %(view)s
        %(roll)s
        %(distance)s
        row : int | None
            The row to set. Default all rows.
        col : int | None
            The column to set. Default all columns.
        hemi : str | None
            Which hemi to use for view lookup (when in "both" mode).
        %(align_view)s
        %(azimuth)s
        %(elevation)s
        %(focalpoint)s

        Notes
        -----
        The builtin string views are the following perspectives, based on the
        :term:`RAS` convention. If not otherwise noted, the view will have the
        top of the brain (superior, +Z) in 3D space shown upward in the 2D
        perspective:

        ``'lateral'``
            From the left or right side such that the lateral (outside)
            surface of the given hemisphere is visible.
        ``'medial'``
            From the left or right side such that the medial (inside)
            surface of the given hemisphere is visible (at least when in split
            or single-hemi mode).
        ``'rostral'``
            From the front.
        ``'caudal'``
            From the rear.
        ``'dorsal'``
            From above, with the front of the brain pointing up.
        ``'ventral'``
            From below, with the front of the brain pointing up.
        ``'frontal'``
            From the front and slightly lateral, with the brain slightly
            tilted forward (yielding a view from slightly above).
        ``'parietal'``
            From the rear and slightly lateral, with the brain slightly tilted
            backward (yielding a view from slightly above).
        ``'axial'``
            From above with the brain pointing up (same as ``'dorsal'``).
        ``'sagittal'``
            From the right side.
        ``'coronal'``
            From the rear.

        Three letter abbreviations (e.g., ``'lat'``) of all of the above are
        also supported.
        """
        _validate_type(row, ('int-like', None), 'row')
        _validate_type(col, ('int-like', None), 'col')
        hemi = self._hemi if hemi is None else hemi
        # In split mode, infer which hemi's view table to use from the
        # targeted subplot position.
        if hemi == 'split':
            if (self._view_layout == 'vertical' and col == 1 or
                    self._view_layout == 'horizontal' and row == 1):
                hemi = 'rh'
            else:
                hemi = 'lh'
        _validate_type(view, (str, None), 'view')
        view_params = dict(azimuth=azimuth, elevation=elevation, roll=roll,
                           distance=distance, focalpoint=focalpoint)
        if view is not None:  # view_params take precedence
            view_params = {param: val for param, val in view_params.items()
                           if val is not None}  # no overwriting with None
            view_params = dict(views_dicts[hemi].get(view), **view_params)
        xfm = self._rigid if align else None
        # Apply to every subplot matching (row, col); None matches all.
        for h in self._hemis:
            for ri, ci, _ in self._iter_views(h):
                if (row is None or row == ri) and (col is None or col == ci):
                    self._renderer.set_camera(
                        **view_params, reset_camera=False, rigid=xfm)
        self._renderer._update()
    def reset_view(self):
        """Reset the camera."""
        # Re-apply the canonical per-view camera settings in every subplot.
        for h in self._hemis:
            for _, _, v in self._iter_views(h):
                self._renderer.set_camera(**views_dicts[h][v],
                                          reset_camera=False)
    def save_image(self, filename=None, mode='rgb'):
        """Save view from all panels to disk.

        Parameters
        ----------
        filename : str
            Path to new image file. If None, a default ``.png`` name is
            generated.
        mode : str
            Either 'rgb' or 'rgba' for values to return.
        """
        if filename is None:
            filename = _generate_default_filename(".png")
        # time_viewer=True includes the trace plot in the saved image
        _save_ndarray_img(
            filename, self.screenshot(mode=mode, time_viewer=True))
    @fill_doc
    def screenshot(self, mode='rgb', time_viewer=False):
        """Generate a screenshot of current view.
        Parameters
        ----------
        mode : str
            Either 'rgb' or 'rgba' for values to return.
        %(time_viewer_brain_screenshot)s
        Returns
        -------
        screenshot : array
            Image pixel values.
        """
        n_channels = 3 if mode == 'rgb' else 4
        img = self._renderer.screenshot(mode)
        logger.debug(f'Got screenshot of size {img.shape}')
        # Optionally append the matplotlib trace canvas below the 3D image,
        # but only when the traces share the main canvas.
        if time_viewer and self.time_viewer and \
                self.show_traces and \
                not self.separate_canvas:
            from matplotlib.image import imread
            canvas = self.mpl_canvas.fig.canvas
            canvas.draw_idle()
            fig = self.mpl_canvas.fig
            with BytesIO() as output:
                # Need to pass dpi here so it uses the physical (HiDPI) DPI
                # rather than logical DPI when saving in most cases.
                # But when matplotlib uses HiDPI and VTK doesn't
                # (e.g., macOS w/Qt 5.14+ and VTK9) then things won't work,
                # so let's just calculate the DPI we need to get
                # the correct size output based on the widths being equal
                size_in = fig.get_size_inches()
                dpi = fig.get_dpi()
                want_size = tuple(x * dpi for x in size_in)
                n_pix = want_size[0] * want_size[1]
                logger.debug(
                    f'Saving figure of size {size_in} @ {dpi} DPI '
                    f'({want_size} = {n_pix} pixels)')
                # Sometimes there can be off-by-one errors here (e.g.,
                # if in mpl int() rather than int(round()) is used to
                # compute the number of pixels) so rather than use "raw"
                # format and try to reshape ourselves, just write to PNG
                # and read it, which has the dimensions encoded for us.
                fig.savefig(output, dpi=dpi, format='png',
                            facecolor=self._bg_color, edgecolor='none')
                output.seek(0)
                trace_img = imread(output, format='png')[:, :, :n_channels]
                # Scale to 0-255 uint8 to match the renderer screenshot
                trace_img = np.clip(
                    np.round(trace_img * 255), 0, 255).astype(np.uint8)
                bgcolor = np.array(self._brain_color[:n_channels]) / 255
                img = concatenate_images([img, trace_img], bgcolor=bgcolor,
                                         n_channels=n_channels)
        return img
@contextlib.contextmanager
def _no_lut_update(self, why):
orig = self._lut_locked
self._lut_locked = why
try:
yield
finally:
self._lut_locked = orig
    @fill_doc
    def update_lut(self, fmin=None, fmid=None, fmax=None, alpha=None):
        """Update color map.
        Parameters
        ----------
        %(fmin_fmid_fmax)s
        %(alpha)s
        """
        args = f'{fmin}, {fmid}, {fmax}, {alpha}'
        # Respect the lock set by _no_lut_update (prevents re-entrant
        # updates triggered from the widget callbacks below).
        if self._lut_locked is not None:
            logger.debug(f'LUT update postponed with {args}')
            return
        logger.debug(f'Updating LUT with {args}')
        center = self._data['center']
        colormap = self._data['colormap']
        transparent = self._data['transparent']
        lims = {key: self._data[key] for key in ('fmin', 'fmid', 'fmax')}
        # Merge requested limits while keeping fmin <= fmid <= fmax
        _update_monotonic(lims, fmin=fmin, fmid=fmid, fmax=fmax)
        assert all(val is not None for val in lims.values())
        self._data.update(lims)
        # Rebuild the color table as uint8 values in [0, 255]
        self._data['ctable'] = np.round(
            calculate_lut(colormap, alpha=1., center=center,
                          transparent=transparent, **lims) *
            255).astype(np.uint8)
        # update our values
        rng = self._cmap_range
        ctable = self._data['ctable']
        for hemi in ['lh', 'rh', 'vol']:
            hemi_data = self._data.get(hemi)
            if hemi_data is not None:
                # Surface overlay (if this hemi has a layered mesh)
                if hemi in self._layered_meshes:
                    mesh = self._layered_meshes[hemi]
                    mesh.update_overlay(name='data',
                                        colormap=self._data['ctable'],
                                        opacity=alpha,
                                        rng=rng)
                    self._renderer._set_colormap_range(
                        mesh._actor, ctable, self._scalar_bar, rng,
                        self._brain_color)
                # Volume renderings (if any)
                grid_volume_pos = hemi_data.get('grid_volume_pos')
                grid_volume_neg = hemi_data.get('grid_volume_neg')
                for grid_volume in (grid_volume_pos, grid_volume_neg):
                    if grid_volume is not None:
                        self._renderer._set_volume_range(
                            grid_volume, ctable, hemi_data['alpha'],
                            self._scalar_bar, rng)
                # Vector glyph actors (if any)
                glyph_actor = hemi_data.get('glyph_actor')
                if glyph_actor is not None:
                    for glyph_actor_ in glyph_actor:
                        self._renderer._set_colormap_range(
                            glyph_actor_, ctable, self._scalar_bar, rng)
        if self.time_viewer:
            # Sync the GUI widgets without re-triggering update_lut
            with self._no_lut_update(f'update_lut {args}'):
                for key in ('fmin', 'fmid', 'fmax'):
                    self.callbacks[key](lims[key])
        self._renderer._update()
def set_data_smoothing(self, n_steps):
"""Set the number of smoothing steps.
Parameters
----------
n_steps : int
Number of smoothing steps.
"""
from ...morph import _hemi_morph
for hemi in ['lh', 'rh']:
hemi_data = self._data.get(hemi)
if hemi_data is not None:
if len(hemi_data['array']) >= self.geo[hemi].x.shape[0]:
continue
vertices = hemi_data['vertices']
if vertices is None:
raise ValueError(
'len(data) < nvtx (%s < %s): the vertices '
'parameter must not be None'
% (len(hemi_data), self.geo[hemi].x.shape[0]))
morph_n_steps = 'nearest' if n_steps == -1 else n_steps
with use_log_level(False):
smooth_mat = _hemi_morph(
self.geo[hemi].orig_faces,
np.arange(len(self.geo[hemi].coords)),
vertices, morph_n_steps, maps=None, warn=False)
self._data[hemi]['smooth_mat'] = smooth_mat
self.set_time_point(self._data['time_idx'])
self._data['smoothing_steps'] = n_steps
    @property
    def _n_times(self):
        """Number of time points, or None when no times are defined."""
        return len(self._times) if self._times is not None else None
    @property
    def time_interpolation(self):
        """The interpolation mode."""
        return self._time_interpolation
@fill_doc
def set_time_interpolation(self, interpolation):
"""Set the interpolation mode.
Parameters
----------
%(interpolation_brain_time)s
"""
self._time_interpolation = _check_option(
'interpolation',
interpolation,
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic')
)
self._time_interp_funcs = dict()
self._time_interp_inv = None
if self._times is not None:
idx = np.arange(self._n_times)
for hemi in ['lh', 'rh', 'vol']:
hemi_data = self._data.get(hemi)
if hemi_data is not None:
array = hemi_data['array']
self._time_interp_funcs[hemi] = _safe_interp1d(
idx, array, self._time_interpolation, axis=-1,
assume_sorted=True)
self._time_interp_inv = _safe_interp1d(idx, self._times)
def set_time_point(self, time_idx):
"""Set the time point shown (can be a float to interpolate).
Parameters
----------
time_idx : int | float
The time index to use. Can be a float to use interpolation
between indices.
"""
self._current_act_data = dict()
time_actor = self._data.get('time_actor', None)
time_label = self._data.get('time_label', None)
for hemi in ['lh', 'rh', 'vol']:
hemi_data = self._data.get(hemi)
if hemi_data is not None:
array = hemi_data['array']
# interpolate in time
vectors = None
if array.ndim == 1:
act_data = array
self._current_time = 0
else:
act_data = self._time_interp_funcs[hemi](time_idx)
self._current_time = self._time_interp_inv(time_idx)
if array.ndim == 3:
vectors = act_data
act_data = np.linalg.norm(act_data, axis=1)
self._current_time = self._time_interp_inv(time_idx)
self._current_act_data[hemi] = act_data
if time_actor is not None and time_label is not None:
time_actor.SetInput(time_label(self._current_time))
# update the volume interpolation
grid = hemi_data.get('grid')
if grid is not None:
vertices = self._data['vol']['vertices']
values = self._current_act_data['vol']
rng = self._cmap_range
fill = 0 if self._data['center'] is not None else rng[0]
grid.cell_data['values'].fill(fill)
# XXX for sided data, we probably actually need two
# volumes as composite/MIP needs to look at two
# extremes... for now just use abs. Eventually we can add
# two volumes if we want.
grid.cell_data['values'][vertices] = values
# interpolate in space
smooth_mat = hemi_data.get('smooth_mat')
if smooth_mat is not None:
act_data = smooth_mat.dot(act_data)
# update the mesh scalar values
if hemi in self._layered_meshes:
mesh = self._layered_meshes[hemi]
if 'data' in mesh._overlays:
mesh.update_overlay(name='data', scalars=act_data)
else:
mesh.add_overlay(
scalars=act_data,
colormap=self._data['ctable'],
rng=self._cmap_range,
opacity=None,
name='data',
)
# update the glyphs
if vectors is not None:
self._update_glyphs(hemi, vectors)
self._data['time_idx'] = time_idx
self._renderer._update()
def set_time(self, time):
"""Set the time to display (in seconds).
Parameters
----------
time : float
The time to show, in seconds.
"""
if self._times is None:
raise ValueError(
'Cannot set time when brain has no defined times.')
elif min(self._times) <= time <= max(self._times):
self.set_time_point(np.interp(float(time), self._times,
np.arange(self._n_times)))
else:
raise ValueError(
f'Requested time ({time} s) is outside the range of '
f'available times ({min(self._times)}-{max(self._times)} s).')
    def _update_glyphs(self, hemi, vectors):
        """Create or update the vector (quiver) glyphs for one hemisphere.

        On first call the glyph dataset, mapper, and one actor per view
        are created; on later calls only the vector point data and the
        colormap range are refreshed.
        """
        hemi_data = self._data.get(hemi)
        assert hemi_data is not None
        vertices = hemi_data['vertices']
        vector_alpha = self._data['vector_alpha']
        scale_factor = self._data['scale_factor']
        # None means the data cover all vertices
        vertices = slice(None) if vertices is None else vertices
        x, y, z = np.array(self.geo[hemi].coords)[vertices].T
        if hemi_data['glyph_actor'] is None:
            add = True
            hemi_data['glyph_actor'] = list()
        else:
            add = False
        count = 0
        for _ in self._iter_views(hemi):
            if hemi_data['glyph_dataset'] is None:
                # First time: build the quiver dataset and its mapper
                glyph_mapper, glyph_dataset = self._renderer.quiver3d(
                    x, y, z,
                    vectors[:, 0], vectors[:, 1], vectors[:, 2],
                    color=None,
                    mode='2darrow',
                    scale_mode='vector',
                    scale=scale_factor,
                    opacity=vector_alpha,
                    name=str(hemi) + "_glyph"
                )
                hemi_data['glyph_dataset'] = glyph_dataset
                hemi_data['glyph_mapper'] = glyph_mapper
            else:
                # Dataset exists: just swap in the new vectors
                glyph_dataset = hemi_data['glyph_dataset']
                glyph_dataset.point_data['vec'] = vectors
                glyph_mapper = hemi_data['glyph_mapper']
            if add:
                # One actor per view, all sharing the same mapper
                glyph_actor = self._renderer._actor(glyph_mapper)
                prop = glyph_actor.GetProperty()
                prop.SetLineWidth(2.)
                prop.SetOpacity(vector_alpha)
                self._renderer.plotter.add_actor(glyph_actor, render=False)
                hemi_data['glyph_actor'].append(glyph_actor)
            else:
                glyph_actor = hemi_data['glyph_actor'][count]
            count += 1
            self._renderer._set_colormap_range(
                actor=glyph_actor,
                ctable=self._data['ctable'],
                scalar_bar=None,
                rng=self._cmap_range,
            )
@property
def _cmap_range(self):
dt_max = self._data['fmax']
if self._data['center'] is None:
dt_min = self._data['fmin']
else:
dt_min = -1 * dt_max
rng = [dt_min, dt_max]
return rng
def _update_fscale(self, fscale):
"""Scale the colorbar points."""
fmin = self._data['fmin'] * fscale
fmid = self._data['fmid'] * fscale
fmax = self._data['fmax'] * fscale
self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)
    def _update_auto_scaling(self, restore=False):
        """Recompute colormap limits from the currently displayed data.

        With ``restore=True`` and a user-supplied clim, the user's limits
        are re-applied instead of auto-computed ones.
        """
        user_clim = self._data['clim']
        # Diverging (pos_lims) scaling is allowed unless the user
        # explicitly provided 'lims'
        if user_clim is not None and 'lims' in user_clim:
            allow_pos_lims = False
        else:
            allow_pos_lims = True
        if user_clim is not None and restore:
            clim = user_clim
        else:
            clim = 'auto'
        colormap = self._data['colormap']
        transparent = self._data['transparent']
        mapdata = _process_clim(
            clim, colormap, transparent,
            np.concatenate(list(self._current_act_data.values())),
            allow_pos_lims)
        diverging = 'pos_lims' in mapdata['clim']
        colormap = mapdata['colormap']
        scale_pts = mapdata['clim']['pos_lims' if diverging else 'lims']
        transparent = mapdata['transparent']
        del mapdata
        fmin, fmid, fmax = scale_pts
        # Diverging maps are centered on zero
        center = 0. if diverging else None
        self._data['center'] = center
        self._data['colormap'] = colormap
        self._data['transparent'] = transparent
        self.update_lut(fmin=fmin, fmid=fmid, fmax=fmax)
def _to_time_index(self, value):
"""Return the interpolated time index of the given time value."""
time = self._data['time']
value = np.interp(value, time, np.arange(len(time)))
return value
    @property
    def data(self):
        """Data used by time viewer and color bar widgets."""
        return self._data
    @property
    def labels(self):
        """The stored labels (``self._labels``)."""
        return self._labels
    @property
    def views(self):
        """The stored view names (``self._views``)."""
        return self._views
    @property
    def hemis(self):
        """The stored hemisphere names (``self._hemis``)."""
        return self._hemis
    def _save_movie(self, filename, time_dilation=4., tmin=None, tmax=None,
                    framerate=24, interpolation=None, codec=None,
                    bitrate=None, callback=None, time_viewer=False, **kwargs):
        # Render all frames with interaction disabled, then hand them to
        # imageio (output format inferred from the filename extension).
        import imageio
        with self._renderer._disabled_interaction():
            images = self._make_movie_frames(
                time_dilation, tmin, tmax, framerate, interpolation, callback,
                time_viewer)
        # find imageio FFMPEG parameters
        if 'fps' not in kwargs:
            kwargs['fps'] = framerate
        if codec is not None:
            kwargs['codec'] = codec
        if bitrate is not None:
            kwargs['bitrate'] = bitrate
        imageio.mimwrite(filename, images, **kwargs)
    def _save_movie_tv(self, filename, time_dilation=4., tmin=None, tmax=None,
                       framerate=24, interpolation=None, codec=None,
                       bitrate=None, callback=None, time_viewer=False,
                       **kwargs):
        # Same as _save_movie, but with GUI status/progress feedback and a
        # busy cursor while rendering.
        def frame_callback(frame, n_frames):
            # Called once per rendered frame; frame == n_frames signals the
            # final ImageIO encoding step (see _make_movie_frames).
            if frame == n_frames:
                # On the ImageIO step
                self.status_msg.set_value(
                    "Saving with ImageIO: %s"
                    % filename
                )
                self.status_msg.show()
                self.status_progress.hide()
                self._renderer._status_bar_update()
            else:
                self.status_msg.set_value(
                    "Rendering images (frame %d / %d) ..."
                    % (frame + 1, n_frames)
                )
                self.status_msg.show()
                self.status_progress.show()
                self.status_progress.set_range([0, n_frames - 1])
                self.status_progress.set_value(frame)
                self.status_progress.update()
                self.status_msg.update()
                self._renderer._status_bar_update()
        # set cursor to busy
        default_cursor = self._renderer._window_get_cursor()
        self._renderer._window_set_cursor(
            self._renderer._window_new_cursor("WaitCursor"))
        try:
            self._save_movie(filename, time_dilation, tmin, tmax,
                             framerate, interpolation, codec,
                             bitrate, frame_callback, time_viewer, **kwargs)
        except (Exception, KeyboardInterrupt):
            # Surface failures/aborts as a warning instead of crashing the
            # GUI; include the traceback for debugging.
            warn('Movie saving aborted:\n' + traceback.format_exc())
        finally:
            self._renderer._window_set_cursor(default_cursor)
@fill_doc
def save_movie(self, filename=None, time_dilation=4., tmin=None, tmax=None,
framerate=24, interpolation=None, codec=None,
bitrate=None, callback=None, time_viewer=False, **kwargs):
"""Save a movie (for data with a time axis).
The movie is created through the :mod:`imageio` module. The format is
determined by the extension, and additional options can be specified
through keyword arguments that depend on the format, see
:doc:`imageio's format page <imageio:formats/index>`.
.. Warning::
This method assumes that time is specified in seconds when adding
data. If time is specified in milliseconds this will result in
movies 1000 times longer than expected.
Parameters
----------
filename : str
Path at which to save the movie. The extension determines the
format (e.g., ``'*.mov'``, ``'*.gif'``, ...; see the :mod:`imageio`
documentation for available formats).
time_dilation : float
Factor by which to stretch time (default 4). For example, an epoch
from -100 to 600 ms lasts 700 ms. With ``time_dilation=4`` this
would result in a 2.8 s long movie.
tmin : float
First time point to include (default: all data).
tmax : float
Last time point to include (default: all data).
framerate : float
Framerate of the movie (frames per second, default 24).
%(interpolation_brain_time)s
If None, it uses the current ``brain.interpolation``,
which defaults to ``'nearest'``. Defaults to None.
codec : str | None
The codec to use.
bitrate : float | None
The bitrate to use.
callback : callable | None
A function to call on each iteration. Useful for status message
updates. It will be passed keyword arguments ``frame`` and
``n_frames``.
%(time_viewer_brain_screenshot)s
**kwargs : dict
Specify additional options for :mod:`imageio`.
"""
if filename is None:
filename = _generate_default_filename(".mp4")
func = self._save_movie_tv if self.time_viewer else self._save_movie
func(filename, time_dilation, tmin, tmax,
framerate, interpolation, codec,
bitrate, callback, time_viewer, **kwargs)
    def _make_movie_frames(self, time_dilation, tmin, tmax, framerate,
                           interpolation, callback, time_viewer):
        # Render one screenshot per movie frame and return them as a list.
        from math import floor
        # find tmin
        if tmin is None:
            tmin = self._times[0]
        elif tmin < self._times[0]:
            raise ValueError("tmin=%r is smaller than the first time point "
                             "(%r)" % (tmin, self._times[0]))
        # find indexes at which to create frames
        if tmax is None:
            tmax = self._times[-1]
        elif tmax > self._times[-1]:
            raise ValueError("tmax=%r is greater than the latest time point "
                             "(%r)" % (tmax, self._times[-1]))
        # Number of frames follows from wall-clock movie length
        # (tmax - tmin) * time_dilation at the given framerate
        n_frames = floor((tmax - tmin) * time_dilation * framerate)
        times = np.arange(n_frames, dtype=float)
        times /= framerate * time_dilation
        times += tmin
        # Convert frame times (s) to fractional time indices
        time_idx = np.interp(times, self._times, np.arange(self._n_times))
        n_times = len(time_idx)
        if n_times == 0:
            raise ValueError("No time points selected")
        logger.debug("Save movie for time points/samples\n%s\n%s"
                     % (times, time_idx))
        # Sometimes the first screenshot is rendered with a different
        # resolution on OS X
        self.screenshot(time_viewer=time_viewer)
        old_mode = self.time_interpolation
        if interpolation is not None:
            self.set_time_interpolation(interpolation)
        try:
            images = [
                self.screenshot(time_viewer=time_viewer)
                for _ in self._iter_time(time_idx, callback)]
        finally:
            # Always restore the caller's interpolation mode
            self.set_time_interpolation(old_mode)
        if callback is not None:
            # Final call with frame == n_frames signals the encoding step
            callback(frame=len(time_idx), n_frames=len(time_idx))
        return images
def _iter_time(self, time_idx, callback):
"""Iterate through time points, then reset to current time.
Parameters
----------
time_idx : array_like
Time point indexes through which to iterate.
callback : callable | None
Callback to call before yielding each frame.
Yields
------
idx : int | float
Current index.
Notes
-----
Used by movie and image sequence saving functions.
"""
if self.time_viewer:
func = partial(self.callbacks["time"],
update_widget=True)
else:
func = self.set_time_point
current_time_idx = self._data["time_idx"]
for ii, idx in enumerate(time_idx):
func(idx)
if callback is not None:
callback(frame=ii, n_frames=len(time_idx))
yield idx
# Restore original time index
func(current_time_idx)
    def _check_stc(self, hemi, array, vertices):
        """Resolve ``(stc, array, vertices)`` for possibly-stc input.

        If ``array`` is a SourceEstimate, extract the data array and
        vertex numbers for the requested hemi (or volume); otherwise pass
        ``array``/``vertices`` through unchanged with ``stc=None``.
        """
        from ...source_estimate import (
            _BaseSourceEstimate, _BaseSurfaceSourceEstimate,
            _BaseMixedSourceEstimate, _BaseVolSourceEstimate
        )
        if isinstance(array, _BaseSourceEstimate):
            stc = array
            stc_surf = stc_vol = None
            if isinstance(stc, _BaseSurfaceSourceEstimate):
                stc_surf = stc
            elif isinstance(stc, _BaseMixedSourceEstimate):
                # Mixed estimates: pick the surface or volume part
                stc_surf = stc.surface() if hemi != 'vol' else None
                stc_vol = stc.volume() if hemi == 'vol' else None
            elif isinstance(stc, _BaseVolSourceEstimate):
                stc_vol = stc if hemi == 'vol' else None
            else:
                raise TypeError("stc not supported")
            if stc_surf is None and stc_vol is None:
                raise ValueError("No data to be added")
            if stc_surf is not None:
                array = getattr(stc_surf, hemi + '_data')
                # vertices[0] is lh, vertices[1] is rh
                vertices = stc_surf.vertices[0 if hemi == 'lh' else 1]
            if stc_vol is not None:
                array = stc_vol.data
                vertices = np.concatenate(stc_vol.vertices)
        else:
            stc = None
        return stc, array, vertices
def _check_hemi(self, hemi, extras=()):
"""Check for safe single-hemi input, returns str."""
_validate_type(hemi, (None, str), 'hemi')
if hemi is None:
if self._hemi not in ['lh', 'rh']:
raise ValueError('hemi must not be None when both '
'hemispheres are displayed')
hemi = self._hemi
_check_option('hemi', hemi, ('lh', 'rh') + tuple(extras))
return hemi
def _check_hemis(self, hemi):
"""Check for safe dual or single-hemi input, returns list."""
if hemi is None:
if self._hemi not in ['lh', 'rh']:
hemi = ['lh', 'rh']
else:
hemi = [self._hemi]
elif hemi not in ['lh', 'rh']:
extra = ' or None' if self._hemi in ['lh', 'rh'] else ''
raise ValueError('hemi must be either "lh" or "rh"' + extra)
else:
hemi = [hemi]
return hemi
    def _to_borders(self, label, hemi, borders, restrict_idx=None):
        """Convert a label/parc to borders.

        Modifies ``label`` in place: when ``borders`` is truthy, all
        values are zeroed except at vertices lying on the label border.
        """
        if not isinstance(borders, (bool, int)) or borders < 0:
            raise ValueError('borders must be a bool or positive integer')
        if borders:
            n_vertices = label.size
            # A border edge connects two vertices with different label values
            edges = mesh_edges(self.geo[hemi].orig_faces)
            edges = edges.tocoo()
            border_edges = label[edges.row] != label[edges.col]
            show = np.zeros(n_vertices, dtype=np.int64)
            keep_idx = np.unique(edges.row[border_edges])
            if isinstance(borders, int):
                # Dilate the border region `borders` times (note: bool is a
                # subclass of int, so True dilates once)
                for _ in range(borders):
                    keep_idx = np.in1d(
                        self.geo[hemi].orig_faces.ravel(), keep_idx)
                    keep_idx.shape = self.geo[hemi].orig_faces.shape
                    keep_idx = self.geo[hemi].orig_faces[
                        np.any(keep_idx, axis=1)]
                    keep_idx = np.unique(keep_idx)
                if restrict_idx is not None:
                    keep_idx = keep_idx[np.in1d(keep_idx, restrict_idx)]
            # Zero everything except the (dilated) border, in place
            show[keep_idx] = 1
            label *= show
def get_picked_points(self):
"""Return the vertices of the picked points.
Returns
-------
points : list of int | None
The vertices picked by the time viewer.
"""
if hasattr(self, "time_viewer"):
return self.picked_points
    def __hash__(self):
        """Hash the object."""
        # Delegates to the precomputed ``_hash`` attribute (set elsewhere
        # on the class), so the hash is stable over the object's lifetime.
        return self._hash
def _safe_interp1d(x, y, kind='linear', axis=-1, assume_sorted=False):
    """Work around interp1d not liking singleton dimensions."""
    from scipy.interpolate import interp1d
    if y.shape[axis] != 1:
        return interp1d(x, y, kind, axis=axis, assume_sorted=assume_sorted)

    # Singleton axis: interp1d cannot handle it, so return a function that
    # simply broadcasts the single sample to the query shape.
    def func(xi):
        return np.take(y, np.zeros(np.asarray(xi).shape, int), axis=axis)
    return func
def _update_limits(fmin, fmid, fmax, center, array):
if center is None:
if fmin is None:
fmin = array.min() if array.size > 0 else 0
if fmax is None:
fmax = array.max() if array.size > 0 else 1
else:
if fmin is None:
fmin = 0
if fmax is None:
fmax = np.abs(center - array).max() if array.size > 0 else 1
if fmid is None:
fmid = (fmin + fmax) / 2.
if fmin >= fmid:
raise RuntimeError('min must be < mid, got %0.4g >= %0.4g'
% (fmin, fmid))
if fmid >= fmax:
raise RuntimeError('mid must be < max, got %0.4g >= %0.4g'
% (fmid, fmax))
return fmin, fmid, fmax
def _update_monotonic(lims, fmin, fmid, fmax):
    """Apply new limits to ``lims`` in place, keeping fmin <= fmid <= fmax.

    Each non-None value is written to ``lims`` and the other two limits are
    bumped as needed to preserve monotonic ordering.
    """
    def _bump_up(key, floor_val):
        # Raise lims[key] so it is not below floor_val
        if lims[key] < floor_val:
            logger.debug(f'    Bumping {key} = {lims[key]} to {floor_val}')
            lims[key] = floor_val

    def _bump_down(key, ceil_val):
        # Lower lims[key] so it does not exceed ceil_val
        if lims[key] > ceil_val:
            logger.debug(f'    Bumping {key} = {lims[key]} to {ceil_val}')
            lims[key] = ceil_val

    if fmin is not None:
        lims['fmin'] = fmin
        _bump_up('fmax', fmin)
        _bump_up('fmid', fmin)
        assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
    if fmid is not None:
        lims['fmid'] = fmid
        _bump_down('fmin', fmid)
        _bump_up('fmax', fmid)
        assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
    if fmax is not None:
        lims['fmax'] = fmax
        _bump_down('fmin', fmax)
        _bump_down('fmid', fmax)
        assert lims['fmin'] <= lims['fmid'] <= lims['fmax']
def _get_range(brain):
    """Return ``[min, max]`` of the absolute current activation data."""
    magnitudes = np.abs(
        np.concatenate(list(brain._current_act_data.values())))
    return [magnitudes.min(), magnitudes.max()]
class _FakeIren():
    """No-op event sink with VTK-interactor-style method names.

    Every method (EnterEvent, KeyPressEvent, ...) does nothing, so callers
    can invoke interactor events unconditionally.
    """
    def EnterEvent(self):
        pass
    def MouseMoveEvent(self):
        pass
    def LeaveEvent(self):
        pass
    def SetEventInformation(self, *args, **kwargs):
        pass
    def CharEvent(self):
        pass
    def KeyPressEvent(self, *args, **kwargs):
        pass
    def KeyReleaseEvent(self, *args, **kwargs):
        pass
| bsd-3-clause | b091a37be9c766a2ae58026eee2d88c3 | 39.715042 | 79 | 0.511506 | 4.101264 | false | false | false | false |
mattpap/sympy-polys | sympy/core/evalf.py | 1 | 38790 | """
Adaptive numerical evaluation of SymPy expressions, using mpmath
for mathematical functions.
"""
from sympy.mpmath.libmp import (from_int, from_rational, fzero, normalize,
bitcount, round_nearest, to_str, fone, fnone, fhalf, to_int, mpf_lt,
mpf_sqrt, mpf_cmp, mpf_abs, mpf_pow_int, mpf_shift, mpf_add, mpf_mul,
mpf_neg)
import sympy.mpmath.libmp as libmp
from sympy.mpmath.libmp.libmpf import dps_to_prec
from sympy.mpmath import mpf, mpc, quadts, quadosc, mp, make_mpf, make_mpc
from sympy.mpmath.libmp import (mpf_pi, mpf_log, mpf_pow, mpf_sin, mpf_cos,
mpf_atan, mpf_atan2, mpf_e, mpf_exp, from_man_exp, from_int)
from sympy.mpmath.libmp.backend import MPZ
from sympy.mpmath import nsum
from sympy.mpmath import inf as mpmath_inf
from sympy.mpmath.libmp.gammazeta import mpf_bernoulli
import math
from basic import C, S
from sympify import sympify
LG10 = math.log(10,2)
# Used in a few places as placeholder values to denote exponents and
# precision levels, e.g. of exact numbers. Must be careful to avoid
# passing these to mpmath functions or returning them in final results.
INF = 1e1000
MINUS_INF = -1e1000
# ~= 100 digits. Real men set this to INF.
DEFAULT_MAXPREC = 333
class PrecisionExhausted(ArithmeticError):
pass
#----------------------------------------------------------------------------#
# #
# Helper functions for arithmetic and complex parts #
# #
#----------------------------------------------------------------------------#
"""
An mpf value tuple is a tuple of integers (sign, man, exp, bc)
representing a floating-point number: (-1)**sign*man*2**exp where
bc should correspond to the number of bits used to represent the
mantissa (man) in binary notation, e.g. (0,5,1,3) represents 10::
>>> from sympy.core.evalf import bitcount
>>> n=(-1)**0 * 5 * 2**1; n, bitcount(5)
(10, 3)
A temporary result is a tuple (re, im, re_acc, im_acc) where
re and im are nonzero mpf value tuples representing approximate
numbers, or None to denote exact zeros.
re_acc, im_acc are integers denoting log2(e) where e is the estimated
relative accuracy of the respective complex part, but may be anything
if the corresponding complex part is None.
"""
def fastlog(x):
"""Fast approximation of log2(x) for an mpf value tuple x.
Notes: Calculated as exponent + width of mantissa. This is an
approximation for two reasons: 1) it gives the ceil(log2(abs(x)))
value and 2) it is too high by 1 in the case that x is an exact
power of 2. Although this is easy to remedy by testing to see if
the odd mpf mantissa is 1 (indicating that one was dealing with
an exact power of 2) that would decrease the speed and is not
necessary as this is only being used as an approximation for the
number of bits in x. The correct return value could be written as
"x[2] + (x[3] if x[1] != 1 else 0)".
Since mpf tuples always have an odd mantissa, no check is done
to see if the mantissa is a multiple of 2 (in which case the
result would be too large by 1).
Example::
>>> from sympy import log
>>> from sympy.core.evalf import fastlog, bitcount
>>> n=(-1)**0*5*2**1; n, (log(n)/log(2)).evalf(), fastlog((0,5,1,bitcount(5)))
(10, 3.32192809488736, 4)
"""
if not x or x == fzero:
return MINUS_INF
return x[2] + x[3]
def complex_accuracy(result):
"""
Returns relative accuracy of a complex number with given accuracies
for the real and imaginary parts. The relative accuracy is defined
in the complex norm sense as ||z|+|error|| / |z| where error
is equal to (real absolute error) + (imag absolute error)*i.
The full expression for the (logarithmic) error can be approximated
easily by using the max norm to approximate the complex norm.
In the worst case (re and im equal), this is wrong by a factor
sqrt(2), or by log2(sqrt(2)) = 0.5 bit.
"""
re, im, re_acc, im_acc = result
if not im:
if not re:
return INF
return re_acc
if not re:
return im_acc
re_size = fastlog(re)
im_size = fastlog(im)
absolute_error = max(re_size-re_acc, im_size-im_acc)
relative_error = absolute_error - max(re_size, im_size)
return -relative_error
def get_abs(expr, prec, options):
re, im, re_acc, im_acc = evalf(expr, prec+2, options)
if not re:
re, re_acc, im, im_acc = im, im_acc, re, re_acc
if im:
return libmp.mpc_abs((re, im), prec), None, re_acc, None
else:
return mpf_abs(re), None, re_acc, None
def get_complex_part(expr, no, prec, options):
"""no = 0 for real part, no = 1 for imaginary part"""
workprec = prec
i = 0
while 1:
res = evalf(expr, workprec, options)
value, accuracy = res[no::2]
if (not value) or accuracy >= prec:
return value, None, accuracy, None
workprec += max(30, 2**i)
i += 1
def evalf_abs(expr, prec, options):
return get_abs(expr.args[0], prec, options)
def evalf_re(expr, prec, options):
return get_complex_part(expr.args[0], 0, prec, options)
def evalf_im(expr, prec, options):
return get_complex_part(expr.args[0], 1, prec, options)
def finalize_complex(re, im, prec):
assert re and im
if re == fzero and im == fzero:
raise ValueError("got complex zero with unknown accuracy")
size_re = fastlog(re)
size_im = fastlog(im)
# Convert fzeros to scaled zeros
if re == fzero:
re = mpf_shift(fone, size_im-prec)
size_re = fastlog(re)
elif im == fzero:
im = mpf_shift(fone, size_re-prec)
size_im = fastlog(im)
if size_re > size_im:
re_acc = prec
im_acc = prec + min(-(size_re - size_im), 0)
else:
im_acc = prec
re_acc = prec + min(-(size_im - size_re), 0)
return re, im, re_acc, im_acc
def chop_parts(value, prec):
"""
Chop off tiny real or complex parts.
"""
re, im, re_acc, im_acc = value
# Method 1: chop based on absolute value
if re and (fastlog(re) < -prec+4):
re, re_acc = None, None
if im and (fastlog(im) < -prec+4):
im, im_acc = None, None
# Method 2: chop if inaccurate and relatively small
if re and im:
delta = fastlog(re) - fastlog(im)
if re_acc < 2 and (delta - re_acc <= -prec+4):
re, re_acc = None, None
if im_acc < 2 and (delta - im_acc >= prec-4):
im, im_acc = None, None
return re, im, re_acc, im_acc
def check_target(expr, result, prec):
a = complex_accuracy(result)
if a < prec:
raise PrecisionExhausted("Failed to distinguish the expression: \n\n%s\n\n"
"from zero. Try simplifying the input, using chop=True, or providing "
"a higher maxn for evalf" % (expr))
def get_integer_part(expr, no, options, return_ints=False):
"""
With no = 1, computes ceiling(expr)
With no = -1, computes floor(expr)
Note: this function either gives the exact result or signals failure.
"""
# The expression is likely less than 2^30 or so
assumed_size = 30
ire, iim, ire_acc, iim_acc = evalf(expr, assumed_size, options)
# We now know the size, so we can calculate how much extra precision
# (if any) is needed to get within the nearest integer
if ire and iim:
gap = max(fastlog(ire)-ire_acc, fastlog(iim)-iim_acc)
elif ire:
gap = fastlog(ire)-ire_acc
elif iim:
gap = fastlog(iim)-iim_acc
else:
# ... or maybe the expression was exactly zero
return None, None, None, None
margin = 10
if gap >= -margin:
ire, iim, ire_acc, iim_acc = evalf(expr, margin+assumed_size+gap, options)
# We can now easily find the nearest integer, but to find floor/ceil, we
# must also calculate whether the difference to the nearest integer is
# positive or negative (which may fail if very close)
def calc_part(expr, nexpr):
nint = int(to_int(nexpr, round_nearest))
expr = C.Add(expr, -nint, evaluate=False)
x, _, x_acc, _ = evalf(expr, 10, options)
check_target(expr, (x, None, x_acc, None), 3)
nint += int(no*(mpf_cmp(x or fzero, fzero) == no))
nint = from_int(nint)
return nint, fastlog(nint) + 10
re, im, re_acc, im_acc = None, None, None, None
if ire:
re, re_acc = calc_part(C.re(expr, evaluate=False), ire)
if iim:
im, im_acc = calc_part(C.im(expr, evaluate=False), iim)
if return_ints:
return int(to_int(re or fzero)), int(to_int(im or fzero))
return re, im, re_acc, im_acc
def evalf_ceiling(expr, prec, options):
return get_integer_part(expr.args[0], 1, options)
def evalf_floor(expr, prec, options):
return get_integer_part(expr.args[0], -1, options)
#----------------------------------------------------------------------------#
# #
# Arithmetic operations #
# #
#----------------------------------------------------------------------------#
def add_terms(terms, prec, target_prec):
"""
Helper for evalf_add. Adds a list of (mpfval, accuracy) terms.
"""
if len(terms) == 1:
if not terms[0]:
# XXX: this is supposed to represent a scaled zero
return mpf_shift(fone, target_prec), -1
return terms[0]
max_extra_prec = 2*prec
sum_man, sum_exp, absolute_error = 0, 0, MINUS_INF
for x, accuracy in terms:
if not x:
continue
sign, man, exp, bc = x
if sign:
man = -man
absolute_error = max(absolute_error, bc+exp-accuracy)
delta = exp - sum_exp
if exp >= sum_exp:
# x much larger than existing sum?
# first: quick test
if (delta > max_extra_prec) and \
((not sum_man) or delta-bitcount(abs(sum_man)) > max_extra_prec):
sum_man = man
sum_exp = exp
else:
sum_man += (man << delta)
else:
delta = -delta
# x much smaller than existing sum?
if delta-bc > max_extra_prec:
if not sum_man:
sum_man, sum_exp = man, exp
else:
sum_man = (sum_man << delta) + man
sum_exp = exp
if absolute_error == MINUS_INF:
return None, None
if not sum_man:
# XXX: this is supposed to represent a scaled zero
return mpf_shift(fone, absolute_error), -1
if sum_man < 0:
sum_sign = 1
sum_man = -sum_man
else:
sum_sign = 0
sum_bc = bitcount(sum_man)
sum_accuracy = sum_exp + sum_bc - absolute_error
r = normalize(sum_sign, sum_man, sum_exp, sum_bc, target_prec,
round_nearest), sum_accuracy
#print "returning", to_str(r[0],50), r[1]
return r
def evalf_add(v, prec, options):
    """
    Numerically evaluate an Add node.

    Evaluates all arguments, sums real and imaginary parts separately
    with add_terms, and restarts at higher working precision if
    cancellation made the result less accurate than target_prec.
    options['maxprec'] is temporarily capped at 2*prec to bound the
    work done by recursive calls, and always restored on exit.
    """
    args = v.args
    target_prec = prec
    i = 0
    oldmaxprec = options.get('maxprec', DEFAULT_MAXPREC)
    options['maxprec'] = min(oldmaxprec, 2*prec)
    try:
        while 1:
            # +10 guard bits for each argument
            terms = [evalf(arg, prec+10, options) for arg in args]
            re, re_acc = add_terms([(a[0],a[2]) for a in terms if a[0]], prec, target_prec)
            im, im_acc = add_terms([(a[1],a[3]) for a in terms if a[1]], prec, target_prec)
            accuracy = complex_accuracy((re, im, re_acc, im_acc))
            if accuracy >= target_prec:
                if options.get('verbose'):
                    print "ADD: wanted", target_prec, "accurate bits, got", re_acc, im_acc
                return re, im, re_acc, im_acc
            else:
                diff = target_prec - accuracy
                # give up once the extra precision spent exceeds maxprec
                if (prec-target_prec) > options.get('maxprec', DEFAULT_MAXPREC):
                    return re, im, re_acc, im_acc
                # bump precision geometrically (2**i) or by the observed
                # shortfall, whichever is larger
                prec = prec + max(10+2**i, diff)
                options['maxprec'] = min(oldmaxprec, 2*prec)
                if options.get('verbose'):
                    print "ADD: restarting with prec", prec
                i += 1
    finally:
        # restore the caller's precision cap
        options['maxprec'] = oldmaxprec
def evalf_mul(v, prec, options):
    """
    Numerically evaluate a Mul node.

    Pure real and pure imaginary factors are multiplied exactly on
    integer mantissas (tracking powers of i in ``direction``); any
    genuinely complex factors are multiplied in afterwards using
    four real multiplications and add_terms.
    """
    args = v.args
    # With guard digits, multiplication in the real case does not destroy
    # accuracy. This is also true in the complex case when considering the
    # total accuracy; however accuracy for the real or imaginary parts
    # separately may be lower.
    acc = prec
    target_prec = prec
    # XXX: big overestimate
    prec = prec + len(args) + 5
    direction = 0
    # Empty product is 1
    man, exp, bc = MPZ(1), 0, 1
    direction = 0
    complex_factors = []
    # First, we multiply all pure real or pure imaginary numbers.
    # direction tells us that the result should be multiplied by
    # i**direction
    for arg in args:
        re, im, re_acc, im_acc = evalf(arg, prec, options)
        if re and im:
            # genuinely complex; defer to the second pass
            complex_factors.append((re, im, re_acc, im_acc))
            continue
        elif re:
            (s, m, e, b), w_acc = re, re_acc
        elif im:
            (s, m, e, b), w_acc = im, im_acc
            direction += 1
        else:
            # a zero factor annihilates the whole product
            return None, None, None, None
        # negative sign contributes i**2 = -1
        direction += 2*s
        man *= m
        exp += e
        bc += b
        # keep the exact mantissa from growing without bound
        if bc > 3*prec:
            man >>= prec
            exp += prec
        acc = min(acc, w_acc)
    sign = (direction & 2) >> 1
    v = normalize(sign, man, exp, bitcount(man), prec, round_nearest)
    if complex_factors:
        # make existing real scalar look like an imaginary and
        # multiply by the remaining complex numbers
        re, im = v, (0, MPZ(0), 0, 0)
        for wre, wim, wre_acc, wim_acc in complex_factors:
            # acc is the overall accuracy of the product; we aren't
            # computing exact accuracies of the product.
            acc = min(acc,
                complex_accuracy((wre, wim, wre_acc, wim_acc)))
            # (re + im*i)*(wre + wim*i) expanded into four real products
            A = mpf_mul(re, wre, prec)
            B = mpf_mul(mpf_neg(im), wim, prec)
            C = mpf_mul(re, wim, prec)
            D = mpf_mul(im, wre, prec)
            re, xre_acc = add_terms([(A, acc), (B, acc)], prec, target_prec)
            im, xim_acc = add_terms([(C, acc), (D, acc)], prec, target_prec)
        if options.get('verbose'):
            print "MUL: wanted", target_prec, "accurate bits, got", acc
        # multiply by i
        if direction & 1:
            return mpf_neg(im), re, acc, acc
        else:
            return re, im, acc, acc
    else:
        # multiply by i
        if direction & 1:
            return None, v, None, acc
        else:
            return v, None, acc, None
def evalf_pow(v, prec, options):
    """
    Numerically evaluate a Pow node.

    Fast paths: integer exponents (mpf_pow_int, with exact handling of
    purely real / purely imaginary bases), square roots, and exp
    (base is E).  The general case evaluates the exponent first to
    size the working precision, then dispatches to the appropriate
    mpmath power routine.
    """
    target_prec = prec
    base, exp = v.args
    # We handle x**n separately. This has two purposes: 1) it is much
    # faster, because we avoid calling evalf on the exponent, and 2) it
    # allows better handling of real/imaginary parts that are exactly zero
    if exp.is_Integer:
        p = exp.p
        # Exact
        if not p:
            return fone, None, prec, None
        # Exponentiation by p magnifies relative error by |p|, so the
        # base must be evaluated with increased precision if p is large
        prec += int(math.log(abs(p),2))
        re, im, re_acc, im_acc = evalf(base, prec+5, options)
        # Real to integer power
        if re and not im:
            return mpf_pow_int(re, p, target_prec), None, target_prec, None
        # (x*I)**n = I**n * x**n
        if im and not re:
            z = mpf_pow_int(im, p, target_prec)
            # i**p cycles with period 4
            case = p % 4
            if case == 0: return z, None, target_prec, None
            if case == 1: return None, z, None, target_prec
            if case == 2: return mpf_neg(z), None, target_prec, None
            if case == 3: return None, mpf_neg(z), None, target_prec
        # Zero raised to an integer power
        if not re:
            return None, None, None, None
        # General complex number to arbitrary integer power
        re, im = libmp.mpc_pow_int((re, im), p, prec)
        # Assumes full accuracy in input
        return finalize_complex(re, im, target_prec)
    # Pure square root
    if exp is S.Half:
        # NOTE(review): the fourth name is spelled yim_acc here (and at the
        # general-case base evaluation below) but is unused, so harmless
        xre, xim, xre_acc, yim_acc = evalf(base, prec+5, options)
        # General complex square root
        if xim:
            re, im = libmp.mpc_sqrt((xre or fzero, xim), prec)
            return finalize_complex(re, im, prec)
        if not xre:
            return None, None, None, None
        # Square root of a negative real number
        if mpf_lt(xre, fzero):
            return None, mpf_sqrt(mpf_neg(xre), prec), None, prec
        # Positive square root
        return mpf_sqrt(xre, prec), None, prec, None
    # We first evaluate the exponent to find its magnitude
    # This determines the working precision that must be used
    prec += 10
    yre, yim, yre_acc, yim_acc = evalf(exp, prec, options)
    # Special cases: x**0
    if not (yre or yim):
        return fone, None, prec, None
    ysize = fastlog(yre)
    # Restart if too big
    # XXX: prec + ysize might exceed maxprec
    if ysize > 5:
        prec += ysize
        yre, yim, yre_acc, yim_acc = evalf(exp, prec, options)
    # Pure exponential function; no need to evalf the base
    if base is S.Exp1:
        if yim:
            re, im = libmp.mpc_exp((yre or fzero, yim), prec)
            return finalize_complex(re, im, target_prec)
        return mpf_exp(yre, target_prec), None, target_prec, None
    xre, xim, xre_acc, yim_acc = evalf(base, prec+5, options)
    # 0**y
    if not (xre or xim):
        return None, None, None, None
    # (real ** complex) or (complex ** complex)
    if yim:
        re, im = libmp.mpc_pow((xre or fzero, xim or fzero), (yre or fzero, yim),
            target_prec)
        return finalize_complex(re, im, target_prec)
    # complex ** real
    if xim:
        re, im = libmp.mpc_pow_mpf((xre or fzero, xim), yre, target_prec)
        return finalize_complex(re, im, target_prec)
    # negative ** real
    elif mpf_lt(xre, fzero):
        re, im = libmp.mpc_pow_mpf((xre, fzero), yre, target_prec)
        return finalize_complex(re, im, target_prec)
    # positive ** real
    else:
        return mpf_pow(xre, yre, target_prec), None, target_prec, None
#----------------------------------------------------------------------------#
# #
# Special functions #
# #
#----------------------------------------------------------------------------#
def evalf_trig(v, prec, options):
    """
    This function handles sin and cos of real arguments.

    The argument is evaluated with extra absolute precision; if the
    result lands very close to a root of the function (catastrophic
    loss of relative accuracy), the argument is re-evaluated at higher
    precision and the computation repeated.

    TODO: should also handle tan and complex arguments.
    """
    if v.func is C.cos:
        func = mpf_cos
    elif v.func is C.sin:
        func = mpf_sin
    else:
        raise NotImplementedError
    arg = v.args[0]
    # 20 extra bits is possibly overkill. It does make the need
    # to restart very unlikely
    xprec = prec + 20
    re, im, re_acc, im_acc = evalf(arg, xprec, options)
    if im:
        raise NotImplementedError
    if not re:
        # argument is exactly zero: cos(0) = 1, sin(0) = 0
        if v.func is C.cos:
            return fone, None, prec, None
        elif v.func is C.sin:
            return None, None, None, None
        else:
            raise NotImplementedError
    # For trigonometric functions, we are interested in the
    # fixed-point (absolute) accuracy of the argument.
    xsize = fastlog(re)
    # Magnitude <= 1.0. OK to compute directly, because there is no
    # danger of hitting the first root of cos (with sin, magnitude
    # <= 2.0 would actually be ok)
    if xsize < 1:
        return func(re, prec, round_nearest), None, prec, None
    # Very large
    if xsize >= 10:
        xprec = prec + xsize
        re, im, re_acc, im_acc = evalf(arg, xprec, options)
    # Need to repeat in case the argument is very close to a
    # multiple of pi (or pi/2), hitting close to a root
    while 1:
        y = func(re, prec, round_nearest)
        ysize = fastlog(y)
        # a tiny result means we are near a root and lost 'gap' bits
        gap = -ysize
        accuracy = (xprec - xsize) - gap
        if accuracy < prec:
            if options.get('verbose'):
                print "SIN/COS", accuracy, "wanted", prec, "gap", gap
                print to_str(y,10)
            # stop retrying once the precision cap is exceeded
            if xprec > options.get('maxprec', DEFAULT_MAXPREC):
                return y, None, accuracy, None
            xprec += gap
            re, im, re_acc, im_acc = evalf(arg, xprec, options)
            continue
        else:
            return y, None, prec, None
def evalf_log(expr, prec, options):
    """
    Numerically evaluate log(x).

    Complex arguments are handled via log|x| + i*atan2(im, re).  For
    real arguments near 1 (where log is tiny and the direct evaluation
    cancels), the value is recomputed as log(1 + (x-1)) with the
    difference evaluated accurately by evalf_add.
    """
    arg = expr.args[0]
    workprec = prec+10
    xre, xim, xacc, _ = evalf(arg, workprec, options)
    if xim:
        # XXX: use get_abs etc instead
        re = evalf_log(C.log(C.abs(arg, evaluate=False), evaluate=False), prec, options)
        im = mpf_atan2(xim, xre or fzero, prec)
        return re[0], im, re[2], prec
    # log of a negative real picks up an imaginary part of pi
    imaginary_term = (mpf_cmp(xre, fzero) < 0)
    re = mpf_log(mpf_abs(xre), prec, round_nearest)
    size = fastlog(re)
    if prec - size > workprec:
        # We actually need to compute 1+x accurately, not x
        arg = C.Add(S.NegativeOne,arg,evaluate=False)
        xre, xim, xre_acc, xim_acc = evalf_add(arg, prec, options)
        prec2 = workprec - fastlog(xre)
        re = mpf_log(mpf_add(xre, fone, prec2), prec, round_nearest)
    re_acc = prec
    if imaginary_term:
        return re, mpf_pi(prec), re_acc, prec
    else:
        return re, None, re_acc, None
def evalf_atan(v, prec, options):
    """Numerically evaluate atan of a real argument.

    Complex arguments are not supported and raise NotImplementedError.
    """
    argument = v.args[0]
    # a few guard bits for the argument evaluation
    real, imag, _, _ = evalf(argument, prec + 5, options)
    if imag:
        raise NotImplementedError
    return mpf_atan(real, prec, round_nearest), None, prec, None
def evalf_piecewise(expr, prec, options):
    """
    Numerically evaluate a Piecewise expression.

    Substitutions from options['subs'] are applied first so the correct
    branch can be selected, then the (possibly simplified) result is
    re-dispatched through evalf.  Raw Python floats/ints that may fall
    out of the substitution are promoted to SymPy numbers first.

    Raises NotImplementedError if undefined symbols remain after
    substitution.
    """
    # NOTE: this function was defined twice verbatim in the original
    # source; the redundant second definition has been removed.
    if 'subs' in options:
        expr = expr.subs(options['subs'])
        # consume the substitutions so recursive calls don't reapply them
        del options['subs']
    if hasattr(expr,'func'):
        return evalf(expr, prec, options)
    if type(expr) == float:
        return evalf(C.Real(expr), prec, options)
    if type(expr) == int:
        return evalf(C.Integer(expr), prec, options)
    # We still have undefined symbols
    raise NotImplementedError
def evalf_bernoulli(expr, prec, options):
    """Numerically evaluate a Bernoulli number B_n to *prec* bits."""
    index = expr.args[0]
    if not index.is_Integer:
        raise ValueError("Bernoulli number index must be an integer")
    value = mpf_bernoulli(int(index), prec, round_nearest)
    # an exactly-zero Bernoulli number is reported as an empty real part
    if value == fzero:
        return None, None, None, None
    return value, None, prec, None
#----------------------------------------------------------------------------#
# #
# High-level operations #
# #
#----------------------------------------------------------------------------#
def as_mpmath(x, prec, options):
    """
    Convert a SymPy expression to an mpmath mpf/mpc number, evaluating
    it numerically at *prec* bits if necessary.  Zero and the signed
    infinities are mapped directly to their mpmath counterparts.
    """
    x = sympify(x)
    if isinstance(x, C.Zero):
        return mpf(0)
    if isinstance(x, C.Infinity):
        return mpf('inf')
    if isinstance(x, C.NegativeInfinity):
        return mpf('-inf')
    # XXX
    re, im, _, _ = evalf(x, prec, options)
    if im:
        return mpc(re or fzero, im)
    return mpf(re)
def do_integral(expr, prec, options):
    """
    Numerically evaluate a definite Integral at *prec* bits using
    mpmath quadrature (tanh-sinh by default, quadosc when
    options['quad'] == 'osc').

    The integrand closure records, as a side channel, which parts
    (real/imaginary) appeared and the largest term magnitudes seen,
    which are used afterwards to estimate the result's accuracy.
    mpmath's global precision and options['maxprec'] are temporarily
    adjusted and always restored.
    """
    func = expr.args[0]
    x, (xlow, xhigh) = expr.args[1][0]
    orig = mp.prec
    oldmaxprec = options.get('maxprec', DEFAULT_MAXPREC)
    options['maxprec'] = min(oldmaxprec, 2*prec)
    try:
        mp.prec = prec+5
        xlow = as_mpmath(xlow, prec+15, options)
        xhigh = as_mpmath(xhigh, prec+15, options)
        # Integration is like summation, and we can phone home from
        # the integrand function to update accuracy summation style
        # Note that this accuracy is inaccurate, since it fails
        # to account for the variable quadrature weights,
        # but it is better than nothing
        have_part = [False, False]
        max_real_term = [MINUS_INF]
        max_imag_term = [MINUS_INF]
        def f(t):
            # integrand as an mpmath-callable function of t
            re, im, re_acc, im_acc = evalf(func, mp.prec, {'subs':{x:t}})
            have_part[0] = re or have_part[0]
            have_part[1] = im or have_part[1]
            max_real_term[0] = max(max_real_term[0], fastlog(re))
            max_imag_term[0] = max(max_imag_term[0], fastlog(im))
            if im:
                return mpc(re or fzero, im)
            return mpf(re or fzero)
        if options.get('quad') == 'osc':
            # oscillatory quadrature needs the period of the
            # sin/cos factor, extracted by pattern matching
            A = C.Wild('A', exclude=[x])
            B = C.Wild('B', exclude=[x])
            D = C.Wild('D')
            m = func.match(C.cos(A*x+B)*D)
            if not m:
                m = func.match(C.sin(A*x+B)*D)
            if not m:
                raise ValueError("An integrand of the form sin(A*x+B)*f(x) "
                    "or cos(A*x+B)*f(x) is required for oscillatory quadrature")
            period = as_mpmath(2*S.Pi/m[A], prec+15, options)
            result = quadosc(f, [xlow, xhigh], period=period)
            # XXX: quadosc does not do error detection yet
            quadrature_error = MINUS_INF
        else:
            result, quadrature_error = quadts(f, [xlow, xhigh], error=1)
            quadrature_error = fastlog(quadrature_error._mpf_)
    finally:
        # restore global mpmath precision and the caller's maxprec
        options['maxprec'] = oldmaxprec
        mp.prec = orig
    if have_part[0]:
        re = result.real._mpf_
        if re == fzero:
            # encode a "scaled zero" at the smallest plausible magnitude
            re = mpf_shift(fone, min(-prec,-max_real_term[0],-quadrature_error))
            re_acc = -1
        else:
            re_acc = -max(max_real_term[0]-fastlog(re)-prec, quadrature_error)
    else:
        re, re_acc = None, None
    if have_part[1]:
        im = result.imag._mpf_
        if im == fzero:
            im = mpf_shift(fone, min(-prec,-max_imag_term[0],-quadrature_error))
            im_acc = -1
        else:
            im_acc = -max(max_imag_term[0]-fastlog(im)-prec, quadrature_error)
    else:
        im, im_acc = None, None
    result = re, im, re_acc, im_acc
    return result
def evalf_integral(expr, prec, options):
    """Evaluate an Integral, retrying at increasing working precision
    until the requested accuracy (or options['maxprec']) is reached."""
    attempt = 0
    wprec = prec
    prec_cap = options.get('maxprec', INF)
    while 1:
        res = do_integral(expr, wprec, options)
        acc = complex_accuracy(res)
        if acc >= prec or wprec >= prec_cap:
            return res
        # grow the precision bump exponentially with each failed attempt
        wprec += prec - max(-2**attempt, acc)
        attempt += 1
def check_convergence(numer, denom, n):
    """
    Returns (h, g, p) where
    -- h is:
        > 0 for convergence of rate 1/factorial(n)**h
        < 0 for divergence of rate factorial(n)**(-h)
        = 0 for geometric or polynomial convergence or divergence

    -- abs(g) is:
        > 1 for geometric convergence of rate 1/h**n
        < 1 for geometric divergence of rate h**n
        = 1 for polynomial convergence or divergence

        (g < 0 indicates an alternating series)

    -- p is:
        > 1 for polynomial convergence of rate 1/n**h
        <= 1 for polynomial divergence of rate n**(-h)
    """
    # term ratio numer(n)/denom(n) analyzed via the two polynomials' degrees
    npol = C.Poly(numer, n)
    dpol = C.Poly(denom, n)
    p = npol.degree()
    q = dpol.degree()
    rate = q - p
    if rate:
        # factorial-type behavior dominated by the degree difference
        return rate, None, None
    constant = dpol.LC() / npol.LC()
    if abs(constant) != 1:
        # geometric behavior with ratio 1/constant
        return rate, constant, None
    if npol.degree() == dpol.degree() == 0:
        return rate, constant, 0
    # polynomial behavior: compare subleading coefficients
    pc = npol.all_coeffs()[1]
    qc = dpol.all_coeffs()[1]
    return rate, constant, qc-pc
def hypsum(expr, n, start, prec):
    """
    Sum a rapidly convergent infinite hypergeometric series with
    given general term, e.g. e = hypsum(1/factorial(n), n). The
    quotient between successive terms must be a quotient of integer
    polynomials.

    Geometric-or-faster series are summed directly in fixed-point
    arithmetic; polynomially convergent series are summed with
    mpmath's Richardson extrapolation at quadrupled precision.
    Raises ValueError for divergent series, NotImplementedError if
    the term ratio is not hypergeometric.
    """
    from sympy import hypersimp, lambdify
    if start:
        expr = expr.subs(n, n+start)
    hs = hypersimp(expr, n)
    if hs is None:
        raise NotImplementedError("a hypergeometric series is required")
    num, den = hs.as_numer_denom()
    # term(k+1) = term(k) * func1(k) / func2(k)
    func1 = lambdify(n, num)
    func2 = lambdify(n, den)
    h, g, p = check_convergence(num, den, n)
    if h < 0:
        raise ValueError("Sum diverges like (n!)^%i" % (-h))
    # Direct summation if geometric or faster
    if h > 0 or (h == 0 and abs(g) > 1):
        # NOTE(review): `one` appears unused here
        one = MPZ(1) << prec
        # initial term as a fixed-point integer with prec fractional bits
        term = expr.subs(n, 0)
        term = (MPZ(term.p) << prec) // term.q
        s = term
        k = 1
        while abs(term) > 5:
            term *= MPZ(func1(k-1))
            term //= MPZ(func2(k-1))
            s += term
            k += 1
        return from_man_exp(s, -prec)
    else:
        alt = g < 0
        if abs(g) < 1:
            raise ValueError("Sum diverges like (%i)^n" % abs(1/g))
        if p < 1 or (p == 1 and not alt):
            raise ValueError("Sum diverges like n^%i" % (-p))
        # We have polynomial convergence: use Richardson extrapolation
        # Need to use at least quad precision because a lot of cancellation
        # might occur in the extrapolation process
        prec2 = 4*prec
        one = MPZ(1) << prec2
        term = expr.subs(n, 0)
        term = (MPZ(term.p) << prec2) // term.q
        def summand(k, _term=[term]):
            # mutable default keeps the running term between calls
            if k:
                k = int(k)
                _term[0] *= MPZ(func1(k-1))
                _term[0] //= MPZ(func2(k-1))
            return make_mpf(from_man_exp(_term[0], -prec2))
        orig = mp.prec
        try:
            mp.prec = prec
            v = nsum(summand, [0, mpmath_inf], method='richardson')
        finally:
            mp.prec = orig
        return v._mpf_
def evalf_sum(expr, prec, options):
    """
    Numerically evaluate a Sum over (n, a, oo).

    Tries fast hypergeometric summation (hypsum) first; if that raises
    NotImplementedError, falls back to Euler-Maclaurin summation with
    successively doubled numbers of terms until the error estimate
    drops below the target epsilon.
    """
    func = expr.function
    limits = expr.limits
    if len(limits) != 1 or not isinstance(limits[0], tuple) or \
        len(limits[0]) != 3:
        raise NotImplementedError
    prec2 = prec+10
    try:
        n, a, b = limits[0]
        # only sums to infinity with an integer lower bound are supported
        if b != S.Infinity or a != int(a):
            raise NotImplementedError
        # Use fast hypergeometric summation if possible
        v = hypsum(func, n, int(a), prec2)
        delta = prec - fastlog(v)
        # a tiny result needs extra bits to reach full relative accuracy
        if fastlog(v) < -10:
            v = hypsum(func, n, int(a), delta)
        return v, None, min(prec, delta), None
    except NotImplementedError:
        # Euler-Maclaurin summation for general series
        eps = C.Real(2.0)**(-prec)
        for i in range(1, 5):
            m = n = 2**i * prec
            s, err = expr.euler_maclaurin(m=m, n=n, eps=eps, \
                eval_integral=False)
            err = err.evalf()
            if err <= eps:
                break
        # convert the error bound into an accuracy in bits
        err = fastlog(evalf(abs(err), 20, options)[0])
        re, im, re_acc, im_acc = evalf(s, prec2, options)
        re_acc = max(re_acc, -err)
        im_acc = max(im_acc, -err)
        return re, im, re_acc, im_acc
#----------------------------------------------------------------------------#
# #
# Symbolic interface #
# #
#----------------------------------------------------------------------------#
def evalf_symbol(x, prec, options):
    """
    Numerically evaluate a Symbol by looking up its value in
    options['subs'].  mpf values are returned directly; symbolic
    values are evaluated via evalf, with the result cached per symbol
    name in options['_cache'] keyed by the precision achieved.
    """
    val = options['subs'][x]
    if isinstance(val, mpf):
        if not val:
            # mpf zero -> empty real part
            return None, None, None, None
        return val._mpf_, None, prec, None
    else:
        if not '_cache' in options:
            options['_cache'] = {}
        cache = options['_cache']
        cached, cached_prec = cache.get(x.name, (None, MINUS_INF))
        # reuse the cached value only if it was computed at >= prec
        if cached_prec >= prec:
            return cached
        v = evalf(sympify(val), prec, options)
        cache[x.name] = (v, prec)
        return v
# Dispatch table mapping SymPy classes to their evalf handlers.
# Populated lazily by _create_evalf_table() on first use.
evalf_table = None
def _create_evalf_table():
    """Populate the global evalf_table dispatch dictionary, mapping
    each supported SymPy class to its numerical evaluation handler."""
    global evalf_table
    evalf_table = {
    C.Symbol : evalf_symbol,
    C.Dummy : evalf_symbol,
    # exact atoms are converted to raw mpf tuples directly
    C.Real : lambda x, prec, options: (x._mpf_, None, prec, None),
    C.Rational : lambda x, prec, options: (from_rational(x.p, x.q, prec), None, prec, None),
    C.Integer : lambda x, prec, options: (from_int(x.p, prec), None, prec, None),
    C.Zero : lambda x, prec, options: (None, None, prec, None),
    C.One : lambda x, prec, options: (fone, None, prec, None),
    C.Half : lambda x, prec, options: (fhalf, None, prec, None),
    C.Pi : lambda x, prec, options: (mpf_pi(prec), None, prec, None),
    C.Exp1 : lambda x, prec, options: (mpf_e(prec), None, prec, None),
    C.ImaginaryUnit : lambda x, prec, options: (None, fone, None, prec),
    C.NegativeOne : lambda x, prec, options: (fnone, None, prec, None),
    # exp(x) is rewritten as E**x and routed through evalf_pow
    C.exp : lambda x, prec, options: evalf_pow(C.Pow(S.Exp1, x.args[0],
        evaluate=False), prec, options),
    C.cos : evalf_trig,
    C.sin : evalf_trig,
    C.Add : evalf_add,
    C.Mul : evalf_mul,
    C.Pow : evalf_pow,
    C.log : evalf_log,
    C.atan : evalf_atan,
    C.abs : evalf_abs,
    C.re : evalf_re,
    C.im : evalf_im,
    C.floor : evalf_floor,
    C.ceiling : evalf_ceiling,
    C.Integral : evalf_integral,
    C.Sum : evalf_sum,
    C.Piecewise : evalf_piecewise,
    C.bernoulli : evalf_bernoulli,
    }
def evalf(x, prec, options):
    """
    Core numerical evaluation dispatcher.

    Looks up a handler for x's class in evalf_table; unknown classes
    fall back to the object's own _eval_evalf.  Returns the canonical
    (re, im, re_acc, im_acc) tuple, optionally chopped
    (options['chop']) and/or accuracy-checked (options['strict']).
    """
    try:
        rf = evalf_table[x.func]
        r = rf(x, prec, options)
    except KeyError:
        #r = finalize_complex(x._eval_evalf(prec)._mpf_, fzero, prec)
        try:
            # Fall back to ordinary evalf if possible
            if 'subs' in options:
                x = x.subs(options['subs'])
            r = x._eval_evalf(prec)._mpf_, None, prec, None
        except AttributeError:
            # the fallback produced nothing usable
            raise NotImplementedError
    if options.get("verbose"):
        print "### input", x
        print "### output", to_str(r[0] or fzero, 50)
        print "### raw", r#r[0], r[2]
        print
    if options.get("chop"):
        r = chop_parts(r, prec)
    if options.get("strict"):
        check_target(x, r, prec)
    return r
class EvalfMixin(object):
    """Mixin class adding evalf capabililty.

    Provides the public ``evalf``/``n`` entry point plus internal
    helpers for binary-precision evaluation and conversion to mpmath
    numbers.
    """
    __slots__ = []
    def evalf(self, n=15, subs=None, maxn=100, chop=False, strict=False, quad=None, verbose=False):
        """
        Evaluate the given formula to an accuracy of n digits.
        Optional keyword arguments:

        subs=<dict>
            Substitute numerical values for symbols, e.g.
            subs={x:3, y:1+pi}.

        maxn=<integer>
            Allow a maximum temporary working precision of maxn digits
            (default=100)

        chop=<bool>
            Replace tiny real or imaginary parts in subresults
            by exact zeros (default=False)

        strict=<bool>
            Raise PrecisionExhausted if any subresult fails to evaluate
            to full accuracy, given the available maxprec
            (default=False)

        quad=<str>
            Choose algorithm for numerical quadrature. By default,
            tanh-sinh quadrature is used. For oscillatory
            integrals on an infinite interval, try quad='osc'.

        verbose=<bool>
            Print debug information (default=False)
        """
        if not evalf_table:
            _create_evalf_table()
        # convert requested decimal digits to binary working precision
        prec = dps_to_prec(n)
        options = {'maxprec': max(prec,int(maxn*LG10)), 'chop': chop,
            'strict': strict, 'verbose': verbose}
        if subs is not None:
            options['subs'] = subs
        if quad is not None:
            options['quad'] = quad
        try:
            # +4 guard bits for the top-level evaluation
            result = evalf(self, prec+4, options)
        except NotImplementedError:
            # Fall back to the ordinary evalf
            v = self._eval_evalf(prec)
            if v is None:
                return self
            try:
                # If the result is numerical, normalize it
                result = evalf(v, prec, options)
            except:
                # Probably contains symbols or unknown functions
                # (deliberate broad catch: return the partial result)
                return v
        re, im, re_acc, im_acc = result
        if re:
            # round the real part to its achieved accuracy (>= 1 bit)
            p = max(min(prec, re_acc), 1)
            #re = mpf_pos(re, p, round_nearest)
            re = C.Real._new(re, p)
        else:
            re = S.Zero
        if im:
            p = max(min(prec, im_acc), 1)
            #im = mpf_pos(im, p, round_nearest)
            im = C.Real._new(im, p)
            return re + im*S.ImaginaryUnit
        else:
            return re
    # n is a short alias for evalf
    n = evalf
    def _evalf(self, prec):
        """Helper for evalf. Does the same thing but takes binary precision"""
        r = self._eval_evalf(prec)
        if r is None:
            r = self
        return r
    def _eval_evalf(self, prec):
        # default: no numerical evaluation available (returns None)
        return
    def _seq_eval_evalf(self, prec):
        """Evaluate by mapping _evalf over the arguments and rebuilding."""
        return self.func(*[s._evalf(prec) for s in self.args])
    def _to_mpmath(self, prec, allow_ints=True):
        """Convert self to an mpmath mpf/mpc (or int if allow_ints);
        raises ValueError when no numerical value can be produced."""
        # mpmath functions accept ints as input
        errmsg = "cannot convert to mpmath number"
        if allow_ints and self.is_Integer:
            return self.p
        v = self._eval_evalf(prec)
        if v is None:
            raise ValueError(errmsg)
        if v.is_Real:
            return make_mpf(v._mpf_)
        # Number + Number*I is also fine
        re, im = v.as_real_imag()
        if allow_ints and re.is_Integer:
            re = from_int(re.p)
        elif re.is_Real:
            re = re._mpf_
        else:
            raise ValueError(errmsg)
        if allow_ints and im.is_Integer:
            im = from_int(im.p)
        elif im.is_Real:
            im = im._mpf_
        else:
            raise ValueError(errmsg)
        return make_mpc((re, im))
def N(x, n=15, **options):
    """
    Numerically evaluate *x* to *n* digits.

    Shorthand for ``sympify(x).evalf(n, **options)``; ``N()`` and
    ``.evalf()`` are equivalent, use whichever you prefer.

    Example:
    >>> from sympy import Sum, Symbol, oo, N
    >>> from sympy.abc import k
    >>> Sum(1/k**k, (k, 1, oo))
    Sum(k**(-k), (k, 1, oo))
    >>> N(Sum(1/k**k, (k, 1, oo)), 4)
    1.291
    """
    expr = sympify(x)
    return expr.evalf(n, **options)
| bsd-3-clause | 1dcfe321787f1983986b7cd72aed6945 | 33.541407 | 99 | 0.542588 | 3.501219 | false | false | false | false |
mattpap/sympy-polys | sympy/core/sympify.py | 1 | 4396 | """sympify -- convert objects SymPy internal format"""
from types import NoneType
from inspect import getmro
from core import BasicMeta
class SympifyError(ValueError):
def __init__(self, expr, base_exc=None):
self.expr = expr
self.base_exc = base_exc
def __str__(self):
if self.base_exc is None:
return "SympifyError: %r" % (self.expr,)
return "Sympify of expression '%s' failed, because of exception being raised:\n%s: %s" % (self.expr, self.base_exc.__class__.__name__, str(self.base_exc))
sympy_classes = BasicMeta.all_classes
converter = {}
def sympify(a, locals=None, convert_xor=True, strict=False):
"""
Converts an arbitrary expression to a type that can be used inside sympy.
For example, it will convert python ints into instance of sympy.Rational,
floats into instances of sympy.Real, etc. It is also able to coerce symbolic
expressions which inherit from Basic. This can be useful in cooperation
with SAGE.
It currently accepts as arguments:
- any object defined in sympy (except matrices [TODO])
- standard numeric python types: int, long, float, Decimal
- strings (like "0.09" or "2e-19")
- booleans, including `None` (will leave them unchanged)
If the argument is already a type that sympy understands, it will do
nothing but return that value. This can be used at the beginning of a
function to ensure you are working with the correct type.
>>> from sympy import sympify
>>> sympify(2).is_integer
True
>>> sympify(2).is_real
True
>>> sympify(2.0).is_real
True
>>> sympify("2.0").is_real
True
>>> sympify("2e-45").is_real
True
If the option `strict` is set to `True`, only the types for which an
explicit conversion has been defined are converted. In the other
cases, a SympifyError is raised.
>>> sympify(True)
True
>>> sympify(True, strict=True)
Traceback (most recent call last):
...
SympifyError: SympifyError: True
"""
try:
cls = a.__class__
except AttributeError: #a is probably an old-style class object
cls = type(a)
if cls in sympy_classes:
return a
if cls in (bool, NoneType):
if strict:
raise SympifyError(a)
else:
return a
try:
return converter[cls](a)
except KeyError:
for superclass in getmro(cls):
try:
return converter[superclass](a)
except KeyError:
continue
try:
return a._sympy_()
except AttributeError:
pass
if not isinstance(a, basestring):
for coerce in (float, int):
try:
return sympify(coerce(a))
except (TypeError, ValueError, AttributeError, SympifyError):
continue
if strict:
raise SympifyError(a)
if isinstance(a, (list, tuple, set)):
return type(a)([sympify(x) for x in a])
# At this point we were given an arbitrary expression
# which does not inherit from Basic and doesn't implement
# _sympy_ (which is a canonical and robust way to convert
# anything to SymPy expression).
#
# As a last chance, we try to take "a"'s normal form via unicode()
# and try to parse it. If it fails, then we have no luck and
# return an exception
try:
a = unicode(a)
except Exception, exc:
raise SympifyError(a, exc)
if locals is None:
locals = {}
if convert_xor:
a = a.replace('^','**')
import ast_parser
return ast_parser.parse_expr(a, locals)
def _sympify(a):
"""Short version of sympify for internal usage for __add__ and __eq__
methods where it is ok to allow some things (like Python integers
and floats) in the expression. This excludes things (like strings)
that are unwise to allow into such an expression.
>>> from sympy import Integer
>>> Integer(1) == 1
True
>>> Integer(1) == '1'
False
>>> from sympy import Symbol
>>> from sympy.abc import x
>>> x + 1
1 + x
>>> x + '1'
Traceback (most recent call last):
...
TypeError: unsupported operand type(s) for +: 'Symbol' and 'str'
see: sympify
"""
return sympify(a, strict=True)
| bsd-3-clause | 6117cc0b0a8086f6c8b2e1f257815319 | 27.921053 | 162 | 0.6101 | 4.112254 | false | false | false | false |
mattpap/sympy-polys | sympy/printing/pretty/pretty_symbology.py | 5 | 13716 | """Symbolic primitives + unicode/ASCII abstraction for pretty.py"""
import sys
warnings = ''
# first, setup unicodedate environment
try:
import unicodedata
# Python2.4 unicodedata misses some symbols, like subscript 'i', etc,
# and we still want SymPy to be fully functional under Python2.4
if sys.hexversion < 0x02050000:
unicodedata_missing = {
'GREEK SUBSCRIPT SMALL LETTER BETA' : u'\u1d66',
'GREEK SUBSCRIPT SMALL LETTER GAMMA': u'\u1d67',
'GREEK SUBSCRIPT SMALL LETTER RHO' : u'\u1d68',
'GREEK SUBSCRIPT SMALL LETTER PHI' : u'\u1d69',
'GREEK SUBSCRIPT SMALL LETTER CHI' : u'\u1d6a',
'LATIN SUBSCRIPT SMALL LETTER A' : u'\u2090',
'LATIN SUBSCRIPT SMALL LETTER E' : u'\u2091',
'LATIN SUBSCRIPT SMALL LETTER I' : u'\u1d62',
'LATIN SUBSCRIPT SMALL LETTER O' : u'\u2092',
'LATIN SUBSCRIPT SMALL LETTER R' : u'\u1d63',
'LATIN SUBSCRIPT SMALL LETTER U' : u'\u1d64',
'LATIN SUBSCRIPT SMALL LETTER V' : u'\u1d65',
'LATIN SUBSCRIPT SMALL LETTER X' : u'\u2093',
}
else:
unicodedata_missing = {}
def U(name):
"""unicode character by name or None if not found"""
try:
u = unicodedata.lookup(name)
except KeyError:
u = unicodedata_missing.get(name)
if u is None:
global warnings
warnings += 'W: no \'%s\' in unocodedata\n' % name
return u
except ImportError:
warnings += 'W: no unicodedata available\n'
U = lambda name: None
from sympy.printing.conventions import split_super_sub
# prefix conventions when constructing tables
# L - LATIN i
# G - GREEK beta
# D - DIGIT 0
# S - SYMBOL +
__all__ = ['greek','sub','sup','xsym','vobj','hobj','pretty_symbol']
_use_unicode = False
def pretty_use_unicode(flag = None):
"""Set whether pretty-printer should use unicode by default"""
global _use_unicode
global warnings
if flag is None:
return _use_unicode
if flag and warnings:
# print warnings (if any) on first unicode usage
print "I: pprint -- we are going to use unicode, but there are following problems:"
print warnings
warnings = ''
use_unicode_prev = _use_unicode
_use_unicode = flag
return use_unicode_prev
def pretty_try_use_unicode():
"""See if unicode output is available and leverage it if possible"""
try:
symbols = []
# see, if we can represent greek alphabet
for g,G in greek.itervalues():
symbols.append(g)
symbols.append(G)
# and atoms
symbols += atoms_table.values()
for s in symbols:
if s is None:
return # common symbols not present!
encoding = getattr(sys.stdout, 'encoding', None)
# this happens when e.g. stdout is redirected through a pipe, or is
# e.g. a cStringIO.StringO
if encoding is None:
return # sys.stdout has no encoding
# try to encode
s.encode(encoding)
except UnicodeEncodeError:
pass
else:
pretty_use_unicode(True)
def xstr(*args):
"""call str or unicode depending on current mode"""
if _use_unicode:
return unicode(*args)
else:
return str(*args)
# COMPATIBILITY TWEAKS
def fixup_tables():
# python2.4 unicodedata lacks some definitions
for d in sub, sup:
for k in d.keys():
if d[k] is None:
del d[k]
# GREEK
g = lambda l: U('GREEK SMALL LETTER %s' % l.upper())
G = lambda l: U('GREEK CAPITAL LETTER %s' % l.upper())
greek_letters = [
'alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta', 'eta', 'theta',
'iota', 'kappa', 'lamda', 'mu', 'nu', 'xi', 'omicron', 'pi', 'rho',
'sigma', 'tau', 'upsilon', 'phi', 'chi', 'psi', 'omega' ]
# {} greek letter -> (g,G)
greek = dict([(l, (g(l), G(l))) for l in greek_letters])
# aliases
greek['lambda'] = greek['lamda']
digit_2txt = {
'0' : 'ZERO',
'1' : 'ONE',
'2' : 'TWO',
'3' : 'THREE',
'4' : 'FOUR',
'5' : 'FIVE',
'6' : 'SIX',
'7' : 'SEVEN',
'8' : 'EIGHT',
'9' : 'NINE',
}
symb_2txt = {
'+' : 'PLUS SIGN',
'-' : 'MINUS',
'=' : 'EQUALS SIGN',
'(' : 'LEFT PARENTHESIS',
')' : 'RIGHT PARENTHESIS',
'[' : 'LEFT SQUARE BRACKET',
']' : 'RIGHT SQUARE BRACKET',
'{' : 'LEFT CURLY BRACKET',
'}' : 'RIGHT CURLY BRACKET',
# non-std
'{}' : 'CURLY BRACKET',
'sum': 'SUMMATION',
'int': 'INTEGRAL',
}
# SUBSCRIPT & SUPERSCRIPT
LSUB = lambda letter: U('LATIN SUBSCRIPT SMALL LETTER %s' % letter.upper())
GSUB = lambda letter: U('GREEK SUBSCRIPT SMALL LETTER %s' % letter.upper())
DSUB = lambda digit: U('SUBSCRIPT %s' % digit_2txt[digit])
SSUB = lambda symb: U('SUBSCRIPT %s' % symb_2txt[symb])
LSUP = lambda letter: U('SUPERSCRIPT LATIN SMALL LETTER %s' % letter.upper())
DSUP = lambda digit: U('SUPERSCRIPT %s' % digit_2txt[digit])
SSUP = lambda symb: U('SUPERSCRIPT %s' % symb_2txt[symb])
sub = {} # symb -> subscript symbol
sup = {} # symb -> superscript symbol
# latin subscripts
for l in 'aeioruvx':
sub[l] = LSUB(l)
for l in 'in':
sup[l] = LSUP(l)
for g in ['beta', 'gamma', 'rho', 'phi', 'chi']:
sub[g] = GSUB(g)
for d in [str(i) for i in range(10)]:
sub[d] = DSUB(d)
sup[d] = DSUP(d)
for s in '+-=()':
sub[s] = SSUB(s)
sup[s] = SSUP(s)
# VERTICAL OBJECTS
HUP = lambda symb: U('%s UPPER HOOK' % symb_2txt[symb])
CUP = lambda symb: U('%s UPPER CORNER' % symb_2txt[symb])
MID = lambda symb: U('%s MIDDLE PIECE' % symb_2txt[symb])
EXT = lambda symb: U('%s EXTENSION' % symb_2txt[symb])
HLO = lambda symb: U('%s LOWER HOOK' % symb_2txt[symb])
CLO = lambda symb: U('%s LOWER CORNER' % symb_2txt[symb])
TOP = lambda symb: U('%s TOP' % symb_2txt[symb])
BOT = lambda symb: U('%s BOTTOM' % symb_2txt[symb])
# {} '(' -> (extension, start, end, middle) 1-character
_xobj_unicode = {
# vertical symbols
# ext top bot mid c1
'(' : (( EXT('('), HUP('('), HLO('(') ), '('),
')' : (( EXT(')'), HUP(')'), HLO(')') ), ')'),
'[' : (( EXT('['), CUP('['), CLO('[') ), '['),
']' : (( EXT(']'), CUP(']'), CLO(']') ), ']'),
'{' : (( EXT('{}'), HUP('{'), HLO('{'), MID('{') ), '{'),
'}' : (( EXT('{}'), HUP('}'), HLO('}'), MID('}') ), '}'),
'|' : U('BOX DRAWINGS LIGHT VERTICAL'),
'lfloor' : (( EXT('['), EXT('['), CLO('[') ), U('LEFT FLOOR')),
'rfloor' : (( EXT(']'), EXT(']'), CLO(']') ), U('RIGHT FLOOR')),
'lceil' : (( EXT('['), CUP('['), EXT('[') ), U('LEFT CEILING')),
'rceil' : (( EXT(']'), CUP(']'), EXT(']') ), U('RIGHT CEILING')),
'int': (( EXT('int'), U('TOP HALF INTEGRAL'), U('BOTTOM HALF INTEGRAL') ), U('INTEGRAL')),
#'sum': ( U('N-ARY SUMMATION'), TOP('sum'), None, None, BOT('sum') ),
# horizontal objects
#'-' : '-',
'-' : U('BOX DRAWINGS LIGHT HORIZONTAL'),
'_' : U('HORIZONTAL SCAN LINE-9'), # XXX symbol ok?
# diagonal objects '\' & '/' ?
'/' : U('BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT'),
'\\': U('BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT'),
}
_xobj_ascii = {
# vertical symbols
# ext top bot mid c1
'(' : (( '|', '/', '\\' ), '('),
')' : (( '|', '\\', '/' ), ')'),
# XXX this looks ugly
# '[' : (( '|', '-', '-' ), '['),
# ']' : (( '|', '-', '-' ), ']'),
# XXX not so ugly :(
'[' : (( '[', '[', '[' ), '['),
']' : (( ']', ']', ']' ), ']'),
'{' : (( '|', '/', '\\', '<' ), '{'),
'}' : (( '|', '\\', '/', '>' ), '}'),
'|' : '|',
'int': ( ' | ', ' /', '/ ' ),
# horizontal objects
'-' : '-',
'_' : '_',
# diagonal objects '\' & '/' ?
'/' : '/',
'\\': '\\',
}
def xobj(symb, length):
    """Construct a vertical spatial object of the given length.

    Returns a list of equal-width strings (top piece, extension pieces,
    optional middle piece, bottom piece) -- or, when ``length == 1``, the
    single 1-character form of the symbol rather than a list.
    """
    assert length > 0

    # TODO robustify when no unicodedata available
    if _use_unicode:
        _xobj = _xobj_unicode
    else:
        _xobj = _xobj_ascii

    vinfo = _xobj[symb]

    c1 = top = bot = mid = None

    if not isinstance(vinfo, tuple):        # 1 entry
        ext = vinfo
    else:
        if isinstance(vinfo[0], tuple):     # (vlong), c1
            vlong = vinfo[0]
            c1 = vinfo[1]
        else:                               # (vlong) only
            vlong = vinfo

        ext = vlong[0]

        try:
            top = vlong[1]
            bot = vlong[2]
            mid = vlong[3]
        except IndexError:
            pass

    # Pieces that were not specified default to the extension character.
    if c1 is None:
        c1 = ext
    if top is None:
        top = ext
    if bot is None:
        bot = ext

    if mid is not None:
        if (length % 2) == 0:
            # even height, but we have to print it somehow anyway...
            # XXX is it ok?
            length += 1
    else:
        mid = ext

    if length == 1:
        return c1

    # Assemble: one top piece, extensions around the (optional) middle
    # piece, one bottom piece.  (Renamed the counter that used to shadow
    # the builtin ``next``.)
    res = []
    n_ext = (length - 2) // 2          # extensions on each side of the middle
    n_mid = (length - 2) - n_ext * 2   # 0 or 1 middle pieces

    res += [top]
    res += [ext] * n_ext
    res += [mid] * n_mid
    res += [ext] * n_ext
    res += [bot]

    return res
def vobj(symb, height):
    """Construct vertical object of a given height

    see: xobj
    """
    pieces = xobj(symb, height)
    return '\n'.join(pieces)
def hobj(symb, width):
    """Construct horizontal object of a given width

    see: xobj
    """
    pieces = xobj(symb, width)
    return ''.join(pieces)
# RADICAL
# n -> symbol
root = {
2 : U('SQUARE ROOT'), # U('RADICAL SYMBOL BOTTOM')
3 : U('CUBE ROOT'),
4 : U('FOURTH ROOT'),
}
# RATIONAL
VF = lambda txt: U('VULGAR FRACTION %s' % txt)
# (p,q) -> symbol
frac = {
(1,2) : VF('ONE HALF'),
(1,3) : VF('ONE THIRD'),
(2,3) : VF('TWO THIRDS'),
(1,4) : VF('ONE QUARTER'),
(3,4) : VF('THREE QUARTERS'),
(1,5) : VF('ONE FIFTH'),
(2,5) : VF('TWO FIFTHS'),
(3,5) : VF('THREE FIFTHS'),
(4,5) : VF('FOUR FIFTHS'),
(1,6) : VF('ONE SIXTH'),
(5,6) : VF('FIVE SIXTHS'),
(1,8) : VF('ONE EIGHTH'),
(3,8) : VF('THREE EIGHTHS'),
(5,8) : VF('FIVE EIGHTHS'),
(7,8) : VF('SEVEN EIGHTHS'),
}
# atom symbols
# Maps an operator token to its (ascii, unicode) rendering pair; see xsym().
_xsym = {
    '==': ('=', '='),
    '<':  ('<', '<'),
    '<=': ('<=', U('LESS-THAN OR EQUAL TO')),
    '>=': ('>=', U('GREATER-THAN OR EQUAL TO')),
    '!=': ('!=', U('NOT EQUAL TO')),
    '*':  ('*', U('DOT OPERATOR')),
}
def xsym(sym):
    """get symbology for a 'character'"""
    ascii_form, unicode_form = _xsym[sym]
    if _use_unicode:
        return unicode_form
    return ascii_form
# SYMBOLS

# Atom class name -> unicode rendering.  There is no ascii fallback table:
# pretty_atom() raises KeyError when unicode is disabled and no default is
# supplied, handing the atom back to the default printer.
atoms_table = {
    # class                 how-to-display
    'Exp1':              U('SCRIPT SMALL E'),
    'Pi':                U('GREEK SMALL LETTER PI'),
    'Infinity':          U('INFINITY'),
    'NegativeInfinity':  U('INFINITY') and ('-' + U('INFINITY')),  # XXX what to do here
    #'ImaginaryUnit':    U('GREEK SMALL LETTER IOTA'),
    #'ImaginaryUnit':    U('MATHEMATICAL ITALIC SMALL I'),
    'ImaginaryUnit':     U('DOUBLE-STRUCK ITALIC SMALL I'),
    'EmptySet':          U('EMPTY SET'),
    'Union':             U('UNION')
}
def pretty_atom(atom_name, default=None):
    """return pretty representation of an atom"""
    if _use_unicode:
        return atoms_table[atom_name]
    if default is not None:
        return default
    # No ascii form available: raise so the caller falls back to the
    # default printer.
    raise KeyError('only unicode')
def pretty_symbol(symb_name):
    """Return a pretty (unicode) representation of a symbol name.

    The name is split into base + superscripts + subscripts, the base is
    mapped to its Greek letter where possible, and the scripts are rendered
    with unicode super/subscript characters when every character of every
    script has a mapping; otherwise the plain '^'/'_' notation is used.
    """
    # let's split symb_name into symbol + index
    # UC: beta1
    # UC: f_beta
    if not _use_unicode:
        return symb_name

    name, sups, subs = split_super_sub(symb_name)

    # let's prettify name: gG is the (lowercase, uppercase) Greek pair, if
    # any.  (Previously greek.get() was re-evaluated three times here.)
    gG = greek.get(name.lower())
    if gG is not None:
        if name.islower():
            greek_name = gG[0]
        else:
            greek_name = gG[1]
        # some letters may not be available
        if greek_name is not None:
            name = greek_name

    # Let's prettify sups/subs. If it fails at one of them, pretty sups/subs
    # are not used at all.
    def pretty_list(l, mapping):
        # Map each script string through `mapping`, falling back to a
        # character-by-character translation; None means "cannot prettify".
        result = []
        for s in l:
            pretty = mapping.get(s)
            if pretty is None:
                try:  # match by separate characters
                    pretty = ''.join([mapping[c] for c in s])
                except KeyError:
                    return None
            result.append(pretty)
        return result

    pretty_sups = pretty_list(sups, sup)
    if pretty_sups is not None:
        pretty_subs = pretty_list(subs, sub)
    else:
        pretty_subs = None

    # glue the results into one string
    if pretty_subs is None:  # nice formatting of sups/subs did not work
        if len(sups) > 0:
            sups_result = '^' + '^'.join(sups)
        else:
            sups_result = ''
        if len(subs) > 0:
            subs_result = '_' + '_'.join(subs)
        else:
            subs_result = ''
    else:
        sups_result = ' '.join(pretty_sups)
        subs_result = ' '.join(pretty_subs)

    return ''.join([name, sups_result, subs_result])
# final fixup
fixup_tables()
| bsd-3-clause | 78e659233c37802ce8c0ab6580e86c9f | 26.597586 | 95 | 0.493584 | 3.189026 | false | false | false | false |
mattpap/sympy-polys | sympy/mpmath/tests/test_interval.py | 1 | 15390 | from sympy.mpmath import *
def test_interval_identity():
iv.dps = 15
assert mpi(2) == mpi(2, 2)
assert mpi(2) != mpi(-2, 2)
assert not (mpi(2) != mpi(2, 2))
assert mpi(-1, 1) == mpi(-1, 1)
assert str(mpi('0.1')) == "[0.099999999999999991673, 0.10000000000000000555]"
assert repr(mpi('0.1')) == "mpi('0.099999999999999992', '0.10000000000000001')"
u = mpi(-1, 3)
assert -1 in u
assert 2 in u
assert 3 in u
assert -1.1 not in u
assert 3.1 not in u
assert mpi(-1, 3) in u
assert mpi(0, 1) in u
assert mpi(-1.1, 2) not in u
assert mpi(2.5, 3.1) not in u
w = mpi(-inf, inf)
assert mpi(-5, 5) in w
assert mpi(2, inf) in w
assert mpi(0, 2) in mpi(0, 10)
assert not (3 in mpi(-inf, 0))
def test_interval_arithmetic():
iv.dps = 15
assert mpi(2) + mpi(3,4) == mpi(5,6)
assert mpi(1, 2)**2 == mpi(1, 4)
assert mpi(1) + mpi(0, 1e-50) == mpi(1, mpf('1.0000000000000002'))
x = 1 / (1 / mpi(3))
assert x.a < 3 < x.b
x = mpi(2) ** mpi(0.5)
iv.dps += 5
sq = iv.sqrt(2)
iv.dps -= 5
assert x.a < sq < x.b
assert mpi(1) / mpi(1, inf)
assert mpi(2, 3) / inf == mpi(0, 0)
assert mpi(0) / inf == 0
assert mpi(0) / 0 == mpi(-inf, inf)
assert mpi(inf) / 0 == mpi(-inf, inf)
assert mpi(0) * inf == mpi(-inf, inf)
assert 1 / mpi(2, inf) == mpi(0, 0.5)
assert str((mpi(50, 50) * mpi(-10, -10)) / 3) == \
'[-166.66666666666668561, -166.66666666666665719]'
assert mpi(0, 4) ** 3 == mpi(0, 64)
assert mpi(2,4).mid == 3
iv.dps = 30
a = mpi(iv.pi)
iv.dps = 15
b = +a
assert b.a < a.a
assert b.b > a.b
a = mpi(iv.pi)
assert a == +a
assert abs(mpi(-1,2)) == mpi(0,2)
assert abs(mpi(0.5,2)) == mpi(0.5,2)
assert abs(mpi(-3,2)) == mpi(0,3)
assert abs(mpi(-3,-0.5)) == mpi(0.5,3)
assert mpi(0) * mpi(2,3) == mpi(0)
assert mpi(2,3) * mpi(0) == mpi(0)
assert mpi(1,3).delta == 2
assert mpi(1,2) - mpi(3,4) == mpi(-3,-1)
assert mpi(-inf,0) - mpi(0,inf) == mpi(-inf,0)
assert mpi(-inf,0) - mpi(-inf,inf) == mpi(-inf,inf)
assert mpi(0,inf) - mpi(-inf,1) == mpi(-1,inf)
def test_interval_mul():
    """Interval multiplication, including signed infinities and zero."""
    assert mpi(-1, 0) * inf == mpi(-inf, 0)
    assert mpi(-1, 0) * -inf == mpi(0, inf)
    assert mpi(0, 1) * inf == mpi(0, inf)
    assert mpi(0, 1) * mpi(0, inf) == mpi(0, inf)
    assert mpi(-1, 1) * inf == mpi(-inf, inf)
    assert mpi(-1, 1) * mpi(0, inf) == mpi(-inf, inf)
    assert mpi(-1, 1) * mpi(-inf, inf) == mpi(-inf, inf)
    assert mpi(-inf, 0) * mpi(0, 1) == mpi(-inf, 0)
    # NOTE(review): no comparison on the next line -- it only asserts
    # truthiness of the product; possibly a missing "==" in the original.
    assert mpi(-inf, 0) * mpi(0, 0) * mpi(-inf, 0)
    assert mpi(-inf, 0) * mpi(-inf, inf) == mpi(-inf, inf)
    assert mpi(-5,0)*mpi(-32,28) == mpi(-140,160)
    assert mpi(2,3) * mpi(-1,2) == mpi(-3,6)
    # Should be undefined?
    assert mpi(inf, inf) * 0 == mpi(-inf, inf)
    assert mpi(-inf, -inf) * 0 == mpi(-inf, inf)
    assert mpi(0) * mpi(-inf,2) == mpi(-inf,inf)
    assert mpi(0) * mpi(-2,inf) == mpi(-inf,inf)
    assert mpi(-2,inf) * mpi(0) == mpi(-inf,inf)
    assert mpi(-inf,2) * mpi(0) == mpi(-inf,inf)
def test_interval_pow():
    """Interval exponentiation: integer powers, negative exponents, infinities."""
    assert mpi(3)**2 == mpi(9, 9)
    assert mpi(-3)**2 == mpi(9, 9)
    # Intervals spanning zero: an even power clamps the lower bound at 0.
    assert mpi(-3, 1)**2 == mpi(0, 9)
    assert mpi(-3, -1)**2 == mpi(1, 9)
    assert mpi(-3, -1)**3 == mpi(-27, -1)
    assert mpi(-3, 1)**3 == mpi(-27, 1)
    assert mpi(-2, 3)**2 == mpi(0, 9)
    assert mpi(-3, 2)**2 == mpi(0, 9)
    assert mpi(4) ** -1 == mpi(0.25, 0.25)
    assert mpi(-4) ** -1 == mpi(-0.25, -0.25)
    assert mpi(4) ** -2 == mpi(0.0625, 0.0625)
    assert mpi(-4) ** -2 == mpi(0.0625, 0.0625)
    # Infinite exponents on intervals straddling 1.
    assert mpi(0, 1) ** inf == mpi(0, 1)
    assert mpi(0, 1) ** -inf == mpi(1, inf)
    assert mpi(0, inf) ** inf == mpi(0, inf)
    assert mpi(0, inf) ** -inf == mpi(0, inf)
    assert mpi(1, inf) ** inf == mpi(1, inf)
    assert mpi(1, inf) ** -inf == mpi(0, 1)
    assert mpi(2, 3) ** 1 == mpi(2, 3)
    assert mpi(2, 3) ** 0 == 1
    assert mpi(1,3) ** mpi(2) == mpi(1,9)
def test_interval_sqrt():
    """Square root via the ** 0.5 power path."""
    assert mpi(4) ** 0.5 == mpi(2)
def test_interval_div():
assert mpi(0.5, 1) / mpi(-1, 0) == mpi(-inf, -0.5)
assert mpi(0, 1) / mpi(0, 1) == mpi(0, inf)
assert mpi(inf, inf) / mpi(inf, inf) == mpi(0, inf)
assert mpi(inf, inf) / mpi(2, inf) == mpi(0, inf)
assert mpi(inf, inf) / mpi(2, 2) == mpi(inf, inf)
assert mpi(0, inf) / mpi(2, inf) == mpi(0, inf)
assert mpi(0, inf) / mpi(2, 2) == mpi(0, inf)
assert mpi(2, inf) / mpi(2, 2) == mpi(1, inf)
assert mpi(2, inf) / mpi(2, inf) == mpi(0, inf)
assert mpi(-4, 8) / mpi(1, inf) == mpi(-4, 8)
assert mpi(-4, 8) / mpi(0.5, inf) == mpi(-8, 16)
assert mpi(-inf, 8) / mpi(0.5, inf) == mpi(-inf, 16)
assert mpi(-inf, inf) / mpi(0.5, inf) == mpi(-inf, inf)
assert mpi(8, inf) / mpi(0.5, inf) == mpi(0, inf)
assert mpi(-8, inf) / mpi(0.5, inf) == mpi(-16, inf)
assert mpi(-4, 8) / mpi(inf, inf) == mpi(0, 0)
assert mpi(0, 8) / mpi(inf, inf) == mpi(0, 0)
assert mpi(0, 0) / mpi(inf, inf) == mpi(0, 0)
assert mpi(-inf, 0) / mpi(inf, inf) == mpi(-inf, 0)
assert mpi(-inf, 8) / mpi(inf, inf) == mpi(-inf, 0)
assert mpi(-inf, inf) / mpi(inf, inf) == mpi(-inf, inf)
assert mpi(-8, inf) / mpi(inf, inf) == mpi(0, inf)
assert mpi(0, inf) / mpi(inf, inf) == mpi(0, inf)
assert mpi(8, inf) / mpi(inf, inf) == mpi(0, inf)
assert mpi(inf, inf) / mpi(inf, inf) == mpi(0, inf)
assert mpi(-1, 2) / mpi(0, 1) == mpi(-inf, +inf)
assert mpi(0, 1) / mpi(0, 1) == mpi(0.0, +inf)
assert mpi(-1, 0) / mpi(0, 1) == mpi(-inf, 0.0)
assert mpi(-0.5, -0.25) / mpi(0, 1) == mpi(-inf, -0.25)
assert mpi(0.5, 1) / mpi(0, 1) == mpi(0.5, +inf)
assert mpi(0.5, 4) / mpi(0, 1) == mpi(0.5, +inf)
assert mpi(-1, -0.5) / mpi(0, 1) == mpi(-inf, -0.5)
assert mpi(-4, -0.5) / mpi(0, 1) == mpi(-inf, -0.5)
assert mpi(-1, 2) / mpi(-2, 0.5) == mpi(-inf, +inf)
assert mpi(0, 1) / mpi(-2, 0.5) == mpi(-inf, +inf)
assert mpi(-1, 0) / mpi(-2, 0.5) == mpi(-inf, +inf)
assert mpi(-0.5, -0.25) / mpi(-2, 0.5) == mpi(-inf, +inf)
assert mpi(0.5, 1) / mpi(-2, 0.5) == mpi(-inf, +inf)
assert mpi(0.5, 4) / mpi(-2, 0.5) == mpi(-inf, +inf)
assert mpi(-1, -0.5) / mpi(-2, 0.5) == mpi(-inf, +inf)
assert mpi(-4, -0.5) / mpi(-2, 0.5) == mpi(-inf, +inf)
assert mpi(-1, 2) / mpi(-1, 0) == mpi(-inf, +inf)
assert mpi(0, 1) / mpi(-1, 0) == mpi(-inf, 0.0)
assert mpi(-1, 0) / mpi(-1, 0) == mpi(0.0, +inf)
assert mpi(-0.5, -0.25) / mpi(-1, 0) == mpi(0.25, +inf)
assert mpi(0.5, 1) / mpi(-1, 0) == mpi(-inf, -0.5)
assert mpi(0.5, 4) / mpi(-1, 0) == mpi(-inf, -0.5)
assert mpi(-1, -0.5) / mpi(-1, 0) == mpi(0.5, +inf)
assert mpi(-4, -0.5) / mpi(-1, 0) == mpi(0.5, +inf)
assert mpi(-1, 2) / mpi(0.5, 1) == mpi(-2.0, 4.0)
assert mpi(0, 1) / mpi(0.5, 1) == mpi(0.0, 2.0)
assert mpi(-1, 0) / mpi(0.5, 1) == mpi(-2.0, 0.0)
assert mpi(-0.5, -0.25) / mpi(0.5, 1) == mpi(-1.0, -0.25)
assert mpi(0.5, 1) / mpi(0.5, 1) == mpi(0.5, 2.0)
assert mpi(0.5, 4) / mpi(0.5, 1) == mpi(0.5, 8.0)
assert mpi(-1, -0.5) / mpi(0.5, 1) == mpi(-2.0, -0.5)
assert mpi(-4, -0.5) / mpi(0.5, 1) == mpi(-8.0, -0.5)
assert mpi(-1, 2) / mpi(-2, -0.5) == mpi(-4.0, 2.0)
assert mpi(0, 1) / mpi(-2, -0.5) == mpi(-2.0, 0.0)
assert mpi(-1, 0) / mpi(-2, -0.5) == mpi(0.0, 2.0)
assert mpi(-0.5, -0.25) / mpi(-2, -0.5) == mpi(0.125, 1.0)
assert mpi(0.5, 1) / mpi(-2, -0.5) == mpi(-2.0, -0.25)
assert mpi(0.5, 4) / mpi(-2, -0.5) == mpi(-8.0, -0.25)
assert mpi(-1, -0.5) / mpi(-2, -0.5) == mpi(0.25, 2.0)
assert mpi(-4, -0.5) / mpi(-2, -0.5) == mpi(0.25, 8.0)
# Should be undefined?
assert mpi(0, 0) / mpi(0, 0) == mpi(-inf, inf)
assert mpi(0, 0) / mpi(0, 1) == mpi(-inf, inf)
def test_interval_cos_sin():
iv.dps = 15
cos = iv.cos
sin = iv.sin
tan = iv.tan
pi = iv.pi
# Around 0
assert cos(mpi(0)) == 1
assert sin(mpi(0)) == 0
assert cos(mpi(0,1)) == mpi(0.54030230586813965399, 1.0)
assert sin(mpi(0,1)) == mpi(0, 0.8414709848078966159)
assert cos(mpi(1,2)) == mpi(-0.4161468365471424069, 0.54030230586813976501)
assert sin(mpi(1,2)) == mpi(0.84147098480789650488, 1.0)
assert sin(mpi(1,2.5)) == mpi(0.59847214410395643824, 1.0)
assert cos(mpi(-1, 1)) == mpi(0.54030230586813965399, 1.0)
assert cos(mpi(-1, 0.5)) == mpi(0.54030230586813965399, 1.0)
assert cos(mpi(-1, 1.5)) == mpi(0.070737201667702906405, 1.0)
assert sin(mpi(-1,1)) == mpi(-0.8414709848078966159, 0.8414709848078966159)
assert sin(mpi(-1,0.5)) == mpi(-0.8414709848078966159, 0.47942553860420300538)
assert mpi(-0.8414709848078966159, 1.00000000000000002e-100) in sin(mpi(-1,1e-100))
assert mpi(-2.00000000000000004e-100, 1.00000000000000002e-100) in sin(mpi(-2e-100,1e-100))
# Same interval
assert cos(mpi(2, 2.5))
assert cos(mpi(3.5, 4)) == mpi(-0.93645668729079634129, -0.65364362086361182946)
assert cos(mpi(5, 5.5)) == mpi(0.28366218546322624627, 0.70866977429126010168)
assert mpi(0.59847214410395654927, 0.90929742682568170942) in sin(mpi(2, 2.5))
assert sin(mpi(3.5, 4)) == mpi(-0.75680249530792831347, -0.35078322768961983646)
assert sin(mpi(5, 5.5)) == mpi(-0.95892427466313856499, -0.70554032557039181306)
# Higher roots
iv.dps = 55
w = 4*10**50 + mpi(0.5)
for p in [15, 40, 80]:
iv.dps = p
assert 0 in sin(4*mpi(pi))
assert 0 in sin(4*10**50*mpi(pi))
assert 0 in cos((4+0.5)*mpi(pi))
assert 0 in cos(w*mpi(pi))
assert 1 in cos(4*mpi(pi))
assert 1 in cos(4*10**50*mpi(pi))
iv.dps = 15
assert cos(mpi(2,inf)) == mpi(-1,1)
assert sin(mpi(2,inf)) == mpi(-1,1)
assert cos(mpi(-inf,2)) == mpi(-1,1)
assert sin(mpi(-inf,2)) == mpi(-1,1)
u = tan(mpi(0.5,1))
assert mpf(u.a).ae(mp.tan(0.5))
assert mpf(u.b).ae(mp.tan(1))
v = iv.cot(mpi(0.5,1))
assert mpf(v.a).ae(mp.cot(1))
assert mpf(v.b).ae(mp.cot(0.5))
# Sanity check of evaluation at n*pi and (n+1/2)*pi
for n in range(-5,7,2):
x = iv.cos(n*iv.pi)
assert -1 in x
assert x >= -1
assert x != -1
x = iv.sin((n+0.5)*iv.pi)
assert -1 in x
assert x >= -1
assert x != -1
for n in range(-6,8,2):
x = iv.cos(n*iv.pi)
assert 1 in x
assert x <= 1
if n:
assert x != 1
x = iv.sin((n+0.5)*iv.pi)
assert 1 in x
assert x <= 1
assert x != 1
for n in range(-6,7):
x = iv.cos((n+0.5)*iv.pi)
assert x.a < 0 < x.b
x = iv.sin(n*iv.pi)
if n:
assert x.a < 0 < x.b
def test_interval_complex():
# TODO: many more tests
iv.dps = 15
mp.dps = 15
assert iv.mpc(2,3) == 2+3j
assert iv.mpc(2,3) != 2+4j
assert iv.mpc(2,3) != 1+3j
assert 1+3j in iv.mpc([1,2],[3,4])
assert 2+5j not in iv.mpc([1,2],[3,4])
assert iv.mpc(1,2) + 1j == 1+3j
assert iv.mpc([1,2],[2,3]) + 2+3j == iv.mpc([3,4],[5,6])
assert iv.mpc([2,4],[4,8]) / 2 == iv.mpc([1,2],[2,4])
assert iv.mpc([1,2],[2,4]) * 2j == iv.mpc([-8,-4],[2,4])
assert iv.mpc([2,4],[4,8]) / 2j == iv.mpc([2,4],[-2,-1])
assert iv.exp(2+3j).ae(mp.exp(2+3j))
assert iv.log(2+3j).ae(mp.log(2+3j))
assert (iv.mpc(2,3) ** iv.mpc(0.5,2)).ae(mp.mpc(2,3) ** mp.mpc(0.5,2))
assert 1j in (iv.mpf(-1) ** 0.5)
assert 1j in (iv.mpc(-1) ** 0.5)
assert abs(iv.mpc(0)) == 0
assert abs(iv.mpc(inf)) == inf
assert abs(iv.mpc(3,4)) == 5
assert abs(iv.mpc(4)) == 4
assert abs(iv.mpc(0,4)) == 4
assert abs(iv.mpc(0,[2,3])) == iv.mpf([2,3])
assert abs(iv.mpc(0,[-3,2])) == iv.mpf([0,3])
assert abs(iv.mpc([3,5],[4,12])) == iv.mpf([5,13])
assert abs(iv.mpc([3,5],[-4,12])) == iv.mpf([3,13])
assert iv.mpc(2,3) ** 0 == 1
assert iv.mpc(2,3) ** 1 == (2+3j)
assert iv.mpc(2,3) ** 2 == (2+3j)**2
assert iv.mpc(2,3) ** 3 == (2+3j)**3
assert iv.mpc(2,3) ** 4 == (2+3j)**4
assert iv.mpc(2,3) ** 5 == (2+3j)**5
assert iv.mpc(2,2) ** (-1) == (2+2j) ** (-1)
assert iv.mpc(2,2) ** (-2) == (2+2j) ** (-2)
assert iv.cos(2).ae(mp.cos(2))
assert iv.sin(2).ae(mp.sin(2))
assert iv.cos(2+3j).ae(mp.cos(2+3j))
assert iv.sin(2+3j).ae(mp.sin(2+3j))
def test_interval_complex_arg():
assert iv.arg(3) == 0
assert iv.arg(0) == 0
assert iv.arg([0,3]) == 0
assert iv.arg(-3).ae(pi)
assert iv.arg(2+3j).ae(iv.arg(2+3j))
z = iv.mpc([-2,-1],[3,4])
t = iv.arg(z)
assert t.a.ae(mp.arg(-1+4j))
assert t.b.ae(mp.arg(-2+3j))
z = iv.mpc([-2,1],[3,4])
t = iv.arg(z)
assert t.a.ae(mp.arg(1+3j))
assert t.b.ae(mp.arg(-2+3j))
z = iv.mpc([1,2],[3,4])
t = iv.arg(z)
assert t.a.ae(mp.arg(2+3j))
assert t.b.ae(mp.arg(1+4j))
z = iv.mpc([1,2],[-2,3])
t = iv.arg(z)
assert t.a.ae(mp.arg(1-2j))
assert t.b.ae(mp.arg(1+3j))
z = iv.mpc([1,2],[-4,-3])
t = iv.arg(z)
assert t.a.ae(mp.arg(1-4j))
assert t.b.ae(mp.arg(2-3j))
z = iv.mpc([-1,2],[-4,-3])
t = iv.arg(z)
assert t.a.ae(mp.arg(-1-3j))
assert t.b.ae(mp.arg(2-3j))
z = iv.mpc([-2,-1],[-4,-3])
t = iv.arg(z)
assert t.a.ae(mp.arg(-2-3j))
assert t.b.ae(mp.arg(-1-4j))
z = iv.mpc([-2,-1],[-3,3])
t = iv.arg(z)
assert t.a.ae(-mp.pi)
assert t.b.ae(mp.pi)
z = iv.mpc([-2,2],[-3,3])
t = iv.arg(z)
assert t.a.ae(-mp.pi)
assert t.b.ae(mp.pi)
def test_interval_ae():
iv.dps = 15
x = iv.mpf([1,2])
assert x.ae(1) is None
assert x.ae(1.5) is None
assert x.ae(2) is None
assert x.ae(2.01) is False
assert x.ae(0.99) is False
x = iv.mpf(3.5)
assert x.ae(3.5) is True
assert x.ae(3.5+1e-15) is True
assert x.ae(3.5-1e-15) is True
assert x.ae(3.501) is False
assert x.ae(3.499) is False
assert x.ae(iv.mpf([3.5,3.501])) is None
assert x.ae(iv.mpf([3.5,4.5+1e-15])) is None
def test_interval_nstr():
iv.dps = n = 30
x = mpi(1, 2)
# FIXME: error_dps should not be necessary
assert iv.nstr(x, n, mode='plusminus', error_dps=6) == '1.5 +- 0.5'
assert iv.nstr(x, n, mode='plusminus', use_spaces=False, error_dps=6) == '1.5+-0.5'
assert iv.nstr(x, n, mode='percent') == '1.5 (33.33%)'
assert iv.nstr(x, n, mode='brackets', use_spaces=False) == '[1.0,2.0]'
assert iv.nstr(x, n, mode='brackets' , brackets=('<', '>')) == '<1.0, 2.0>'
x = mpi('5.2582327113062393041', '5.2582327113062749951')
assert iv.nstr(x, n, mode='diff') == '5.2582327113062[393041, 749951]'
assert iv.nstr(iv.cos(mpi(1)), n, mode='diff', use_spaces=False) == '0.54030230586813971740093660744[2955,3053]'
assert iv.nstr(mpi('1e123', '1e129'), n, mode='diff') == '[1.0e+123, 1.0e+129]'
exp = iv.exp
assert iv.nstr(iv.exp(mpi('5000.1')), n, mode='diff') == '3.2797365856787867069110487[0926, 1191]e+2171'
def test_mpi_from_str():
iv.dps = 15
assert iv.convert('1.5 +- 0.5') == mpi(mpf('1.0'), mpf('2.0'))
assert mpi(1, 2) in iv.convert('1.5 (33.33333333333333333333333333333%)')
assert iv.convert('[1, 2]') == mpi(1, 2)
assert iv.convert('1[2, 3]') == mpi(12, 13)
assert iv.convert('1.[23,46]e-8') == mpi('1.23e-8', '1.46e-8')
assert iv.convert('12[3.4,5.9]e4') == mpi('123.4e+4', '125.9e4')
| bsd-3-clause | 15d60cfd3185a533c3d8d0998dcc9013 | 38.664948 | 116 | 0.518778 | 2.268573 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/graphics/vertexdomain.py | 5 | 28839 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id:$
'''Manage related vertex attributes within a single vertex domain.
A vertex "domain" consists of a set of attribute descriptions that together
describe the layout of one or more vertex buffers which are used together to
specify the vertices in a primitive. Additionally, the domain manages the
buffers used to store the data and will resize them as necessary to accomodate
new vertices.
Domains can optionally be indexed, in which case they also manage a buffer
containing vertex indices. This buffer is grown separately and has no size
relation to the attribute buffers.
Applications can create vertices (and optionally, indices) within a domain
with the `VertexDomain.create` method. This returns a `VertexList`
representing the list of vertices created. The vertex attribute data within
the group can be modified, and the changes will be made to the underlying
buffers automatically.
The entire domain can be efficiently drawn in one step with the
`VertexDomain.draw` method, assuming all the vertices comprise primitives of
the same OpenGL primitive mode.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import re
from pyglet.gl import *
from pyglet.graphics import allocation, vertexattribute, vertexbuffer
_usage_format_re = re.compile(r'''
(?P<attribute>[^/]*)
(/ (?P<usage> static|dynamic|stream|none))?
''', re.VERBOSE)
# Usage-hint names (from create_attribute_usage format strings) -> GL VBO
# usage enums.  'none' still maps to a stream usage, but the separate vbo
# flag computed in create_attribute_usage forces client-side arrays for it.
_gl_usages = {
    'static': GL_STATIC_DRAW,
    'dynamic': GL_DYNAMIC_DRAW,
    'stream': GL_STREAM_DRAW,
    'none': GL_STREAM_DRAW_ARB,  # Force no VBO
}
def _nearest_pow2(v):
# From http://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
# Credit: Sean Anderson
v -= 1
v |= v >> 1
v |= v >> 2
v |= v >> 4
v |= v >> 8
v |= v >> 16
return v + 1
def create_attribute_usage(format):
    '''Create an attribute and usage pair from a format string. The
    format string is as documented in `pyglet.graphics.vertexattribute`, with
    the addition of an optional usage component::

        usage ::= attribute ( '/' ('static' | 'dynamic' | 'stream' | 'none') )?

    If the usage is not given it defaults to 'dynamic'.  The usage corresponds
    to the OpenGL VBO usage hint, and for ``static`` also indicates a
    preference for interleaved arrays.  If ``none`` is specified a buffer
    object is not created, and vertex data is stored in system memory.

    Some examples:

    ``v3f/stream``
        3D vertex position using floats, for stream usage
    ``c4b/static``
        4-byte color attribute, for static usage

    :return: attribute, usage
    '''
    parsed = _usage_format_re.match(format)
    attribute = vertexattribute.create_attribute(parsed.group('attribute'))
    usage_name = parsed.group('usage')
    if not usage_name:
        # No explicit usage: dynamic, backed by a VBO.
        return (attribute, GL_DYNAMIC_DRAW, True)
    return (attribute, _gl_usages[usage_name], usage_name != 'none')
def create_domain(*attribute_usage_formats):
    '''Create a vertex domain covering the given attribute usage formats.
    See documentation for `create_attribute_usage` and
    `pyglet.graphics.vertexattribute.create_attribute` for the grammar of
    these format strings.

    :rtype: `VertexDomain`
    '''
    usages = [create_attribute_usage(fmt) for fmt in attribute_usage_formats]
    return VertexDomain(usages)
def create_indexed_domain(*attribute_usage_formats):
    '''Create an indexed vertex domain covering the given attribute usage
    formats.  See documentation for `create_attribute_usage` and
    `pyglet.graphics.vertexattribute.create_attribute` for the grammar of
    these format strings.

    :rtype: `VertexDomain`
    '''
    usages = [create_attribute_usage(fmt) for fmt in attribute_usage_formats]
    return IndexedVertexDomain(usages)
class VertexDomain(object):
'''Management of a set of vertex lists.
Construction of a vertex domain is usually done with the `create_domain`
function.
'''
_version = 0
_initial_count = 16
def __init__(self, attribute_usages):
self.allocator = allocation.Allocator(self._initial_count)
static_attributes = []
attributes = []
self.buffer_attributes = [] # list of (buffer, attributes)
for attribute, usage, vbo in attribute_usages:
if usage == GL_STATIC_DRAW:
# Group attributes for interleaved buffer
static_attributes.append(attribute)
attributes.append(attribute)
else:
# Create non-interleaved buffer
attributes.append(attribute)
attribute.buffer = vertexbuffer.create_mappable_buffer(
attribute.stride * self.allocator.capacity,
usage=usage, vbo=vbo)
attribute.buffer.element_size = attribute.stride
attribute.buffer.attributes = (attribute,)
self.buffer_attributes.append(
(attribute.buffer, (attribute,)))
# Create buffer for interleaved data
if static_attributes:
vertexattribute.interleave_attributes(static_attributes)
stride = static_attributes[0].stride
buffer = vertexbuffer.create_mappable_buffer(
stride * self.allocator.capacity, usage=GL_STATIC_DRAW)
buffer.element_size = stride
self.buffer_attributes.append(
(buffer, static_attributes))
for attribute in static_attributes:
attribute.buffer = buffer
# Create named attributes for each attribute
self.attributes = attributes
self.attribute_names = {}
for attribute in attributes:
if isinstance(attribute, vertexattribute.GenericAttribute):
index = attribute.index
if 'generic' not in self.attributes:
self.attribute_names['generic'] = {}
assert index not in self.attribute_names['generic'], \
'More than one generic attribute with index %d' % index
self.attribute_names['generic'][index] = attribute
else:
name = attribute.plural
assert name not in self.attributes, \
'More than one "%s" attribute given' % name
self.attribute_names[name] = attribute
def __del__(self):
# Break circular refs that Python GC seems to miss even when forced
# collection.
for attribute in self.attributes:
del attribute.buffer
def _safe_alloc(self, count):
'''Allocate vertices, resizing the buffers if necessary.'''
try:
return self.allocator.alloc(count)
except allocation.AllocatorMemoryException, e:
capacity = _nearest_pow2(e.requested_capacity)
self._version += 1
for buffer, _ in self.buffer_attributes:
buffer.resize(capacity * buffer.element_size)
self.allocator.set_capacity(capacity)
return self.allocator.alloc(count)
def _safe_realloc(self, start, count, new_count):
'''Reallocate vertices, resizing the buffers if necessary.'''
try:
return self.allocator.realloc(start, count, new_count)
except allocation.AllocatorMemoryException, e:
capacity = _nearest_pow2(e.requested_capacity)
self._version += 1
for buffer, _ in self.buffer_attributes:
buffer.resize(capacity * buffer.element_size)
self.allocator.set_capacity(capacity)
return self.allocator.realloc(start, count, new_count)
def create(self, count):
'''Create a `VertexList` in this domain.
:Parameters:
`count` : int
Number of vertices to create.
:rtype: `VertexList`
'''
start = self._safe_alloc(count)
return VertexList(self, start, count)
def draw(self, mode, vertex_list=None):
'''Draw vertices in the domain.
If `vertex_list` is not specified, all vertices in the domain are
drawn. This is the most efficient way to render primitives.
If `vertex_list` specifies a `VertexList`, only primitives in that
list will be drawn.
:Parameters:
`mode` : int
OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
`vertex_list` : `VertexList`
Vertex list to draw, or ``None`` for all lists in this domain.
'''
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
for buffer, attributes in self.buffer_attributes:
buffer.bind()
for attribute in attributes:
attribute.enable()
attribute.set_pointer(attribute.buffer.ptr)
if vertexbuffer._workaround_vbo_finish:
glFinish()
if vertex_list is not None:
glDrawArrays(mode, vertex_list.start, vertex_list.count)
else:
starts, sizes = self.allocator.get_allocated_regions()
primcount = len(starts)
if primcount == 0:
pass
elif primcount == 1:
# Common case
glDrawArrays(mode, starts[0], sizes[0])
elif gl_info.have_version(1, 4):
starts = (GLint * primcount)(*starts)
sizes = (GLsizei * primcount)(*sizes)
glMultiDrawArrays(mode, starts, sizes, primcount)
else:
for start, size in zip(starts, sizes):
glDrawArrays(mode, start, size)
for buffer, _ in self.buffer_attributes:
buffer.unbind()
glPopClientAttrib()
def _is_empty(self):
return not self.allocator.starts
def __repr__(self):
return '<%s@%x %s>' % (self.__class__.__name__, id(self),
self.allocator)
class VertexList(object):
'''A list of vertices within a `VertexDomain`. Use
`VertexDomain.create` to construct this list.
'''
def __init__(self, domain, start, count):
# TODO make private
self.domain = domain
self.start = start
self.count = count
def get_size(self):
'''Get the number of vertices in the list.
:rtype: int
'''
return self.count
def get_domain(self):
'''Get the domain this vertex list belongs to.
:rtype: `VertexDomain`
'''
return self.domain
def draw(self, mode):
'''Draw this vertex list in the given OpenGL mode.
:Parameters:
`mode` : int
OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
'''
self.domain.draw(mode, self)
def resize(self, count):
'''Resize this group.
:Parameters:
`count` : int
New number of vertices in the list.
'''
new_start = self.domain._safe_realloc(self.start, self.count, count)
if new_start != self.start:
# Copy contents to new location
for attribute in self.domain.attributes:
old = attribute.get_region(attribute.buffer,
self.start, self.count)
new = attribute.get_region(attribute.buffer,
new_start, self.count)
new.array[:] = old.array[:]
new.invalidate()
self.start = new_start
self.count = count
self._colors_cache_version = None
self._fog_coords_cache_version = None
self._edge_flags_cache_version = None
self._normals_cache_version = None
self._secondary_colors_cache_version = None
self._tex_coords_cache_version = None
self._vertices_cache_version = None
def delete(self):
'''Delete this group.'''
self.domain.allocator.dealloc(self.start, self.count)
def migrate(self, domain):
'''Move this group from its current domain and add to the specified
one. Attributes on domains must match. (In practice, used to change
parent state of some vertices).
:Parameters:
`domain` : `VertexDomain`
Domain to migrate this vertex list to.
'''
assert domain.attribute_names.keys() == \
self.domain.attribute_names.keys(), 'Domain attributes must match.'
new_start = domain._safe_alloc(self.count)
for key, old_attribute in self.domain.attribute_names.items():
old = old_attribute.get_region(old_attribute.buffer,
self.start, self.count)
new_attribute = domain.attribute_names[key]
new = new_attribute.get_region(new_attribute.buffer,
new_start, self.count)
new.array[:] = old.array[:]
new.invalidate()
self.domain.allocator.dealloc(self.start, self.count)
self.domain = domain
self.start = new_start
self._colors_cache_version = None
self._fog_coords_cache_version = None
self._edge_flags_cache_version = None
self._normals_cache_version = None
self._secondary_colors_cache_version = None
self._tex_coords_cache_version = None
self._vertices_cache_version = None
def _set_attribute_data(self, i, data):
attribute = self.domain.attributes[i]
# TODO without region
region = attribute.get_region(attribute.buffer, self.start, self.count)
region.array[:] = data
region.invalidate()
    # ---
    # Per-attribute accessors.  Each _get_*/_set_* pair exposes the mapped
    # buffer region for one named attribute of this vertex list as a ctypes
    # array.  The region object is cached per list and refreshed whenever
    # the domain's buffers have been reallocated (detected by comparing the
    # cached version against domain._version; migrate() also resets the
    # *_cache_version attributes to None to force a refresh).
    def _get_colors(self):
        if (self._colors_cache_version != self.domain._version):
            domain = self.domain
            attribute = domain.attribute_names['colors']
            self._colors_cache = attribute.get_region(
                attribute.buffer, self.start, self.count)
            self._colors_cache_version = domain._version
        region = self._colors_cache
        region.invalidate()
        return region.array
    def _set_colors(self, data):
        self._get_colors()[:] = data
    _colors_cache = None
    _colors_cache_version = None
    colors = property(_get_colors, _set_colors,
                      doc='''Array of color data.''')
    # ---
    def _get_fog_coords(self):
        if (self._fog_coords_cache_version != self.domain._version):
            domain = self.domain
            attribute = domain.attribute_names['fog_coords']
            self._fog_coords_cache = attribute.get_region(
                attribute.buffer, self.start, self.count)
            self._fog_coords_cache_version = domain._version
        region = self._fog_coords_cache
        region.invalidate()
        return region.array
    def _set_fog_coords(self, data):
        self._get_fog_coords()[:] = data
    _fog_coords_cache = None
    _fog_coords_cache_version = None
    fog_coords = property(_get_fog_coords, _set_fog_coords,
                          doc='''Array of fog coordinate data.''')
    # ---
    def _get_edge_flags(self):
        if (self._edge_flags_cache_version != self.domain._version):
            domain = self.domain
            attribute = domain.attribute_names['edge_flags']
            self._edge_flags_cache = attribute.get_region(
                attribute.buffer, self.start, self.count)
            self._edge_flags_cache_version = domain._version
        region = self._edge_flags_cache
        region.invalidate()
        return region.array
    def _set_edge_flags(self, data):
        self._get_edge_flags()[:] = data
    _edge_flags_cache = None
    _edge_flags_cache_version = None
    edge_flags = property(_get_edge_flags, _set_edge_flags,
                          doc='''Array of edge flag data.''')
    # ---
    def _get_normals(self):
        if (self._normals_cache_version != self.domain._version):
            domain = self.domain
            attribute = domain.attribute_names['normals']
            self._normals_cache = attribute.get_region(
                attribute.buffer, self.start, self.count)
            self._normals_cache_version = domain._version
        region = self._normals_cache
        region.invalidate()
        return region.array
    def _set_normals(self, data):
        self._get_normals()[:] = data
    _normals_cache = None
    _normals_cache_version = None
    normals = property(_get_normals, _set_normals,
                       doc='''Array of normal vector data.''')
    # ---
    def _get_secondary_colors(self):
        if (self._secondary_colors_cache_version != self.domain._version):
            domain = self.domain
            attribute = domain.attribute_names['secondary_colors']
            self._secondary_colors_cache = attribute.get_region(
                attribute.buffer, self.start, self.count)
            self._secondary_colors_cache_version = domain._version
        region = self._secondary_colors_cache
        region.invalidate()
        return region.array
    def _set_secondary_colors(self, data):
        self._get_secondary_colors()[:] = data
    _secondary_colors_cache = None
    _secondary_colors_cache_version = None
    secondary_colors = property(_get_secondary_colors, _set_secondary_colors,
                                doc='''Array of secondary color data.''')
    # ---
    _tex_coords_cache = None
    _tex_coords_cache_version = None
    def _get_tex_coords(self):
        if (self._tex_coords_cache_version != self.domain._version):
            domain = self.domain
            attribute = domain.attribute_names['tex_coords']
            self._tex_coords_cache = attribute.get_region(
                attribute.buffer, self.start, self.count)
            self._tex_coords_cache_version = domain._version
        region = self._tex_coords_cache
        region.invalidate()
        return region.array
    def _set_tex_coords(self, data):
        self._get_tex_coords()[:] = data
    tex_coords = property(_get_tex_coords, _set_tex_coords,
                          doc='''Array of texture coordinate data.''')
    # ---
    _vertices_cache = None
    _vertices_cache_version = None
    def _get_vertices(self):
        if (self._vertices_cache_version != self.domain._version):
            domain = self.domain
            attribute = domain.attribute_names['vertices']
            self._vertices_cache = attribute.get_region(
                attribute.buffer, self.start, self.count)
            self._vertices_cache_version = domain._version
        region = self._vertices_cache
        region.invalidate()
        return region.array
    def _set_vertices(self, data):
        self._get_vertices()[:] = data
    vertices = property(_get_vertices, _set_vertices,
                        doc='''Array of vertex coordinate data.''')
class IndexedVertexDomain(VertexDomain):
    '''Management of a set of indexed vertex lists.
    Construction of an indexed vertex domain is usually done with the
    `create_indexed_domain` function.
    '''
    # Initial capacity (in indices) of the index allocator/buffer.
    _initial_index_count = 16
    def __init__(self, attribute_usages, index_gl_type=GL_UNSIGNED_INT):
        super(IndexedVertexDomain, self).__init__(attribute_usages)
        self.index_allocator = allocation.Allocator(self._initial_index_count)
        self.index_gl_type = index_gl_type
        self.index_c_type = vertexattribute._c_types[index_gl_type]
        self.index_element_size = ctypes.sizeof(self.index_c_type)
        self.index_buffer = vertexbuffer.create_mappable_buffer(
            self.index_allocator.capacity * self.index_element_size,
            target=GL_ELEMENT_ARRAY_BUFFER)
    def _safe_index_alloc(self, count):
        '''Allocate indices, resizing the buffers if necessary.'''
        # NOTE: Python 2 exception syntax ("except X, e") -- this module
        # predates Python 3.
        try:
            return self.index_allocator.alloc(count)
        except allocation.AllocatorMemoryException, e:
            capacity = _nearest_pow2(e.requested_capacity)
            # Bumping _version invalidates all cached regions held by
            # vertex lists in this domain.
            self._version += 1
            self.index_buffer.resize(capacity * self.index_element_size)
            self.index_allocator.set_capacity(capacity)
            return self.index_allocator.alloc(count)
    def _safe_index_realloc(self, start, count, new_count):
        '''Reallocate indices, resizing the buffers if necessary.'''
        try:
            return self.index_allocator.realloc(start, count, new_count)
        except allocation.AllocatorMemoryException, e:
            capacity = _nearest_pow2(e.requested_capacity)
            self._version += 1
            self.index_buffer.resize(capacity * self.index_element_size)
            self.index_allocator.set_capacity(capacity)
            return self.index_allocator.realloc(start, count, new_count)
    def create(self, count, index_count):
        '''Create an `IndexedVertexList` in this domain.
        :Parameters:
            `count` : int
                Number of vertices to create
            `index_count`
                Number of indices to create
        '''
        start = self._safe_alloc(count)
        index_start = self._safe_index_alloc(index_count)
        return IndexedVertexList(self, start, count, index_start, index_count)
    def get_index_region(self, start, count):
        '''Get a region of the index buffer.
        :Parameters:
            `start` : int
                Start of the region to map.
            `count` : int
                Number of indices to map.
        :rtype: Array of int
        '''
        byte_start = self.index_element_size * start
        byte_count = self.index_element_size * count
        ptr_type = ctypes.POINTER(self.index_c_type * count)
        return self.index_buffer.get_region(byte_start, byte_count, ptr_type)
    def draw(self, mode, vertex_list=None):
        '''Draw vertices in the domain.
        If `vertex_list` is not specified, all vertices in the domain are
        drawn.  This is the most efficient way to render primitives.
        If `vertex_list` specifies a `VertexList`, only primitives in that
        list will be drawn.
        :Parameters:
            `mode` : int
                OpenGL drawing mode, e.g. ``GL_POINTS``, ``GL_LINES``, etc.
            `vertex_list` : `IndexedVertexList`
                Vertex list to draw, or ``None`` for all lists in this domain.
        '''
        glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
        for buffer, attributes in self.buffer_attributes:
            buffer.bind()
            for attribute in attributes:
                attribute.enable()
                attribute.set_pointer(attribute.buffer.ptr)
        self.index_buffer.bind()
        if vertexbuffer._workaround_vbo_finish:
            glFinish()
        if vertex_list is not None:
            # Draw just the span of indices owned by this vertex list.
            glDrawElements(mode, vertex_list.index_count, self.index_gl_type,
                self.index_buffer.ptr +
                vertex_list.index_start * self.index_element_size)
        else:
            starts, sizes = self.index_allocator.get_allocated_regions()
            primcount = len(starts)
            if primcount == 0:
                pass
            elif primcount == 1:
                # Common case
                glDrawElements(mode, sizes[0], self.index_gl_type,
                    self.index_buffer.ptr + starts[0])
            elif gl_info.have_version(1, 4):
                if not isinstance(self.index_buffer,
                                  vertexbuffer.VertexBufferObject):
                    starts = [s + self.index_buffer.ptr for s in starts]
                starts = (GLuint * primcount)(*starts)
                sizes = (GLsizei * primcount)(*sizes)
                glMultiDrawElements(mode, sizes, self.index_gl_type, starts,
                                    primcount)
            else:
                # Fallback for GL < 1.4: one glDrawElements call per region.
                for start, size in zip(starts, sizes):
                    glDrawElements(mode, size, self.index_gl_type,
                        self.index_buffer.ptr +
                        start * self.index_element_size)
        self.index_buffer.unbind()
        for buffer, _ in self.buffer_attributes:
            buffer.unbind()
        glPopClientAttrib()
class IndexedVertexList(VertexList):
    '''A list of vertices within an `IndexedVertexDomain` that are indexed.
    Use `IndexedVertexDomain.create` to construct this list.
    '''
    def __init__(self, domain, start, count, index_start, index_count):
        super(IndexedVertexList, self).__init__(domain, start, count)
        self.index_start = index_start
        self.index_count = index_count
    def draw(self, mode):
        '''Draw only this list's primitives in the given OpenGL mode.'''
        self.domain.draw(mode, self)
    def resize(self, count, index_count):
        '''Resize this group.
        :Parameters:
            `count` : int
                New number of vertices in the list.
            `index_count` : int
                New number of indices in the list.
        '''
        old_start = self.start
        super(IndexedVertexList, self).resize(count)
        # Change indices (because vertices moved)
        if old_start != self.start:
            diff = self.start - old_start
            self.indices[:] = map(lambda i: i + diff, self.indices)
        # Resize indices
        new_start = self.domain._safe_index_realloc(
            self.index_start, self.index_count, index_count)
        if new_start != self.index_start:
            old = self.domain.get_index_region(
                self.index_start, self.index_count)
            # BUGFIX: the new region must be mapped at new_start; previously
            # this mapped self.index_start again, so the copy below was a
            # no-op and the relocated index data was left uninitialized.
            new = self.domain.get_index_region(
                new_start, self.index_count)
            new.array[:] = old.array[:]
            new.invalidate()
        self.index_start = new_start
        self.index_count = index_count
        # Force the indices accessor to remap the (possibly moved) region.
        self._indices_cache_version = None
    def delete(self):
        '''Delete this group.'''
        super(IndexedVertexList, self).delete()
        self.domain.index_allocator.dealloc(self.index_start, self.index_count)
    def _set_index_data(self, data):
        # TODO without region
        region = self.domain.get_index_region(
            self.index_start, self.index_count)
        region.array[:] = data
        region.invalidate()
    # ---
    def _get_indices(self):
        # Remap the index region if the domain's buffers were reallocated
        # since the cache was filled (see _version bumps in the domain).
        if self._indices_cache_version != self.domain._version:
            domain = self.domain
            self._indices_cache = domain.get_index_region(
                self.index_start, self.index_count)
            self._indices_cache_version = domain._version
        region = self._indices_cache
        region.invalidate()
        return region.array
    def _set_indices(self, data):
        self._get_indices()[:] = data
    _indices_cache = None
    _indices_cache_version = None
    indices = property(_get_indices, _set_indices,
                       doc='''Array of index data.''')
| bsd-3-clause | 432292064e09b6dbee601de8c1383751 | 36.068123 | 79 | 0.598911 | 4.317216 | false | false | false | false |
mattpap/sympy-polys | sympy/polys/tests/test_specialpolys.py | 2 | 2492 | """Tests for functions for generating interesting polynomials. """
from sympy import Poly, ZZ, raises
from sympy.polys.specialpolys import (
swinnerton_dyer_poly,
cyclotomic_poly,
symmetric_poly,
fateman_poly_F_1,
dmp_fateman_poly_F_1,
fateman_poly_F_2,
dmp_fateman_poly_F_2,
fateman_poly_F_3,
dmp_fateman_poly_F_3,
)
from sympy.abc import x, y, z
def test_swinnerton_dyer_poly():
    """Check Swinnerton-Dyer polynomials for orders 1-3 and input validation."""
    # Order must be a positive integer.
    raises(ValueError, "swinnerton_dyer_poly(0, x)")
    assert swinnerton_dyer_poly(1, x, polys=True) == Poly(x**2 - 2)
    assert swinnerton_dyer_poly(1, x) == x**2 - 2
    assert swinnerton_dyer_poly(2, x) == x**4 - 10*x**2 + 1
    assert swinnerton_dyer_poly(3, x) == x**8 - 40*x**6 + 352*x**4 - 960*x**2 + 576
def test_cyclotomic_poly():
    """Check the first few cyclotomic polynomials and input validation."""
    # The index must be a positive integer.
    raises(ValueError, "cyclotomic_poly(0, x)")
    assert cyclotomic_poly(1, x, polys=True) == Poly(x - 1)
    assert cyclotomic_poly(1, x) == x - 1
    assert cyclotomic_poly(2, x) == x + 1
    assert cyclotomic_poly(3, x) == x**2 + x + 1
    assert cyclotomic_poly(4, x) == x**2 + 1
    assert cyclotomic_poly(5, x) == x**4 + x**3 + x**2 + x + 1
    assert cyclotomic_poly(6, x) == x**2 - x + 1
def test_symmetric_poly():
    """Check elementary symmetric polynomials in three variables."""
    # Order must lie in 0..len(gens).
    raises(ValueError, "symmetric_poly(-1, x, y, z)")
    raises(ValueError, "symmetric_poly(5, x, y, z)")
    # Generators may be given positionally or as a single sequence.
    assert symmetric_poly(1, x, y, z, polys=True) == Poly(x + y + z)
    assert symmetric_poly(1, (x, y, z), polys=True) == Poly(x + y + z)
    assert symmetric_poly(0, x, y, z) == 1
    assert symmetric_poly(1, x, y, z) == x + y + z
    assert symmetric_poly(2, x, y, z) == x*y + x*z + y*z
    assert symmetric_poly(3, x, y, z) == x*y*z
def test_fateman_poly_F_1():
    """High-level and low-level (dmp) Fateman F_1 constructors must agree."""
    f,g,h = fateman_poly_F_1(1)
    F,G,H = dmp_fateman_poly_F_1(1, ZZ)
    assert [ t.rep.rep for t in [f,g,h] ] == [F,G,H]
    f,g,h = fateman_poly_F_1(3)
    F,G,H = dmp_fateman_poly_F_1(3, ZZ)
    assert [ t.rep.rep for t in [f,g,h] ] == [F,G,H]
def test_fateman_poly_F_2():
    """High-level and low-level (dmp) Fateman F_2 constructors must agree."""
    f,g,h = fateman_poly_F_2(1)
    F,G,H = dmp_fateman_poly_F_2(1, ZZ)
    assert [ t.rep.rep for t in [f,g,h] ] == [F,G,H]
    f,g,h = fateman_poly_F_2(3)
    F,G,H = dmp_fateman_poly_F_2(3, ZZ)
    assert [ t.rep.rep for t in [f,g,h] ] == [F,G,H]
def test_fateman_poly_F_3():
    """High-level and low-level (dmp) Fateman F_3 constructors must agree."""
    f,g,h = fateman_poly_F_3(1)
    F,G,H = dmp_fateman_poly_F_3(1, ZZ)
    assert [ t.rep.rep for t in [f,g,h] ] == [F,G,H]
    f,g,h = fateman_poly_F_3(3)
    F,G,H = dmp_fateman_poly_F_3(3, ZZ)
    assert [ t.rep.rep for t in [f,g,h] ] == [F,G,H]
| bsd-3-clause | da1d49048b299090ae930c923e81bad2 | 28.666667 | 83 | 0.574639 | 2.223015 | false | true | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/gl/glext_missing.py | 11 | 5393 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Additional hand-coded GL extensions.
These are hand-wrapped extension tokens and functions that are in
the OpenGL Extension Registry but have not yet been added to either
the registry's glext.h or nVidia's glext.h. Remove wraps from here
when the headers are updated (and glext_arb.py or glext_nv.py are
regenerated).
When adding an extension here, include the name and URL, and any tokens and
functions appearing under "New Tokens" and "New Procedures" headings. Don't
forget to add the GL_/gl prefix.
Unnumbered extensions in the registry are not included.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: glext_missing.py 1579 2008-01-15 14:47:19Z Alex.Holkner $'
from ctypes import *
from pyglet.gl.lib import link_GL as _link_function
from pyglet.gl.lib import c_ptrdiff_t
# At time of writing, ABI glext.h was last updated 2005/06/20, so numbered
# non-ARB extensions from 312 on must be included here.
# Token values below are the hexadecimal enumerants assigned by the OpenGL
# Extension Registry specification linked above each group.
# GL_EXT_packed_depth_stencil
# http://oss.sgi.com/projects/ogl-sample/registry/EXT/packed_depth_stencil.txt
GL_DEPTH_STENCIL_EXT = 0x84F9
GL_UNSIGNED_INT_24_8_EXT = 0x84FA
GL_DEPTH24_STENCIL8_EXT = 0x88F0
GL_TEXTURE_STENCIL_SIZE_EXT = 0x88F1
# GL_EXT_texture_sRGB
# http://oss.sgi.com/projects/ogl-sample/registry/EXT/texture_sRGB.txt
GL_SRGB_EXT = 0x8C40
GL_SRGB8_EXT = 0x8C41
GL_SRGB_ALPHA_EXT = 0x8C42
GL_SRGB8_ALPHA8_EXT = 0x8C43
GL_SLUMINANCE_ALPHA_EXT = 0x8C44
GL_SLUMINANCE8_ALPHA8_EXT = 0x8C45
GL_SLUMINANCE_EXT = 0x8C46
GL_SLUMINANCE8_EXT = 0x8C47
GL_COMPRESSED_SRGB_EXT = 0x8C48
GL_COMPRESSED_SRGB_ALPHA_EXT = 0x8C49
GL_COMPRESSED_SLUMINANCE_EXT = 0x8C4A
GL_COMPRESSED_SLUMINANCE_ALPHA_EXT = 0x8C4B
GL_COMPRESSED_SRGB_S3TC_DXT1_EXT = 0x8C4C
GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT = 0x8C4D
GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT = 0x8C4E
GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT = 0x8C4F
# GL_EXT_stencil_clear_tag
# http://oss.sgi.com/projects/ogl-sample/registry/EXT/stencil_clear_tag.txt
GLuint = c_uint 	# /usr/include/GL/gl.h:62
GLsizei = c_int 	# /usr/include/GL/gl.h:59
glStencilClearTagEXT = _link_function(
    'glStencilClearTagEXT', None, [GLsizei, GLuint])
GL_STENCIL_TAG_BITS_EXT = 0x88F2
GL_STENCIL_CLEAR_TAG_VALUE_EXT = 0x88F3
# GL_EXT_framebuffer_blit
# http://oss.sgi.com/projects/ogl-sample/registry/EXT/framebuffer_blit.txt
GLenum = c_uint 	# /usr/include/GL/gl.h:53
GLint = c_int 	# /usr/include/GL/gl.h:58
glBlitFramebufferEXT = _link_function(
    'glBlitFramebufferEXT', None, [GLint, GLint, GLint, GLint,
                                   GLint, GLint, GLint, GLint,
                                   GLuint, GLenum])
GL_READ_FRAMEBUFFER_EXT = 0x8CA8
GL_DRAW_FRAMEBUFFER_EXT = 0x8CA9
GL_DRAW_FRAMEBUFFER_BINDING_EXT = 0x8CA6
GL_READ_FRAMEBUFFER_BINDING_EXT = 0x8CAA
# GL_EXT_framebuffer_multisample
# http://oss.sgi.com/projects/ogl-sample/registry/EXT/framebuffer_multisample.txt
GL_RENDERBUFFER_SAMPLES_EXT = 0x8CAB
# GL_MESAX_texture_stack
# http://oss.sgi.com/projects/ogl-sample/registry/MESAX/texture_stack.txt
GL_TEXTURE_1D_STACK_MESAX = 0x8759
GL_TEXTURE_2D_STACK_MESAX = 0x875A
GL_PROXY_TEXTURE_1D_STACK_MESAX = 0x875B
GL_PROXY_TEXTURE_2D_STACK_MESAX = 0x875C
GL_TEXTURE_1D_STACK_BINDING_MESAX = 0x875D
GL_TEXTURE_2D_STACK_BINDING_MESAX = 0x875E
| bsd-3-clause | 6e820e2383e401386f2eb66108f678a3 | 41.464567 | 81 | 0.67124 | 3.206302 | false | false | false | false |
mattpap/sympy-polys | sympy/ntheory/factor_.py | 1 | 17811 | """
Integer factorization
"""
from sympy.core import Mul
from sympy.core.numbers import igcd
from sympy.core.power import integer_nthroot, Pow
from sympy.core.mul import Mul
import random
import math
from primetest import isprime
from generate import sieve, primerange
from sympy.utilities.iterables import iff
from sympy.core.singleton import S
# Lookup table: small_trailing[i] is the number of trailing zero bits in the
# binary representation of the byte i (and 0 for i == 0).  Used by trailing()
# to resolve the low byte with a single indexed read.
small_trailing = [i and max(int(not i % 2**j) and j for j in range(1,8)) \
    for i in range(256)]
def trailing(n):
    """Count the number of trailing zero digits in the binary
    representation of n, i.e. determine the largest power of 2
    that divides n.

    Returns 0 for n == 0.  ``n & -n`` isolates the lowest set bit,
    whose bit length minus one is exactly the number of trailing
    zeros, so no table lookup or shifting loop is required.
    """
    if not n:
        return 0
    return (n & -n).bit_length() - 1
def multiplicity(p, n):
    """
    Find the greatest integer m such that p**m divides n.
    Example usage
    =============
        >>> from sympy.ntheory import multiplicity
        >>> [multiplicity(5, n) for n in [8, 5, 25, 125, 250]]
        [0, 1, 2, 3, 3]
    """
    # Powers of two are counted directly from the bit pattern.
    if p == 2:
        return trailing(n)
    count = 0
    n, rem = divmod(n, p)
    while not rem:
        count += 1
        if count > 5:
            # The multiplicity could be very large; switch to removing
            # p**e for exponentially growing e.
            exp = 2
            while True:
                ppow = p**exp
                if ppow < n:
                    quot, rem = divmod(n, ppow)
                    if not rem:
                        count += exp
                        exp *= 2
                        n = quot
                        continue
                # Finish off whatever single factors of p remain.
                return count + multiplicity(p, n)
        n, rem = divmod(n, p)
    return count
def perfect_power(n, candidates=None, recursive=True):
    """
    Return ``(a, b)`` such that ``n`` == ``a**b`` if ``n`` is a
    perfect power; otherwise return ``None``.
    By default, attempts to determine the largest possible ``b``.
    With ``recursive=False``, the smallest possible ``b`` will
    be chosen (this will be a prime number).
    """
    if n < 3:
        return None
    logn = math.log(n,2)
    # An exponent b can work only if 2**b <= n, i.e. b <= log2(n).
    max_possible = int(logn)+2
    if not candidates:
        candidates = primerange(2, max_possible)
    for b in candidates:
        if b > max_possible:
            break
        # Weed out downright impossible candidates
        if logn/b < 40:
            # The b-th root fits in a float: reject b unless the root is
            # close to an integer before paying for the exact computation.
            a = 2.0**(logn/b)
            if abs(int(a+0.5)-a) > 0.01:
                continue
        # print b
        r, exact = integer_nthroot(n, b)
        if exact:
            if recursive:
                # r itself may be a perfect power; fold its exponent in to
                # report the largest possible b.
                m = perfect_power(r)
                if m:
                    return m[0], b*m[1]
            return r, b
def pollard_rho(n, retries=5, max_steps=None, seed=1234):
    """Use Pollard's rho method to try to extract a nontrivial factor
    of ``n``. The returned factor may be a composite number. If no
    factor is found, ``None`` is returned.
    The algorithm may need to take thousands of steps before
    it finds a factor or reports failure. If ``max_steps`` is
    specified, the iteration is canceled with a failure after
    the specified number of steps.
    On failure, the algorithm will self-restart (with different
    parameters) up to ``retries`` number of times.
    The rho algorithm is a Monte Carlo method whose outcome can
    be affected by changing the random seed value.
    References
    ==========
    - Richard Crandall & Carl Pomerance (2005), "Prime Numbers:
      A Computational Perspective", Springer, 2nd edition, 229-231
    """
    prng = random.Random(seed + retries)
    for i in range(retries):
        # Alternative good nonrandom choice: a = 1
        a = prng.randint(1, n-3)
        # Alternative good nonrandom choice: s = 2
        s = prng.randint(0, n-1)
        # Floyd cycle detection: U advances one step, V advances two.
        U = V = s
        F = lambda x: (x**2 + a) % n
        j = 0
        while 1:
            if max_steps and (j > max_steps):
                break
            j += 1
            U = F(U)
            V = F(F(V))
            g = igcd(abs(U-V), n)
            if g == 1:
                continue
            if g == n:
                # Degenerate cycle; restart with new parameters.
                break
            return int(g)
    return None
def pollard_pm1(n, B=10, seed=1234):
    """
    Use Pollard's p-1 method to try to extract a nontrivial factor
    of ``n``. The returned factor may be a composite number. If no
    factor is found, ``None`` is returned.
    The search is performed up to a smoothness bound ``B``.
    Choosing a larger B increases the likelihood of finding
    a large factor.
    The p-1 algorithm is a Monte Carlo method whose outcome can
    be affected by changing the random seed value.
    Example usage
    =============
    With the default smoothness bound, this number can't be cracked:
        >>> from sympy.ntheory import pollard_pm1
        >>> pollard_pm1(21477639576571)
    Increasing the smoothness bound helps:
        >>> pollard_pm1(21477639576571, B=2000)
        4410317
    References
    ==========
    - Richard Crandall & Carl Pomerance (2005), "Prime Numbers:
      A Computational Perspective", Springer, 2nd edition, 236-238
    """
    prng = random.Random(seed + B)
    a = prng.randint(2, n-1)
    # Raise a to the product of all maximal prime powers p**e <= B;
    # by Fermat's little theorem a-1 then shares a factor with n if some
    # prime factor p of n has p-1 B-smooth.
    for p in sieve.primerange(2, B):
        e = int(math.log(B, p))
        a = pow(a, p**e, n)
    g = igcd(a-1, n)
    if 1 < g < n:
        return int(g)
    else:
        return None
def _trial(factors, n, candidates=None, verbose=False, force_finalize=False):
    """
    Helper function for integer factorization. Trial factors ``n`
    against all integers given in the sequence ``candidates``
    and updates the dict ``factors`` in-place. Raises
    ``StopIteration`` if ``n`` becomes equal to 1, otherwise
    returns the reduced value of ``n`` and a flag indicating
    whether any factors were found.
    """
    if not candidates:
        return n, False
    found_something = False
    for k in candidates:
        # This check is slightly faster for small n and slightly
        # slower for large n...
        if n % k:
            continue
        m = multiplicity(k, n)
        if m:
            found_something = True
            if verbose:
                # NOTE: Python 2 print statement (module predates Python 3).
                print "-- %i (multiplicity %i)" % (k, m)
            # Strip the full power of k out of n at once.
            n //= (k**m)
            factors[k] = m
            if n == 1:
                raise StopIteration
    return int(n), found_something
def _check_termination(factors, n, verbose=False):
    """
    Helper function for integer factorization. Checks if ``n``
    is a prime or a perfect power, and in those cases updates
    the factorization and raises ``StopIteration``.
    """
    if verbose:
        print "Checking if remaining factor terminates the factorization"
    n = int(n)
    if n == 1:
        raise StopIteration
    p = perfect_power(n)
    if p:
        base, exp = p
        if verbose:
            print "-- Remaining factor is a perfect power: %i ** %i" % (base, exp)
        # n == base**exp, so every prime factor of base contributes
        # exp times its multiplicity in base.
        for b, e in factorint(base).iteritems():
            factors[b] = exp*e
        raise StopIteration
    if isprime(n):
        if verbose:
            print "Remaining factor", n, "is prime"
        factors[n] = 1
        raise StopIteration
# Progress message templates for factorint(verbose=True); filled in with
# %-formatting at the call sites.
trial_msg = "Trial division with primes between %i and %i"
rho_msg = "Pollard's rho with retries %i, max_steps %i and seed %i"
pm1_msg = "Pollard's p-1 with smoothness bound %i and seed %i"
def factorint(n, limit=None, use_trial=True, use_rho=True, use_pm1=True,
              verbose=False, visual=False):
    """
    Given a positive integer ``n``, ``factorint(n)`` returns a dict containing
    the prime factors of ``n`` as keys and their respective multiplicities
    as values. For example:
    >>> from sympy.ntheory import factorint
    >>> factorint(2000)    # 2000 = (2**4) * (5**3)
    {2: 4, 5: 3}
    >>> factorint(65537)   # This number is prime
    {65537: 1}
    For input less than 2, factorint behaves as follows:
    - ``factorint(1)`` returns the empty factorization, ``{}``
    - ``factorint(0)`` returns ``{0:1}``
    - ``factorint(-n)`` adds ``-1:1`` to the factors and then factors ``n``
    Algorithm
    =========
    The function switches between multiple algorithms. Trial division
    quickly finds small factors (of the order 1-5 digits), and finds
    all large factors if given enough time. The Pollard rho and p-1
    algorithms are used to find large factors ahead of time; they
    will often find factors of the order of 10 digits within a few
    seconds:
    >>> factors = factorint(12345678910111213141516)
    >>> for base, exp in sorted(factors.items()):
    ...     print base, exp
    ...
    2 2
    2507191691 1
    1231026625769 1
    Any of these methods can optionally be disabled with the following
    boolean parameters:
    - ``use_trial``: Toggle use of trial division
    - ``use_rho``: Toggle use of Pollard's rho method
    - ``use_pm1``: Toggle use of Pollard's p-1 method
    ``factorint`` also periodically checks if the remaining part is
    a prime number or a perfect power, and in those cases stops.
    Partial Factorization
    =====================
    If ``limit`` (> 2) is specified, the search is stopped after performing
    trial division up to (and including) the limit (or taking a
    corresponding number of rho/p-1 steps). This is useful if one has
    a large number and only is interested in finding small factors (if
    any). Note that setting a limit does not prevent larger factors
    from being found early; it simply means that the largest factor may
    be composite.
    This number, for example, has two small factors and a huge
    semi-prime factor that cannot be reduced easily:
    >>> from sympy.ntheory import isprime
    >>> a = 1407633717262338957430697921446883
    >>> f = factorint(a, limit=10000)
    >>> f == {991: 1, 202916782076162456022877024859L: 1, 7: 1}
    True
    >>> isprime(max(f))
    False
    Visual Factorization
    ====================
    If ``visual`` is set to ``True``, then it will return a visual
    factorization of the integer. For example:
    >>> from sympy import pprint
    >>> pprint(factorint(4200, visual=True))
     3 1 2 1
    2 *3 *5 *7
    Note that this is achieved by using the evaluate=False flag in Mul
    and Pow. If you do other manipulations with an expression where
    evaluate=False, it may evaluate. Therefore, you should use the
    visual option only for visualization, and use the normal dictionary
    returned by visual=False if you want to perform operations on the
    factors.
    If you find that you want one from the other but you do not want to
    run expensive factorint again, you can easily switch between the two
    forms using the following list comprehensions:
    >>> from sympy import Mul, Pow
    >>> regular = factorint(1764); regular
    {2: 2, 3: 2, 7: 2}
    >>> pprint(Mul(*[Pow(*i, **{'evaluate':False}) for i in regular.items()],
    ...            **{'evaluate':False}))
     2 2 2
    2 *3 *7
    >>> visual = factorint(1764, visual=True); pprint(visual)
     2 2 2
    2 *3 *7
    >>> dict([i.args for i in visual.args])
    {2: 2, 3: 2, 7: 2}
    Miscellaneous Options
    =====================
    If ``verbose`` is set to ``True``, detailed progress is printed.
    """
    if visual:
        # Build the unevaluated Mul-of-Pows form from the plain dict result.
        factordict = factorint(n, limit=limit, use_trial=use_trial, use_rho=use_rho,
                               use_pm1=use_pm1, verbose=verbose, visual=False)
        if factordict == {}:
            return S.One
        return Mul(*[Pow(*i, **{'evaluate':False}) for i in factordict.items()],
                   **{'evaluate':False})
    assert use_trial or use_rho or use_pm1
    n = int(n)
    if not n:
        return {0:1}
    if n < 0:
        n = -n
        factors = {-1:1}
    else:
        factors = {}
    # Power of two
    t = trailing(n)
    if t:
        factors[2] = t
        n >>= t
    if n == 1:
        return factors
    low, high = 3, 250
    # It is sufficient to perform trial division up to sqrt(n)
    try:
        # add 1 to sqrt in case there is round off; add 1 overall to make
        # sure that the limit is included
        limit = iff(limit, lambda: max(limit, low), lambda: int(n**0.5) + 1) + 1
    except OverflowError:
        limit = 1e1000
    # Setting to True here forces _check_termination if first round of
    # trial division fails
    found_trial_previous = True
    if verbose and n < 1e300:
        print "Factoring", n
    # Main loop: trial-divide in doubling windows [low, high), interleaving
    # Pollard p-1 and rho once trial division stops producing factors.
    # The helpers signal completion by raising StopIteration.
    while 1:
        try:
            high_ = min(high, limit)
            # Trial division
            if use_trial:
                if verbose:
                    print trial_msg % (low, high_)
                ps = sieve.primerange(low, high_)
                n, found_trial = _trial(factors, n, ps, verbose)
            else:
                found_trial = False
            if high > limit:
                # Trial division has covered the whole range up to sqrt(n);
                # whatever remains is kept as a (possibly composite) factor.
                factors[n] = 1
                raise StopIteration
            # Only used advanced (and more expensive) methods as long as
            # trial division fails to locate small factors
            if not found_trial:
                if found_trial_previous:
                    _check_termination(factors, n, verbose)
                # Pollard p-1
                if use_pm1 and not found_trial:
                    B = int(high_**0.7)
                    if verbose:
                        print (pm1_msg % (high_, high_))
                    ps = factorint(pollard_pm1(n, B=high_, seed=high_) or 1, \
                                   limit=limit-1, verbose=verbose)
                    n, found_pm1 = _trial(factors, n, ps, verbose)
                    if found_pm1:
                        _check_termination(factors, n, verbose)
                # Pollard rho
                if use_rho and not found_trial:
                    max_steps = int(high_**0.7)
                    if verbose:
                        print (rho_msg % (1, max_steps, high_))
                    ps = factorint(pollard_rho(n, retries=1, max_steps=max_steps, \
                                               seed=high_) or 1, limit=limit-1, verbose=verbose)
                    n, found_rho = _trial(factors, n, ps, verbose)
                    if found_rho:
                        _check_termination(factors, n, verbose)
        except StopIteration:
            return factors
        found_trial_previous = found_trial
        low, high = high, high*2
def primefactors(n, limit=None, verbose=False):
    """Return a sorted list of n's prime factors, ignoring multiplicity
    and any composite factor that remains if the limit was set too low
    for complete factorization. Unlike factorint(), primefactors() does
    not return -1 or 0.
    Example usage
    =============
    >>> from sympy.ntheory import primefactors, factorint, isprime
    >>> primefactors(6)
    [2, 3]
    >>> primefactors(-5)
    [5]
    >>> sorted(factorint(123456).items())
    [(2, 6), (3, 1), (643, 1)]
    >>> primefactors(123456)
    [2, 3, 643]
    >>> sorted(factorint(10000000001, limit=200).items())
    [(101, 1), (99009901, 1)]
    >>> isprime(99009901)
    False
    >>> primefactors(10000000001, limit=300)
    [101]
    """
    n = int(n)
    factors = sorted(factorint(n, limit=limit, verbose=verbose).keys())
    # All factors except the last are guaranteed prime; drop the sign/unit
    # entries -1, 0 and 1 that factorint may report.
    s = [f for f in factors[:-1] if f not in (-1, 0, 1)]
    # The largest factor may be a composite remainder when `limit` cut the
    # factorization short, so only keep it if it is actually prime.
    if factors and isprime(factors[-1]):
        s += [factors[-1]]
    return s
def _divisors(n):
    """Helper function for divisors which generates the divisors."""
    factordict = factorint(n)
    ps = sorted(factordict.keys())
    # Recursively combine every power of each prime with the divisors
    # built from the remaining primes.  Divisors are yielded unordered.
    def rec_gen(n = 0):
        if n == len(ps):
            yield 1
        else :
            # pows = [1, p, p**2, ..., p**e] for the n-th prime p.
            pows = [1]
            for j in xrange(factordict[ps[n]]):
                pows.append(pows[-1] * ps[n])
            for q in rec_gen(n + 1):
                for p in pows:
                    yield p * q
    for p in rec_gen() :
        yield p
def divisors(n, generator=False):
    """
    Return all divisors of n sorted from 1..n by default.
    If generator is True an unordered generator is returned.
    The number of divisors of n can be quite large if there are many
    prime factors (counting repeated factors). If only the number of
    factors is desired use divisor_count(n).
    Examples::
        >>> from sympy import divisors, divisor_count
        >>> divisors(24)
        [1, 2, 3, 4, 6, 8, 12, 24]
        >>> divisor_count(24)
        8
        >>> list(divisors(120, generator=True))
        [1, 2, 4, 8, 3, 6, 12, 24, 5, 10, 20, 40, 15, 30, 60, 120]
    This is a slightly modified version of Tim Peters referenced at:
    http://stackoverflow.com/questions/1010381/python-factorization
    """
    n = abs(n)
    # Cheap special cases first; primes are checked before 1/0 because
    # isprime rejects both anyway.
    if isprime(n):
        return [1, n]
    if n == 1:
        return [1]
    if n == 0:
        return []
    divs = _divisors(n)
    if generator:
        return divs
    return sorted(divs)
def divisor_count(n):
    """Return the number of divisors of n.

    Computed from the prime factorization: if n = prod(p_i**e_i) then
    the count is prod(e_i + 1).
    Reference:
    http://www.mayer.dial.pipex.com/maths/formulae.htm
    >>> from sympy import divisor_count
    >>> divisor_count(6)
    4
    """
    n = abs(n)
    if not n:
        return 0
    counts = [mult + 1 for prime, mult in factorint(n).items() if prime > 1]
    return Mul(*counts)
def totient(n):
    """Calculate the Euler totient function phi(n)
    >>> from sympy.ntheory import totient
    >>> totient(1)
    1
    >>> totient(25)
    20
    """
    if n < 1:
        raise ValueError("n must be a positive integer")
    factors = factorint(n)
    # Multiplicative formula: phi(n) = prod((p-1) * p**(k-1)) over the
    # prime factorization n = prod(p**k).
    t = 1
    for p, k in factors.iteritems():
        t *= (p-1) * p**(k-1)
    return t
| bsd-3-clause | 6a9b55d048c0706b53ff9f0b7dd2dd10 | 29.868284 | 84 | 0.55971 | 3.827031 | false | false | false | false |
mattpap/sympy-polys | examples/intermediate/vandermonde.py | 10 | 4661 | #!/usr/bin/env python
"""Vandermonde matrix example
Demonstrates matrix computations using the Vandermonde matrix.
* http://en.wikipedia.org/wiki/Vandermonde_matrix
"""
from sympy import Matrix, pprint, Rational, sqrt, symbols, Symbol, zeros
def symbol_gen(sym_str):
    """Infinite generator of Symbols named sym_str_0, sym_str_1, ...

    Each call to next() yields a fresh Symbol whose suffix counts how
    many symbols have been produced so far.
    """
    idx = 0
    while True:
        name = "%s_%d" % (sym_str, idx)
        idx += 1
        yield Symbol(name)
def comb_w_rep(n, k):
    """Combinations with repetition

    Returns the list of k combinations with repetition from n objects.
    """
    if k == 0:
        return [[]]
    # Grow length-1 prefixes one element at a time; each extension starts
    # from the prefix's last element so entries stay non-decreasing and
    # every multiset is produced exactly once.
    result = [[first] for first in range(n)]
    for _ in range(k - 1):
        extended = []
        for prefix in result:
            for nxt in range(prefix[-1], n):
                extended.append(prefix + [nxt])
        result = extended
    return result
def vandermonde(order, dim=1, syms='a b c d'):
    """Computes a Vandermonde matrix of given order and dimension.

    Define syms to give beginning strings for temporary variables.

    Returns the Matrix, the temporary variables, and the terms for the polynomials
    """
    syms = syms.split()
    # If fewer base names than dimensions were given, recycle the names
    # with a numeric suffix.  NOTE(review): i/len(syms) relies on Python 2
    # integer division.
    if len(syms) < dim:
        new_syms = []
        for i in range(dim - len(syms)):
            new_syms.append(syms[i%len(syms)] + str(i/len(syms)))
        syms.extend(new_syms)
    # One term per monomial of total degree <= order in `dim` variables.
    terms = []
    for i in range(order + 1):
        terms.extend(comb_w_rep(dim, i))
    rank = len(terms)
    V = zeros(rank)
    # One fresh-symbol generator per dimension; uses the Python 2
    # generator .next() method.
    generators = [symbol_gen(syms[i]) for i in range(dim)]
    all_syms = []
    for i in range(rank):
        # New point (row) -> new batch of temporary symbols.
        row_syms = [g.next() for g in generators]
        all_syms.append(row_syms)
        for j,term in enumerate(terms):
            # Entry (i, j) is the j-th monomial evaluated at row i's symbols;
            # V is addressed with a flat index (old sympy Matrix API).
            v_entry = 1
            for k in term:
                v_entry *= row_syms[k]
            V[i*rank + j] = v_entry
    return V, all_syms, terms
def gen_poly(points, order, syms):
    """Generates a polynomial using a Vandermonde system.

    ``points`` is a sequence of tuples whose last entry is the function
    value and whose leading entries are the coordinates; ``syms`` supplies
    the variables of the returned polynomial expression.
    """
    num_pts = len(points)
    if num_pts == 0:
        raise ValueError, "Must provide points"
    # Dimension is inferred from the points: all but the last tuple entry.
    dim = len(points[0]) - 1
    if dim > len(syms):
        raise ValueError, \
            "Must provide at lease %d symbols for the polynomial" % dim
    V, tmp_syms, terms = vandermonde(order, dim)
    if num_pts < V.shape[0]:
        raise ValueError, \
            "Must provide %d points for order %d, dimension "\
            "%d polynomial, given %d points" % \
            (V.shape[0], order, dim, num_pts)
    elif num_pts > V.shape[0]:
        # Extra points are ignored (warn rather than fail).
        print "gen_poly given %d points but only requires %d, "\
            "continuing using the first %d points" % \
            (num_pts, V.shape[0], V.shape[0])
        num_pts = V.shape[0]
    # Substitute the concrete coordinates for the temporary row symbols.
    subs_dict = {}
    for j in range(dim):
        for i in range(num_pts):
            subs_dict[tmp_syms[i][j]] = points[i][j]
    V_pts = V.subs(subs_dict)
    # Solve V * coeffs = values by explicit inversion.
    V_inv = V_pts.inv()
    coeffs = V_inv.multiply(Matrix([points[i][-1] for i in xrange(num_pts)]))
    # Assemble the polynomial: sum of coefficient * monomial in syms.
    f = 0
    for j,term in enumerate(terms):
        t = 1
        for k in term:
            t *= syms[k]
        f += coeffs[j]*t
    return f
def main():
    """Demonstrate Vandermonde determinants and polynomial fitting."""
    order = 2
    V, tmp_syms, _ = vandermonde(order)
    print "Vandermonde matrix of order 2 in 1 dimension"
    pprint(V)
    print '-'*79
    print "Computing the determinate and comparing to \sum_{0<i<j<=3}(a_j - a_i)"
    # Known closed form: det(V) is the product of pairwise differences.
    det_sum = 1
    for j in range(order + 1):
        for i in range(j):
            det_sum *= (tmp_syms[j][0] - tmp_syms[i][0])
    print """
det(V) = %(det)s
\sum = %(sum)s
= %(sum_expand)s
""" % { "det": V.det(),
        "sum": det_sum,
        "sum_expand": det_sum.expand(),
      }
    print '-'*79
    print "Polynomial fitting with a Vandermonde Matrix:"
    x,y,z = symbols('x y z')
    # 1D quadratic through three points.
    points = [(0,3), (1,2), (2,3)]
    print """
Quadratic function, represented by 3 points:
points = %(pts)s
f = %(f)s
""" % { "pts" : points,
        "f" : gen_poly(points, 2, [x]),
      }
    # 2D quadratic through six points.
    points = [(0, 1, 1), (1, 0, 0), (1, 1, 0), (Rational(1, 2), 0, 0),
              (0, Rational(1, 2), 0), (Rational(1, 2), Rational(1, 2), 0)]
    print """
2D Quadratic function, represented by 6 points:
points = %(pts)s
f = %(f)s
""" % { "pts" : points,
        "f" : gen_poly(points, 2, [x, y]),
      }
    # 3D linear function through four points.
    points = [(0, 1, 1, 1), (1, 1, 0, 0), (1, 0, 1, 0), (1, 1, 1, 1)]
    print """
3D linear function, represented by 4 points:
points = %(pts)s
f = %(f)s
""" % { "pts" : points,
        "f" : gen_poly(points, 1, [x, y, z]),
      }
# Run the demonstration only when executed as a script.
if __name__ == "__main__":
    main()
| bsd-3-clause | ea47951ae5307503cf4bbff374bcb760 | 27.078313 | 82 | 0.525209 | 3.230076 | false | false | false | false |
mattpap/sympy-polys | sympy/mpmath/identification.py | 3 | 28881 | """
Implements the PSLQ algorithm for integer relation detection,
and derivative algorithms for constant recognition.
"""
from libmp import int_types, sqrt_fixed
# round to nearest integer (can be done more elegantly...)
def round_fixed(x, prec):
    """Round the fixed-point value x (scaled by 2**prec) to the nearest
    integer multiple of 2**prec.

    Halfway cases round toward +infinity, since the arithmetic right
    shift floors.
    """
    half = 1 << (prec - 1)
    return ((x + half) >> prec) << prec
class IdentificationMethods(object):
    """Mixin namespace for constant-identification methods.

    pslq, findpoly and identify are attached as attributes at the
    bottom of this module.
    """
    pass
def pslq(ctx, x, tol=None, maxcoeff=1000, maxsteps=100, verbose=False):
    r"""
    Given a vector of real numbers `x = [x_0, x_1, ..., x_n]`, ``pslq(x)``
    uses the PSLQ algorithm to find a list of integers
    `[c_0, c_1, ..., c_n]` such that

    .. math ::

        |c_1 x_1 + c_2 x_2 + ... + c_n x_n| < \mathrm{tol}

    and such that `\max |c_k| < \mathrm{maxcoeff}`. If no such vector
    exists, :func:`~mpmath.pslq` returns ``None``. The tolerance defaults to
    3/4 of the working precision.

    **Examples**

    Find rational approximations for `\pi`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> pslq([-1, pi], tol=0.01)
        [22, 7]
        >>> pslq([-1, pi], tol=0.001)
        [355, 113]
        >>> mpf(22)/7; mpf(355)/113; +pi
        3.14285714285714
        3.14159292035398
        3.14159265358979

    Pi is not a rational number with denominator less than 1000::

        >>> pslq([-1, pi])
        >>>

    To within the standard precision, it can however be approximated
    by at least one rational number with denominator less than `10^{12}`::

        >>> p, q = pslq([-1, pi], maxcoeff=10**12)
        >>> print p, q
        238410049439 75888275702
        >>> mpf(p)/q
        3.14159265358979

    The PSLQ algorithm can be applied to long vectors. For example,
    we can investigate the rational (in)dependence of integer square
    roots::

        >>> mp.dps = 30
        >>> pslq([sqrt(n) for n in range(2, 5+1)])
        >>>
        >>> pslq([sqrt(n) for n in range(2, 6+1)])
        >>>
        >>> pslq([sqrt(n) for n in range(2, 8+1)])
        [2, 0, 0, 0, 0, 0, -1]

    **Machin formulas**

    A famous formula for `\pi` is Machin's,

    .. math ::

        \frac{\pi}{4} = 4 \operatorname{acot} 5 - \operatorname{acot} 239

    There are actually infinitely many formulas of this type. Two
    others are

    .. math ::

        \frac{\pi}{4} = \operatorname{acot} 1

        \frac{\pi}{4} = 12 \operatorname{acot} 49 + 32 \operatorname{acot} 57
            + 5 \operatorname{acot} 239 + 12 \operatorname{acot} 110443

    We can easily verify the formulas using the PSLQ algorithm::

        >>> mp.dps = 30
        >>> pslq([pi/4, acot(1)])
        [1, -1]
        >>> pslq([pi/4, acot(5), acot(239)])
        [1, -4, 1]
        >>> pslq([pi/4, acot(49), acot(57), acot(239), acot(110443)])
        [1, -12, -32, 5, -12]

    We could try to generate a custom Machin-like formula by running
    the PSLQ algorithm with a few inverse cotangent values, for example
    acot(2), acot(3) ... acot(10). Unfortunately, there is a linear
    dependence among these values, resulting in only that dependence
    being detected, with a zero coefficient for `\pi`::

        >>> pslq([pi] + [acot(n) for n in range(2,11)])
        [0, 1, -1, 0, 0, 0, -1, 0, 0, 0]

    We get better luck by removing linearly dependent terms::

        >>> pslq([pi] + [acot(n) for n in range(2,11) if n not in (3, 5)])
        [1, -8, 0, 0, 4, 0, 0, 0]

    In other words, we found the following formula::

        >>> 8*acot(2) - 4*acot(7)
        3.14159265358979323846264338328
        >>> +pi
        3.14159265358979323846264338328

    **Algorithm**

    This is a fairly direct translation to Python of the pseudocode given by
    David Bailey, "The PSLQ Integer Relation Algorithm":
    http://www.cecm.sfu.ca/organics/papers/bailey/paper/html/node3.html

    The present implementation uses fixed-point instead of floating-point
    arithmetic, since this is significantly (about 7x) faster.
    """
    n = len(x)
    assert n >= 2

    # At too low precision, the algorithm becomes meaningless
    prec = ctx.prec
    assert prec >= 53

    if verbose and prec // max(2,n) < 5:
        print "Warning: precision for PSLQ may be too low"

    # Default tolerance: 3/4 of the working precision.
    target = int(prec * 0.75)

    if tol is None:
        tol = ctx.mpf(2)**(-target)
    else:
        tol = ctx.convert(tol)

    # Extra guard bits absorb fixed-point rounding error.
    extra = 60
    prec += extra

    if verbose:
        print "PSLQ using prec %i and tol %s" % (prec, ctx.nstr(tol))

    tol = ctx.to_fixed(tol, prec)
    assert tol

    # Convert to fixed-point numbers. The dummy None is added so we can
    # use 1-based indexing. (This just allows us to be consistent with
    # Bailey's indexing. The algorithm is 100 lines long, so debugging
    # a single wrong index can be painful.)
    x = [None] + [ctx.to_fixed(ctx.mpf(xk), prec) for xk in x]

    # Sanity check on magnitudes
    minx = min(abs(xx) for xx in x[1:])
    if not minx:
        raise ValueError("PSLQ requires a vector of nonzero numbers")
    if minx < tol//100:
        if verbose:
            print "STOPPING: (one number is too small)"
        return None

    # g = sqrt(4/3) in fixed point (the gamma parameter of PSLQ).
    g = sqrt_fixed((4<<prec)//3, prec)
    A = {}
    B = {}
    H = {}
    # Initialization
    # step 1: A and B start as identity matrices, H as zero.
    for i in xrange(1, n+1):
        for j in xrange(1, n+1):
            A[i,j] = B[i,j] = (i==j) << prec
            H[i,j] = 0
    # step 2: s[k] = norm of the tail x[k..n].
    s = [None] + [0] * n
    for k in xrange(1, n+1):
        t = 0
        for j in xrange(k, n+1):
            t += (x[j]**2 >> prec)
        s[k] = sqrt_fixed(t, prec)
    # Normalize x and s by the full norm s[1].
    t = s[1]
    y = x[:]
    for k in xrange(1, n+1):
        y[k] = (x[k] << prec) // t
        s[k] = (s[k] << prec) // t
    # step 3: build the initial lower-trapezoidal H matrix.
    for i in xrange(1, n+1):
        for j in xrange(i+1, n):
            H[i,j] = 0
        if i <= n-1:
            if s[i]:
                H[i,i] = (s[i+1] << prec) // s[i]
            else:
                H[i,i] = 0
        for j in range(1, i):
            sjj1 = s[j]*s[j+1]
            if sjj1:
                H[i,j] = ((-y[i]*y[j])<<prec)//sjj1
            else:
                H[i,j] = 0
    # step 4: size-reduce H, mirroring every update into y, A and B.
    for i in xrange(2, n+1):
        for j in xrange(i-1, 0, -1):
            #t = floor(H[i,j]/H[j,j] + 0.5)
            if H[j,j]:
                t = round_fixed((H[i,j] << prec)//H[j,j], prec)
            else:
                #t = 0
                continue
            y[j] = y[j] + (t*y[i] >> prec)
            for k in xrange(1, j+1):
                H[i,k] = H[i,k] - (t*H[j,k] >> prec)
            for k in xrange(1, n+1):
                A[i,k] = A[i,k] - (t*A[j,k] >> prec)
                B[k,j] = B[k,j] + (t*B[k,i] >> prec)

    # Main algorithm
    for REP in range(maxsteps):

        # Step 1: pick the row m maximizing g**i * |H[i,i]|.
        m = -1
        szmax = -1
        for i in range(1, n):
            h = H[i,i]
            sz = (g**i * abs(h)) >> (prec*(i-1))
            if sz > szmax:
                m = i
                szmax = sz

        # Step 2: swap entries/rows m and m+1 of y, H, A (columns of B).
        y[m], y[m+1] = y[m+1], y[m]
        tmp = {}  # (never used below)
        for i in xrange(1,n+1): H[m,i], H[m+1,i] = H[m+1,i], H[m,i]
        for i in xrange(1,n+1): A[m,i], A[m+1,i] = A[m+1,i], A[m,i]
        for i in xrange(1,n+1): B[i,m], B[i,m+1] = B[i,m+1], B[i,m]

        # Step 3: rotate columns m, m+1 of H (t1**2 + t2**2 = 1) to
        # restore the trapezoidal shape after the swap.
        if m <= n - 2:
            t0 = sqrt_fixed((H[m,m]**2 + H[m,m+1]**2)>>prec, prec)
            # A zero element probably indicates that the precision has
            # been exhausted. XXX: this could be spurious, due to
            # using fixed-point arithmetic
            if not t0:
                break
            t1 = (H[m,m] << prec) // t0
            t2 = (H[m,m+1] << prec) // t0
            for i in xrange(m, n+1):
                t3 = H[i,m]
                t4 = H[i,m+1]
                H[i,m] = (t1*t3+t2*t4) >> prec
                H[i,m+1] = (-t2*t3+t1*t4) >> prec

        # Step 4: size-reduce again after the rotation.
        for i in xrange(m+1, n+1):
            for j in xrange(min(i-1, m+1), 0, -1):
                try:
                    t = round_fixed((H[i,j] << prec)//H[j,j], prec)
                # Precision probably exhausted
                except ZeroDivisionError:
                    break
                y[j] = y[j] + ((t*y[i]) >> prec)
                for k in xrange(1, j+1):
                    H[i,k] = H[i,k] - (t*H[j,k] >> prec)
                for k in xrange(1, n+1):
                    A[i,k] = A[i,k] - (t*A[j,k] >> prec)
                    B[k,j] = B[k,j] + (t*B[k,i] >> prec)

        # Until a relation is found, the error typically decreases
        # slowly (e.g. a factor 1-10) with each step TODO: we could
        # compare err from two successive iterations. If there is a
        # large drop (several orders of magnitude), that indicates a
        # "high quality" relation was detected. Reporting this to
        # the user somehow might be useful.
        best_err = maxcoeff<<prec
        for i in xrange(1, n+1):
            err = abs(y[i])
            # Maybe we are done?
            if err < tol:
                # We are done if the coefficients are acceptable
                vec = [int(round_fixed(B[j,i], prec) >> prec) for j in \
                    range(1,n+1)]
                if max(abs(v) for v in vec) < maxcoeff:
                    if verbose:
                        print "FOUND relation at iter %i/%i, error: %s" % \
                            (REP, maxsteps, ctx.nstr(err / ctx.mpf(2)**prec, 1))
                    return vec
            best_err = min(err, best_err)

        # Calculate a lower bound for the norm. We could do this
        # more exactly (using the Euclidean norm) but there is probably
        # no practical benefit.
        recnorm = max(abs(h) for h in H.values())
        if recnorm:
            norm = ((1 << (2*prec)) // recnorm) >> prec
            norm //= 100
        else:
            norm = ctx.inf

        if verbose:
            print "%i/%i: Error: %8s Norm: %s" % \
                (REP, maxsteps, ctx.nstr(best_err / ctx.mpf(2)**prec, 1), norm)

        # No relation with coefficients below maxcoeff can exist.
        if norm >= maxcoeff:
            break

    if verbose:
        print "CANCELLING after step %i/%i." % (REP, maxsteps)
        print "Could not find an integer relation. Norm bound: %s" % norm

    return None
def findpoly(ctx, x, n=1, **kwargs):
    r"""
    ``findpoly(x, n)`` returns the coefficients of an integer
    polynomial `P` of degree at most `n` such that `P(x) \approx 0`.
    If no polynomial having `x` as a root can be found,
    :func:`~mpmath.findpoly` returns ``None``.

    :func:`~mpmath.findpoly` works by successively calling :func:`~mpmath.pslq` with
    the vectors `[1, x]`, `[1, x, x^2]`, `[1, x, x^2, x^3]`, ...,
    `[1, x, x^2, .., x^n]` as input. Keyword arguments given to
    :func:`~mpmath.findpoly` are forwarded verbatim to :func:`~mpmath.pslq`. In
    particular, you can specify a tolerance for `P(x)` with ``tol``
    and a maximum permitted coefficient size with ``maxcoeff``.

    For large values of `n`, it is recommended to run :func:`~mpmath.findpoly`
    at high precision; preferably 50 digits or more.

    **Examples**

    By default (degree `n = 1`), :func:`~mpmath.findpoly` simply finds a linear
    polynomial with a rational root::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> findpoly(0.7)
        [-10, 7]

    The generated coefficient list is valid input to ``polyval`` and
    ``polyroots``::

        >>> nprint(polyval(findpoly(phi, 2), phi), 1)
        -2.0e-16
        >>> for r in polyroots(findpoly(phi, 2)):
        ...     print r
        ...
        -0.618033988749895
        1.61803398874989

    Numbers of the form `m + n \sqrt p` for integers `(m, n, p)` are
    solutions to quadratic equations. As we find here, `1+\sqrt 2`
    is a root of the polynomial `x^2 - 2x - 1`::

        >>> findpoly(1+sqrt(2), 2)
        [1, -2, -1]
        >>> findroot(lambda x: x**2 - 2*x - 1, 1)
        2.4142135623731

    Despite only containing square roots, the following number results
    in a polynomial of degree 4::

        >>> findpoly(sqrt(2)+sqrt(3), 4)
        [1, 0, -10, 0, 1]

    In fact, `x^4 - 10x^2 + 1` is the *minimal polynomial* of
    `r = \sqrt 2 + \sqrt 3`, meaning that a rational polynomial of
    lower degree having `r` as a root does not exist. Given sufficient
    precision, :func:`~mpmath.findpoly` will usually find the correct
    minimal polynomial of a given algebraic number.

    **Non-algebraic numbers**

    If :func:`~mpmath.findpoly` fails to find a polynomial with given
    coefficient size and tolerance constraints, that means no such
    polynomial exists.

    We can verify that `\pi` is not an algebraic number of degree 3 with
    coefficients less than 1000::

        >>> mp.dps = 15
        >>> findpoly(pi, 3)
        >>>

    It is always possible to find an algebraic approximation of a number
    using one (or several) of the following methods:

    1. Increasing the permitted degree
    2. Allowing larger coefficients
    3. Reducing the tolerance

    One example of each method is shown below::

        >>> mp.dps = 15
        >>> findpoly(pi, 4)
        [95, -545, 863, -183, -298]
        >>> findpoly(pi, 3, maxcoeff=10000)
        [836, -1734, -2658, -457]
        >>> findpoly(pi, 3, tol=1e-7)
        [-4, 22, -29, -2]

    It is unknown whether Euler's constant is transcendental (or even
    irrational). We can use :func:`~mpmath.findpoly` to check that if is
    an algebraic number, its minimal polynomial must have degree
    at least 7 and a coefficient of magnitude at least 1000000::

        >>> mp.dps = 200
        >>> findpoly(euler, 6, maxcoeff=10**6, tol=1e-100, maxsteps=1000)
        >>>

    Note that the high precision and strict tolerance is necessary
    for such high-degree runs, since otherwise unwanted low-accuracy
    approximations will be detected. It may also be necessary to set
    maxsteps high to prevent a premature exit (before the coefficient
    bound has been reached). Running with ``verbose=True`` to get an
    idea what is happening can be useful.
    """
    x = ctx.mpf(x)
    assert n >= 1
    if x == 0:
        return [1, 0]
    # Build the power vector [1, x, x**2, ..., x**n] and look for an
    # integer relation among its entries.
    xs = [ctx.mpf(1)]
    for i in range(1,n+1):
        xs.append(x**i)
    a = ctx.pslq(xs, **kwargs)
    if a is not None:
        # pslq orders coefficients by ascending power; reverse so the
        # leading coefficient comes first (the order polyval expects).
        return a[::-1]
def fracgcd(p, q):
    """Reduce the fraction p/q to lowest terms.

    Returns a plain integer when the reduced denominator is 1,
    otherwise the tuple (p, q).
    """
    a, b = p, q
    while b:
        a, b = b, a % b
    # a is now gcd(p, q); divide it out unless it is trivially 1.
    if a != 1:
        p, q = p // a, q // a
    return p if q == 1 else (p, q)
def pslqstring(r, constants):
    """Format a PSLQ relation r = [q, p1, p2, ...] as a rational linear
    combination of the named constants, e.g. '(3 + (1/2)*pi)'."""
    q, coeffs = r[0], r[1:]
    terms = []
    for p, (_, name) in zip(coeffs, constants):
        if not p:
            continue
        z = fracgcd(-p, q)
        # The constant named '1' contributes a bare rational term.
        suffix = '' if name == '1' else '*' + name
        if isinstance(z, int_types):
            term = (str(z) if z > 0 else "(%s)" % z) + suffix
        else:
            term = ("(%s/%s)" % z) + suffix
        terms.append(term)
    s = ' + '.join(terms)
    if '+' in s or '*' in s:
        s = '(' + s + ')'
    return s or '0'
def prodstring(r, constants):
    """Format a PSLQ relation over logarithms as a product/quotient of
    fractional powers of the named constants."""
    q, coeffs = r[0], r[1:]
    num_parts = []
    den_parts = []
    for p, (_, name) in zip(coeffs, constants):
        if not p:
            continue
        z = fracgcd(-p, q)
        if isinstance(z, int_types):
            t = name if abs(z) == 1 else '%s**%s' % (name, abs(z))
            # Negative exponents go to the denominator.
            (den_parts if z < 0 else num_parts).append(t)
        else:
            t = '%s**(%s/%s)' % (name, abs(z[0]), z[1])
            (den_parts if z[0] < 0 else num_parts).append(t)
    num = '*'.join(num_parts)
    den = '*'.join(den_parts)
    if num and den:
        return "(%s)/(%s)" % (num, den)
    if num:
        return num
    if den:
        return "1/(%s)" % den
def quadraticstring(ctx,t,a,b,c):
    """Return a string for the root of c*x**2 + b*x + a that lies
    closest to the target value t."""
    # Normalize so the leading coefficient is positive.
    if c < 0:
        a,b,c = -a,-b,-c
    disc = b**2 - 4*a*c
    u1 = (-b + ctx.sqrt(disc))/(2*c)
    u2 = (-b - ctx.sqrt(disc))/(2*c)
    pick_plus = abs(u1 - t) < abs(u2 - t)
    if pick_plus:
        if b:
            return '((%s+sqrt(%s))/%s)' % (-b, disc, 2*c)
        return '(sqrt(%s)/%s)' % (-4*a*c, 2*c)
    if b:
        return '((%s-sqrt(%s))/%s)' % (-b, disc, 2*c)
    return '(-sqrt(%s)/%s)' % (-4*a*c, 2*c)
# Transformation y = f(x,c), with inverse function x = f(y,c)
# The third entry indicates whether the transformation is
# redundant when c = 1
transforms = [
    # scaling and inversion
    (lambda ctx,x,c: x*c, '$y/$c', 0),
    (lambda ctx,x,c: x/c, '$c*$y', 1),
    (lambda ctx,x,c: c/x, '$c/$y', 0),
    # squares
    (lambda ctx,x,c: (x*c)**2, 'sqrt($y)/$c', 0),
    (lambda ctx,x,c: (x/c)**2, '$c*sqrt($y)', 1),
    (lambda ctx,x,c: (c/x)**2, '$c/sqrt($y)', 0),
    (lambda ctx,x,c: c*x**2, 'sqrt($y)/sqrt($c)', 1),
    (lambda ctx,x,c: x**2/c, 'sqrt($c)*sqrt($y)', 1),
    (lambda ctx,x,c: c/x**2, 'sqrt($c)/sqrt($y)', 1),
    # square roots
    (lambda ctx,x,c: ctx.sqrt(x*c), '$y**2/$c', 0),
    (lambda ctx,x,c: ctx.sqrt(x/c), '$c*$y**2', 1),
    (lambda ctx,x,c: ctx.sqrt(c/x), '$c/$y**2', 0),
    (lambda ctx,x,c: c*ctx.sqrt(x), '$y**2/$c**2', 1),
    (lambda ctx,x,c: ctx.sqrt(x)/c, '$c**2*$y**2', 1),
    (lambda ctx,x,c: c/ctx.sqrt(x), '$c**2/$y**2', 1),
    # exponentials
    (lambda ctx,x,c: ctx.exp(x*c), 'log($y)/$c', 0),
    (lambda ctx,x,c: ctx.exp(x/c), '$c*log($y)', 1),
    (lambda ctx,x,c: ctx.exp(c/x), '$c/log($y)', 0),
    (lambda ctx,x,c: c*ctx.exp(x), 'log($y/$c)', 1),
    (lambda ctx,x,c: ctx.exp(x)/c, 'log($c*$y)', 1),
    (lambda ctx,x,c: c/ctx.exp(x), 'log($c/$y)', 0),
    # logarithms
    (lambda ctx,x,c: ctx.ln(x*c), 'exp($y)/$c', 0),
    (lambda ctx,x,c: ctx.ln(x/c), '$c*exp($y)', 1),
    (lambda ctx,x,c: ctx.ln(c/x), '$c/exp($y)', 0),
    (lambda ctx,x,c: c*ctx.ln(x), 'exp($y/$c)', 1),
    (lambda ctx,x,c: ctx.ln(x)/c, 'exp($c*$y)', 1),
    (lambda ctx,x,c: c/ctx.ln(x), 'exp($c/$y)', 0),
]
def identify(ctx, x, constants=[], tol=None, maxcoeff=1000, full=False,
        verbose=False):
    """
    Given a real number `x`, ``identify(x)`` attempts to find an exact
    formula for `x`. This formula is returned as a string. If no match
    is found, ``None`` is returned. With ``full=True``, a list of
    matching formulas is returned.

    As a simple example, :func:`~mpmath.identify` will find an algebraic
    formula for the golden ratio::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> identify(phi)
        '((1+sqrt(5))/2)'

    :func:`~mpmath.identify` can identify simple algebraic numbers and simple
    combinations of given base constants, as well as certain basic
    transformations thereof. More specifically, :func:`~mpmath.identify`
    looks for the following:

    1. Fractions
    2. Quadratic algebraic numbers
    3. Rational linear combinations of the base constants
    4. Any of the above after first transforming `x` into `f(x)` where
       `f(x)` is `1/x`, `\sqrt x`, `x^2`, `\log x` or `\exp x`, either
       directly or with `x` or `f(x)` multiplied or divided by one of
       the base constants
    5. Products of fractional powers of the base constants and
       small integers

    Base constants can be given as a list of strings representing mpmath
    expressions (:func:`~mpmath.identify` will ``eval`` the strings to numerical
    values and use the original strings for the output), or as a dict of
    formula:value pairs.

    In order not to produce spurious results, :func:`~mpmath.identify` should
    be used with high precision; preferably 50 digits or more.

    **Examples**

    Simple identifications can be performed safely at standard
    precision. Here the default recognition of rational, algebraic,
    and exp/log of algebraic numbers is demonstrated::

        >>> mp.dps = 15
        >>> identify(0.22222222222222222)
        '(2/9)'
        >>> identify(1.9662210973805663)
        'sqrt(((24+sqrt(48))/8))'
        >>> identify(4.1132503787829275)
        'exp((sqrt(8)/2))'
        >>> identify(0.881373587019543)
        'log(((2+sqrt(8))/2))'

    By default, :func:`~mpmath.identify` does not recognize `\pi`. At standard
    precision it finds a not too useful approximation. At slightly
    increased precision, this approximation is no longer accurate
    enough and :func:`~mpmath.identify` more correctly returns ``None``::

        >>> identify(pi)
        '(2**(176/117)*3**(20/117)*5**(35/39))/(7**(92/117))'
        >>> mp.dps = 30
        >>> identify(pi)
        >>>

    Numbers such as `\pi`, and simple combinations of user-defined
    constants, can be identified if they are provided explicitly::

        >>> identify(3*pi-2*e, ['pi', 'e'])
        '(3*pi + (-2)*e)'

    Here is an example using a dict of constants. Note that the
    constants need not be "atomic"; :func:`~mpmath.identify` can just
    as well express the given number in terms of expressions
    given by formulas::

        >>> identify(pi+e, {'a':pi+2, 'b':2*e})
        '((-2) + 1*a + (1/2)*b)'

    Next, we attempt some identifications with a set of base constants.
    It is necessary to increase the precision a bit.

        >>> mp.dps = 50
        >>> base = ['sqrt(2)','pi','log(2)']
        >>> identify(0.25, base)
        '(1/4)'
        >>> identify(3*pi + 2*sqrt(2) + 5*log(2)/7, base)
        '(2*sqrt(2) + 3*pi + (5/7)*log(2))'
        >>> identify(exp(pi+2), base)
        'exp((2 + 1*pi))'
        >>> identify(1/(3+sqrt(2)), base)
        '((3/7) + (-1/7)*sqrt(2))'
        >>> identify(sqrt(2)/(3*pi+4), base)
        'sqrt(2)/(4 + 3*pi)'
        >>> identify(5**(mpf(1)/3)*pi*log(2)**2, base)
        '5**(1/3)*pi*log(2)**2'

    An example of an erroneous solution being found when too low
    precision is used::

        >>> mp.dps = 15
        >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)'])
        '((11/25) + (-158/75)*pi + (76/75)*e + (44/15)*sqrt(2))'
        >>> mp.dps = 50
        >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)'])
        '1/(3*pi + (-4)*e + 2*sqrt(2))'

    **Finding approximate solutions**

    The tolerance ``tol`` defaults to 3/4 of the working precision.
    Lowering the tolerance is useful for finding approximate matches.
    We can for example try to generate approximations for pi::

        >>> mp.dps = 15
        >>> identify(pi, tol=1e-2)
        '(22/7)'
        >>> identify(pi, tol=1e-3)
        '(355/113)'
        >>> identify(pi, tol=1e-10)
        '(5**(339/269))/(2**(64/269)*3**(13/269)*7**(92/269))'

    With ``full=True``, and by supplying a few base constants,
    ``identify`` can generate almost endless lists of approximations
    for any number (the output below has been truncated to show only
    the first few)::

        >>> for p in identify(pi, ['e', 'catalan'], tol=1e-5, full=True):
        ...     print p
        ...     # doctest: +ELLIPSIS
        e/log((6 + (-4/3)*e))
        (3**3*5*e*catalan**2)/(2*7**2)
        sqrt(((-13) + 1*e + 22*catalan))
        log(((-6) + 24*e + 4*catalan)/e)
        exp(catalan*((-1/5) + (8/15)*e))
        catalan*(6 + (-6)*e + 15*catalan)
        sqrt((5 + 26*e + (-3)*catalan))/e
        e*sqrt(((-27) + 2*e + 25*catalan))
        log(((-1) + (-11)*e + 59*catalan))
        ((3/20) + (21/20)*e + (3/20)*catalan)
        ...

    The numerical values are roughly as close to `\pi` as permitted by the
    specified tolerance:

        >>> e/log(6-4*e/3)
        3.14157719846001
        >>> 135*e*catalan**2/98
        3.14166950419369
        >>> sqrt(e-13+22*catalan)
        3.14158000062992
        >>> log(24*e-6+4*catalan)-1
        3.14158791577159

    **Symbolic processing**

    The output formula can be evaluated as a Python expression.
    Note however that if fractions (like '2/3') are present in
    the formula, Python's :func:`~mpmath.eval()` may erroneously perform
    integer division. Note also that the output is not necessarily
    in the algebraically simplest form::

        >>> identify(sqrt(2))
        '(sqrt(8)/2)'

    As a solution to both problems, consider using SymPy's
    :func:`~mpmath.sympify` to convert the formula into a symbolic expression.
    SymPy can be used to pretty-print or further simplify the formula
    symbolically::

        >>> from sympy import sympify
        >>> sympify(identify(sqrt(2)))
        2**(1/2)

    Sometimes :func:`~mpmath.identify` can simplify an expression further than
    a symbolic algorithm::

        >>> from sympy import simplify
        >>> x = sympify('-1/(-3/2+(1/2)*5**(1/2))*(3/2-1/2*5**(1/2))**(1/2)')
        >>> x
        (3/2 - 5**(1/2)/2)**(-1/2)
        >>> x = simplify(x)
        >>> x
        2/(6 - 2*5**(1/2))**(1/2)
        >>> mp.dps = 30
        >>> x = sympify(identify(x.evalf(30)))
        >>> x
        1/2 + 5**(1/2)/2

    (In fact, this functionality is available directly in SymPy as the
    function :func:`~mpmath.nsimplify`, which is essentially a wrapper for
    :func:`~mpmath.identify`.)

    **Miscellaneous issues and limitations**

    The input `x` must be a real number. All base constants must be
    positive real numbers and must not be rationals or rational linear
    combinations of each other.

    The worst-case computation time grows quickly with the number of
    base constants. Already with 3 or 4 base constants,
    :func:`~mpmath.identify` may require several seconds to finish. To search
    for relations among a large number of constants, you should
    consider using :func:`~mpmath.pslq` directly.

    The extended transformations are applied to x, not the constants
    separately. As a result, ``identify`` will for example be able to
    recognize ``exp(2*pi+3)`` with ``pi`` given as a base constant, but
    not ``2*exp(pi)+3``. It will be able to recognize the latter if
    ``exp(pi)`` is given explicitly as a base constant.
    """
    solutions = []

    # Collect a found formula (and trace it in verbose mode).
    def addsolution(s):
        if verbose: print "Found: ", s
        solutions.append(s)

    x = ctx.mpf(x)

    # Further along, x will be assumed positive
    if x == 0:
        if full: return ['0']
        else: return '0'
    if x < 0:
        # Identify |x| and negate the resulting formula(s).
        sol = ctx.identify(-x, constants, tol, maxcoeff, full, verbose)
        if sol is None:
            return sol
        if full:
            return ["-(%s)"%s for s in sol]
        else:
            return "-(%s)" % sol

    if tol:
        tol = ctx.mpf(tol)
    else:
        tol = ctx.eps**0.7
    M = maxcoeff

    # Normalize constants to a list of (value, name) pairs.
    # NOTE(review): the list form eval()s caller-supplied strings in the
    # ctx namespace -- do not pass untrusted input here.
    if constants:
        if isinstance(constants, dict):
            constants = [(ctx.mpf(v), name) for (name, v) in constants.items()]
        else:
            namespace = dict((name, getattr(ctx,name)) for name in dir(ctx))
            constants = [(eval(p, namespace), p) for p in constants]
    else:
        constants = []

    # We always want to find at least rational terms
    if 1 not in [value for (name, value) in constants]:
        constants = [(ctx.mpf(1), '1')] + constants

    # PSLQ with simple algebraic and functional transformations
    for ft, ftn, red in transforms:
        for c, cn in constants:
            if red and cn == '1':
                continue
            t = ft(ctx,x,c)
            # Prevent exponential transforms from wreaking havoc
            if abs(t) > M**2 or abs(t) < tol:
                continue
            # Linear combination of base constants
            r = ctx.pslq([t] + [a[0] for a in constants], tol, M)
            s = None
            if r is not None and max(abs(uw) for uw in r) <= M and r[0]:
                s = pslqstring(r, constants)
            # Quadratic algebraic numbers
            else:
                q = ctx.pslq([ctx.one, t, t**2], tol, M)
                if q is not None and len(q) == 3 and q[2]:
                    aa, bb, cc = q
                    if max(abs(aa),abs(bb),abs(cc)) <= M:
                        s = quadraticstring(ctx,t,aa,bb,cc)
            if s:
                # Substitute the solution back through the transform's
                # inverse template (ftn); drop '/$c' entirely for c = 1.
                if cn == '1' and ('/$c' in ftn):
                    s = ftn.replace('$y', s).replace('/$c', '')
                else:
                    s = ftn.replace('$y', s).replace('$c', cn)
                addsolution(s)
                if not full: return solutions[0]
        if verbose:
            print "."

    # Check for a direct multiplicative formula
    if x != 1:
        # Allow fractional powers of fractions
        ilogs = [2,3,5,7]
        # Watch out for existing fractional powers of fractions
        logs = []
        for a, s in constants:
            if not sum(bool(ctx.findpoly(ctx.ln(a)/ctx.ln(i),1)) for i in ilogs):
                logs.append((ctx.ln(a), s))
        logs = [(ctx.ln(i),str(i)) for i in ilogs] + logs
        # Relation among log(x) and the logs -> product formula for x.
        r = ctx.pslq([ctx.ln(x)] + [a[0] for a in logs], tol, M)
        if r is not None and max(abs(uw) for uw in r) <= M and r[0]:
            addsolution(prodstring(r, logs))
            if not full: return solutions[0]

    if full:
        return sorted(solutions, key=len)
    else:
        return None
# Attach the identification functions to the mixin class so they become
# available as context methods (ctx.pslq, ctx.findpoly, ctx.identify).
IdentificationMethods.pslq = pslq
IdentificationMethods.findpoly = findpoly
IdentificationMethods.identify = identify

# Run the docstring examples when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
mattpap/sympy-polys | sympy/concrete/products.py | 1 | 3802 | from sympy.core import Expr, S, C, Mul, sympify
from sympy.polys import quo, roots
from sympy.simplify import powsimp
class Product(Expr):
    """Represents unevaluated product.

    Constructed as Product(term, (index, lower, upper)); numeric bounds
    are expanded eagerly in __new__.
    """

    def __new__(cls, term, *symbols, **assumptions):
        term = sympify(term)

        # Degenerate numeric terms short-circuit before any limits are read.
        if term.is_Number:
            if term is S.NaN:
                return S.NaN
            elif term is S.Infinity:
                return S.NaN
            elif term is S.NegativeInfinity:
                return S.NaN
            elif term is S.Zero:
                return S.Zero
            elif term is S.One:
                return S.One

        if len(symbols) == 1:
            symbol = symbols[0]

            # Accept either Eq(k, Interval-like) or a (k, a, n) sequence.
            if isinstance(symbol, C.Equality):
                k = symbol.lhs
                a = symbol.rhs.start
                n = symbol.rhs.end
            elif isinstance(symbol, (tuple, list)):
                k, a, n = symbol
            else:
                raise ValueError("Invalid arguments")

            k, a, n = map(sympify, (k, a, n))

            # Fully numeric bounds: multiply the terms out immediately.
            if isinstance(a, C.Number) and isinstance(n, C.Number):
                return Mul(*[term.subs(k, i) for i in xrange(int(a), int(n)+1)])
        else:
            # Multiple limit tuples are not supported.
            raise NotImplementedError

        obj = Expr.__new__(cls, **assumptions)
        obj._args = (term, k, a, n)

        return obj

    @property
    def term(self):
        # The repeated factor.
        return self._args[0]

    @property
    def index(self):
        # The bound product variable.
        return self._args[1]

    @property
    def lower(self):
        # Lower limit of the index.
        return self._args[2]

    @property
    def upper(self):
        # Upper limit of the index.
        return self._args[3]

    def doit(self, **hints):
        """Evaluate the product; return self when no closed form is found."""
        term = self.term
        lower = self.lower
        upper = self.upper

        if hints.get('deep', True):
            term = term.doit(**hints)
            lower = lower.doit(**hints)
            upper = upper.doit(**hints)

        prod = self._eval_product(lower, upper, term)

        if prod is not None:
            return powsimp(prod)
        else:
            return self

    def _eval_product(self, a, n, term):
        """Try to evaluate prod(term, (index, a, n)) symbolically.

        Returns None when no rule applies.
        """
        from sympy import sum, Sum

        k = self.index

        if not term.has(k):
            # Constant factor repeated (n - a + 1) times.
            return term**(n-a+1)
        elif term.is_polynomial(k):
            poly = term.as_poly(k)

            A = B = Q = S.One
            # NOTE(review): C_ is assigned but not used below.
            C_= poly.LC()

            all_roots = roots(poly, multiple=True)

            # Each linear factor (k - r) yields a rising factorial.
            for r in all_roots:
                A *= C.RisingFactorial(a-r, n-a+1)
                Q *= n - r

            # Leftover factor with unresolved roots stays unevaluated.
            if len(all_roots) < poly.degree():
                B = Product(quo(poly, Q.as_poly(k)), (k, a, n))

            return poly.LC()**(n-a+1) * A * B
        elif term.is_Add:
            # Split a sum via its numerator/denominator representation.
            p, q = term.as_numer_denom()

            p = self._eval_product(a, n, p)
            q = self._eval_product(a, n, q)

            return p / q
        elif term.is_Mul:
            # Evaluate each factor independently; keep failures symbolic.
            exclude, include = [], []

            for t in term.args:
                p = self._eval_product(a, n, t)

                if p is not None:
                    exclude.append(p)
                else:
                    include.append(t)

            if not exclude:
                return None
            else:
                A, B = Mul(*exclude), Mul(*include)
                return A * Product(B, (k, a, n))
        elif term.is_Pow:
            if not term.base.has(k):
                # prod(b**f(k)) = b**sum(f(k)).
                s = sum(term.exp, (k, a, n))

                if not isinstance(s, Sum):
                    return term.base**s
            elif not term.exp.has(k):
                # prod(f(k)**e) = (prod f(k))**e.
                p = self._eval_product(a, n, term.base)

                if p is not None:
                    return p**term.exp
def product(*args, **kwargs):
    """Construct a Product and evaluate it shallowly when it stayed
    unevaluated; otherwise return the already-computed result."""
    result = Product(*args, **kwargs)
    if not isinstance(result, Product):
        # Product.__new__ already collapsed to a concrete value.
        return result
    return result.doit(deep=False)
| bsd-3-clause | 6e0b026fe601e8dba71daa11f5c98a25 | 25.22069 | 80 | 0.468175 | 3.907503 | false | false | false | false |
mattpap/sympy-polys | sympy/core/core.py | 1 | 7710 | """ The core's core. """
from assumptions import AssumeMeths, make__get_assumption
# used for canonical ordering of symbolic sequences via the __cmp__
# method: classes earlier in this list sort before later ones.
# FIXME this is *so* irrelevant and outdated!
ordering_of_classes = [
    # singleton numbers
    'Zero', 'One','Half','Infinity','NaN','NegativeOne','NegativeInfinity',
    # numbers
    'Integer','Rational','Real',
    # singleton symbols
    'Exp1','Pi','ImaginaryUnit',
    # symbols
    'Symbol','Wild','Temporary',
    # Functions that should come before Pow/Add/Mul
    'ApplyConjugate', 'ApplyAbs',
    # arithmetic operations
    'Pow', 'Mul', 'Add',
    # function values
    'Apply',
    'ApplyExp','ApplyLog',
    'ApplySin','ApplyCos','ApplyTan','ApplyCot',
    'ApplyASin','ApplyACos','ApplyATan','ApplyACot',
    'ApplySinh','ApplyCosh','ApplyTanh','ApplyCoth',
    'ApplyASinh','ApplyACosh','ApplyATanh','ApplyACoth',
    'ApplyRisingFactorial','ApplyFallingFactorial',
    'ApplyFactorial','ApplyBinomial',
    'ApplyFloor', 'ApplyCeiling',
    'ApplyRe','ApplyIm', 'ApplyArg',
    'ApplySqrt','ApplySign',
    'ApplyMrvLog',
    'ApplyGamma','ApplyLowerGamma','ApplyUpperGamma','ApplyPolyGamma',
    'ApplyErf',
    'ApplyChebyshev','ApplyChebyshev2',
    'Derivative','Integral',
    # defined singleton functions
    'Abs','Sign','Sqrt',
    'Floor', 'Ceiling',
    'Re', 'Im', 'Arg',
    'Conjugate',
    'Exp','Log','MrvLog',
    'Sin','Cos','Tan','Cot','ASin','ACos','ATan','ACot',
    'Sinh','Cosh','Tanh','Coth','ASinh','ACosh','ATanh','ACoth',
    'RisingFactorial','FallingFactorial',
    'Factorial','Binomial',
    'Gamma','LowerGamma','UpperGamma','PolyGamma',
    'Erf',
    # special polynomials
    'Chebyshev','Chebyshev2',
    # undefined functions
    'Function','WildFunction',
    # anonymous functions
    'Lambda',
    # operators
    'FDerivative','FApply',
    # composition of functions
    'FPow', 'Composition',
    # Landau O symbol
    'Order',
    # relational operations
    'Equality', 'Unequality', 'StrictInequality', 'Inequality',
    ]
class BasicType(type):
    """Base metaclass for sympy core classes; BasicMeta derives from it."""
    pass
class BasicMeta(BasicType):
classnamespace = {}
all_classes = set()
singleton = {}
keep_sign = False
def __init__(cls, *args, **kws):
n = cls.__name__
BasicMeta.all_classes.add(cls)
BasicMeta.classnamespace[n] = cls
# --- assumptions ---
# initialize default_assumptions dictionary
default_assumptions = {}
for k,v in cls.__dict__.iteritems():
if not k.startswith('is_'):
continue
# this is not an assumption (e.g. is_Integer)
if k[3:] not in AssumeMeths._assume_defined:
continue
k = k[3:]
if isinstance(v,(bool,int,long,type(None))):
if v is not None:
v = bool(v)
default_assumptions[k] = v
#print ' %r <-- %s' % (k,v)
# XXX maybe we should try to keep ._default_premises out of class ?
# XXX __slots__ in class ?
cls._default_premises = default_assumptions
for base in cls.__bases__:
try:
base_premises = base._default_premises
except AttributeError:
continue # no ._default_premises is ok
for k,v in base_premises.iteritems():
# if an assumption is already present in child, we should ignore base
# e.g. Integer.is_integer=T, but Rational.is_integer=F (for speed)
if k in default_assumptions:
continue
default_assumptions[k] = v
# deduce all consequences from default assumptions -- make it complete
xass = AssumeMeths._assume_rules.deduce_all_facts(default_assumptions)
# and store completed set into cls -- this way we'll avoid rededucing
# extensions of class default assumptions each time on instance
# creation -- we keep it prededuced already.
cls.default_assumptions = xass
#print '\t(%2i) %s' % (len(default_assumptions), default_assumptions)
#print '\t(%2i) %s' % (len(xass), xass)
# let's store new derived assumptions back into class.
# this will result in faster access to this attributes.
#
# Timings
# -------
#
# a = Integer(5)
# %timeit a.is_zero -> 20 us (without this optimization)
# %timeit a.is_zero -> 2 us (with this optimization)
#
#
# BTW: it is very important to study the lessons learned here --
# we could drop Basic.__getattr__ completely (!)
#
# %timeit x.is_Add -> 2090 ns (Basic.__getattr__ present)
# %timeit x.is_Add -> 825 ns (Basic.__getattr__ absent)
#
# so we may want to override all assumptions is_<xxx> methods and
# remove Basic.__getattr__
# first we need to collect derived premises
derived_premises = {}
for k,v in xass.iteritems():
if k not in default_assumptions:
derived_premises[k] = v
cls._derived_premises = derived_premises
for k,v in xass.iteritems():
assert v == cls.__dict__.get('is_'+k, v), (cls,k,v)
# NOTE: this way Integer.is_even = False (inherited from Rational)
# NOTE: the next code blocks add 'protection-properties' to overcome this
setattr(cls, 'is_'+k, v)
# protection e.g. for Initeger.is_even=F <- (Rational.is_integer=F)
for base in cls.__bases__:
try:
base_derived_premises = base._derived_premises
except AttributeError:
continue # no ._derived_premises is ok
for k,v in base_derived_premises.iteritems():
if not cls.__dict__.has_key('is_'+k):
#print '%s -- overriding: %s' % (cls.__name__, k)
is_k = make__get_assumption(cls.__name__, k)
setattr(cls, 'is_'+k, property(is_k))
def __cmp__(cls, other):
# If the other object is not a Basic subclass, then we are not equal to
# it.
if not isinstance(other, BasicType):
return -1
n1 = cls.__name__
n2 = other.__name__
c = cmp(n1,n2)
if not c: return 0
UNKNOWN = len(ordering_of_classes)+1
try:
i1 = ordering_of_classes.index(n1)
except ValueError:
#print 'Add',n1,'to basic.ordering_of_classes list'
#return c
i1 = UNKNOWN
try:
i2 = ordering_of_classes.index(n2)
except ValueError:
#print 'Add',n2,'to basic.ordering_of_classes list'
#return c
i2 = UNKNOWN
if i1 == UNKNOWN and i2 == UNKNOWN:
return c
return cmp(i1,i2)
def __lt__(cls, other):
if cls.__cmp__(other)==-1:
return True
return False
def __gt__(cls, other):
if cls.__cmp__(other)==1:
return True
return False
BasicMeta.all_classes.add(BasicMeta)
class ClassesRegistry:
"""Namespace for SymPy classes
This is needed to avoid problems with cyclic imports.
To get a SymPy class you do this:
C.<class_name>
e.g.
C.Rational
C.Add
"""
def __getattr__(self, name):
try:
cls = BasicMeta.classnamespace[name]
except KeyError:
raise AttributeError("No SymPy class '%s'" % name)
setattr(self, name, cls)
return cls
C = ClassesRegistry()
| bsd-3-clause | b18d496ee917142daff688a2b13f765b | 29.474308 | 85 | 0.558366 | 3.814943 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/media/drivers/directsound/__init__.py | 5 | 13783 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Windows DirectSound audio implementation.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import math
import time
from pyglet.media import AudioPlayer, Listener, MediaException
from pyglet.media.drivers.directsound import lib_dsound as lib
from pyglet.window.win32 import _user32
class DirectSoundException(MediaException):
pass
def _db(gain):
'''Convert linear gain in range [0.0, 1.0] to 100ths of dB.'''
if gain <= 0:
return -10000
return max(-10000, min(int(1000 * math.log(min(gain, 1))), 0))
class DirectSoundAudioPlayer(AudioPlayer):
_buffer_size = 44800 * 1
_update_buffer_size = _buffer_size // 4
_buffer_size_secs = None
_cone_inner_angle = 360
_cone_outer_angle = 360
UPDATE_PERIOD = 0.05
def __init__(self, audio_format):
super(DirectSoundAudioPlayer, self).__init__(audio_format)
self._playing = False
self._timestamp = 0.
self._buffer = None
self._buffer_playing = False
self._data_size = 0 # amount of buffer filled by this player
self._play_cursor = 0
self._buffer_time = 0. # ts of buffer at buffer_time_pos
self._buffer_time_pos = 0
self._write_cursor = 0
self._timestamps = []
self._eos_count = 0
self._dirty_size = 0
wfx = lib.WAVEFORMATEX()
wfx.wFormatTag = lib.WAVE_FORMAT_PCM
wfx.nChannels = audio_format.channels
wfx.nSamplesPerSec = audio_format.sample_rate
wfx.wBitsPerSample = audio_format.sample_size
wfx.nBlockAlign = wfx.wBitsPerSample * wfx.nChannels // 8
wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign
dsbdesc = lib.DSBUFFERDESC()
dsbdesc.dwSize = ctypes.sizeof(dsbdesc)
dsbdesc.dwFlags = (lib.DSBCAPS_GLOBALFOCUS |
lib.DSBCAPS_GETCURRENTPOSITION2 |
lib.DSBCAPS_CTRLFREQUENCY |
lib.DSBCAPS_CTRLVOLUME)
if audio_format.channels == 1:
dsbdesc.dwFlags |= lib.DSBCAPS_CTRL3D
dsbdesc.dwBufferBytes = self._buffer_size
dsbdesc.lpwfxFormat = ctypes.pointer(wfx)
self._buffer = lib.IDirectSoundBuffer()
dsound.CreateSoundBuffer(dsbdesc, ctypes.byref(self._buffer), None)
if audio_format.channels == 1:
self._buffer3d = lib.IDirectSound3DBuffer()
self._buffer.QueryInterface(lib.IID_IDirectSound3DBuffer,
ctypes.byref(self._buffer3d))
else:
self._buffer3d = None
self._buffer_size_secs = \
self._buffer_size / float(audio_format.bytes_per_second)
self._buffer.SetCurrentPosition(0)
def __del__(self):
try:
self._buffer.Stop()
self._buffer.Release()
if self._buffer3d:
self._buffer3d.Release()
except:
pass
def get_write_size(self):
if self._data_size < self._buffer_size:
return self._buffer_size - self._data_size
play_cursor = self._play_cursor
if self._write_cursor == play_cursor and self._buffer_playing:
# Polling too fast, no play cursor movement
return 0
elif self._write_cursor == play_cursor and not self._playing:
# Paused and up-to-date
return 0
elif self._write_cursor < play_cursor:
# Play cursor ahead of write cursor
write_size = play_cursor - self._write_cursor
else:
# Play cursor behind write cursor, wraps around
write_size = self._buffer_size - self._write_cursor + play_cursor
if write_size < self._update_buffer_size and not self._dirty_size:
return 0
return write_size
def write(self, audio_data, length=None):
# Pass audio_data=None, length>0 to write silence
if length is None:
write_size = self.get_write_size()
length = min(audio_data.length, write_size)
if length == 0:
return 0
if self._data_size < self._buffer_size:
self._data_size = min(self._data_size + length, self._buffer_size)
p1 = ctypes.c_void_p()
l1 = lib.DWORD()
p2 = ctypes.c_void_p()
l2 = lib.DWORD()
self._buffer.Lock(self._write_cursor, length,
ctypes.byref(p1), l1, ctypes.byref(p2), l2, 0)
assert length == l1.value + l2.value
if audio_data:
if self._write_cursor >= self._play_cursor:
wc = self._write_cursor
else:
wc = self._write_cursor + self._buffer_size
self._timestamps.append((wc, audio_data.timestamp))
ctypes.memmove(p1, audio_data.data, l1.value)
audio_data.consume(l1.value, self.audio_format)
if l2.value:
ctypes.memmove(p2, audio_data.data, l2.value)
audio_data.consume(l2.value, self.audio_format)
else:
ctypes.memset(p1, 0, l1.value)
if l2.value:
ctypes.memset(p2, 0, l2.value)
pass
self._buffer.Unlock(p1, l1, p2, l2)
self._write_cursor += length
self._write_cursor %= self._buffer_size
def write_eos(self):
if self._write_cursor > self._play_cursor:
wc = self._write_cursor
else:
wc = self._write_cursor + self._buffer_size
self._timestamps.append((wc, 'eos'))
def write_end(self):
if not self._dirty_size:
self._dirty_size = self._buffer_size
def pump(self):
# Update play cursor, check for wraparound and EOS markers
play_cursor = lib.DWORD()
self._buffer.GetCurrentPosition(play_cursor, None)
if play_cursor.value < self._play_cursor:
# Wrapped around
self._buffer_time_pos -= self._buffer_size
self._timestamps = \
[(a - self._buffer_size, t) for a, t in self._timestamps]
self._play_cursor = play_cursor.value
try:
while self._timestamps[0][0] < self._play_cursor:
pos, timestamp = self._timestamps.pop(0)
if timestamp == 'eos':
self._eos_count += 1
else:
self._buffer_time = timestamp
self._buffer_time_pos = pos
except IndexError:
pass
self._timestamp = self._buffer_time + \
(self._play_cursor - self._buffer_time_pos) \
/ float(self.audio_format.bytes_per_second)
# Write silence
if self._dirty_size:
write_size = self.get_write_size()
length = min(write_size, self._dirty_size)
self.write(None, length)
self._dirty_size -= length
if self._dirty_size < 0:
self._dirty_size = 0
if self._playing and not self._buffer_playing:
self._buffer.Play(0, 0, lib.DSBPLAY_LOOPING)
self._buffer_playing = True
def get_time(self):
return self._timestamp
def play(self):
if self._playing:
return
self._playing = True
self._buffer.Play(0, 0, lib.DSBPLAY_LOOPING)
self._buffer_playing = True
def stop(self):
if not self._playing:
return
self._playing = False
self._buffer.Stop()
self._buffer_playing = False
def clear(self):
self._eos_count = 0
self._timestamps = []
self._write_cursor = 0
self._buffer.SetCurrentPosition(0)
self._buffer_time = 0.
self._buffer_time_pos = 0
self._data_size = 0
def clear_eos(self):
if self._eos_count > 0:
self._eos_count -= 1
return True
return False
def _get_source(self):
if self._sources:
return self._sources[0]
return None
def set_volume(self, volume):
volume = _db(volume)
self._buffer.SetVolume(volume)
def set_position(self, position):
if self._buffer3d:
x, y, z = position
self._buffer3d.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)
def set_min_distance(self, min_distance):
if self._buffer3d:
self._buffer3d.SetMinDistance(min_distance, lib.DS3D_IMMEDIATE)
def set_max_distance(self, max_distance):
if self._buffer3d:
self._buffer3d.SetMaxDistance(max_distance, lib.DS3D_IMMEDIATE)
def set_pitch(self, pitch):
frequency = int(pitch * self.audio_format.sample_rate)
self._buffer.SetFrequency(frequency)
def set_cone_orientation(self, cone_orientation):
if self._buffer3d:
x, y, z = cone_orientation
self._buffer3d.SetConeOrientation(x, y, -z, lib.DS3D_IMMEDIATE)
def set_cone_inner_angle(self, cone_inner_angle):
if self._buffer3d:
self._cone_inner_angle = int(cone_inner_angle)
self._set_cone_angles()
def set_cone_outer_angle(self, cone_outer_angle):
if self._buffer3d:
self._cone_outer_angle = int(cone_outer_angle)
self._set_cone_angles()
def _set_cone_angles(self):
inner = min(self._cone_inner_angle, self._cone_outer_angle)
outer = max(self._cone_inner_angle, self._cone_outer_angle)
self._buffer3d.SetConeAngles(inner, outer, lib.DS3D_IMMEDIATE)
def set_cone_outer_gain(self, cone_outer_gain):
if self._buffer3d:
volume = _db(cone_outer_gain)
self._buffer3d.SetConeOutsideVolume(volume, lib.DS3D_IMMEDIATE)
class DirectSoundListener(Listener):
def _init(self):
# Called after driver_init()
self._buffer = lib.IDirectSoundBuffer()
dsbd = lib.DSBUFFERDESC()
dsbd.dwSize = ctypes.sizeof(dsbd)
dsbd.dwFlags = (lib.DSBCAPS_CTRL3D |
lib.DSBCAPS_CTRLVOLUME |
lib.DSBCAPS_PRIMARYBUFFER)
dsound.CreateSoundBuffer(dsbd, ctypes.byref(self._buffer), None)
self._listener = lib.IDirectSound3DListener()
self._buffer.QueryInterface(lib.IID_IDirectSound3DListener,
ctypes.byref(self._listener))
def __del__(self):
try:
self._buffer.Release()
self._listener.Release()
except:
pass
def _set_volume(self, volume):
self._volume = volume
self._buffer.SetVolume(_db(volume))
def _set_position(self, position):
self._position = position
x, y, z = position
self._listener.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)
def _set_forward_orientation(self, orientation):
self._forward_orientation = orientation
self._set_orientation()
def _set_up_orientation(self, orientation):
self._up_orientation = orientation
self._set_orientation()
def _set_orientation(self):
x, y, z = self._forward_orientation
ux, uy, uz = self._up_orientation
self._listener.SetOrientation(x, y, -z, ux, uy, -uz, lib.DS3D_IMMEDIATE)
dsound = None
def driver_init():
global dsound
dsound = lib.IDirectSound()
lib.DirectSoundCreate(None, ctypes.byref(dsound), None)
# A trick used by mplayer.. use desktop as window handle since it would
# be complex to use pyglet window handles (and what to do when application
# is audio only?).
hwnd = _user32.GetDesktopWindow()
dsound.SetCooperativeLevel(hwnd, lib.DSSCL_NORMAL)
driver_listener._init()
# Force a context switch, as some Windows audio drivers don't get time
# to process short sounds if Python hogs all the CPU. See issue #163.
from pyglet import clock
clock.Clock._force_sleep = True
driver_listener = DirectSoundListener()
driver_audio_player_class = DirectSoundAudioPlayer
| bsd-3-clause | e319dce1b4e747465297b0fd945dda5d | 33.982234 | 80 | 0.595952 | 3.812725 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/window/xlib/xinerama.py | 7 | 4022 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for Xinerama
Generated with:
tools/genwrappers.py
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: xinerama.py 1579 2008-01-15 14:47:19Z Alex.Holkner $'
import ctypes
from ctypes import *
import pyglet.lib
_lib = pyglet.lib.load_library('Xinerama')
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (ctypes.c_int64,)
for t in _int_types:
if sizeof(t) == sizeof(c_size_t):
c_ptrdiff_t = t
class c_void(Structure):
# c_void_p is a buggy return type, converting to int, so
# POINTER(None) == c_void_p is actually written as
# POINTER(c_void), so it can be treated as a real pointer.
_fields_ = [('dummy', c_int)]
import pyglet.gl.glx
import pyglet.window.xlib.xlib
class struct_anon_181(Structure):
__slots__ = [
'screen_number',
'x_org',
'y_org',
'width',
'height',
]
struct_anon_181._fields_ = [
('screen_number', c_int),
('x_org', c_short),
('y_org', c_short),
('width', c_short),
('height', c_short),
]
XineramaScreenInfo = struct_anon_181 # /usr/include/X11/extensions/Xinerama.h:40
Display = pyglet.gl.glx.Display
# /usr/include/X11/extensions/Xinerama.h:44
XineramaQueryExtension = _lib.XineramaQueryExtension
XineramaQueryExtension.restype = c_int
XineramaQueryExtension.argtypes = [POINTER(Display), POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/Xinerama.h:50
XineramaQueryVersion = _lib.XineramaQueryVersion
XineramaQueryVersion.restype = c_int
XineramaQueryVersion.argtypes = [POINTER(Display), POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/Xinerama.h:56
XineramaIsActive = _lib.XineramaIsActive
XineramaIsActive.restype = c_int
XineramaIsActive.argtypes = [POINTER(Display)]
# /usr/include/X11/extensions/Xinerama.h:67
XineramaQueryScreens = _lib.XineramaQueryScreens
XineramaQueryScreens.restype = POINTER(XineramaScreenInfo)
XineramaQueryScreens.argtypes = [POINTER(Display), POINTER(c_int)]
__all__ = ['XineramaScreenInfo', 'XineramaQueryExtension',
'XineramaQueryVersion', 'XineramaIsActive', 'XineramaQueryScreens']
| bsd-3-clause | 329b497c17eaa4d2ae69885c7b32ca4e | 34.910714 | 84 | 0.702636 | 3.549868 | false | false | false | false |
mattpap/sympy-polys | sympy/printing/tests/test_conventions.py | 13 | 1648 | from sympy import symbols
from sympy.printing.conventions import split_super_sub
def test_super_sub():
assert split_super_sub("beta_13_2") == ("beta", [], ["13","2"])
assert split_super_sub("beta_132_20") == ("beta", [], ["132","20"])
assert split_super_sub("beta_13") == ("beta", [], ["13"])
assert split_super_sub("x_a_b") == ("x", [], ["a","b"])
assert split_super_sub("x_1_2_3") == ("x", [], ["1","2","3"])
assert split_super_sub("x_a_b1") == ("x", [], ["a","b1"])
assert split_super_sub("x_a_1") == ("x", [], ["a","1"])
assert split_super_sub("x_1_a") == ("x", [], ["1","a"])
assert split_super_sub("x_1^aa") == ("x", ["aa"], ["1"])
assert split_super_sub("x_1__aa") == ("x", ["aa"], ["1"])
assert split_super_sub("x_11^a") == ("x", ["a"], ["11"])
assert split_super_sub("x_11__a") == ("x", ["a"], ["11"])
assert split_super_sub("x_a_b_c_d") == ("x", [], ["a","b","c","d"])
assert split_super_sub("x_a_b^c^d") == ("x", ["c","d"], ["a","b"])
assert split_super_sub("x_a_b__c__d") == ("x", ["c","d"], ["a","b"])
assert split_super_sub("x_a^b_c^d") == ("x", ["b","d"], ["a","c"])
assert split_super_sub("x_a__b_c__d") == ("x", ["b","d"], ["a","c"])
assert split_super_sub("x^a^b_c_d") == ("x", ["a","b"], ["c","d"])
assert split_super_sub("x__a__b_c_d") == ("x", ["a","b"], ["c","d"])
assert split_super_sub("x^a^b^c^d") == ("x", ["a","b","c","d"], [])
assert split_super_sub("x__a__b__c__d") == ("x", ["a","b","c","d"], [])
assert split_super_sub("alpha_11") == ("alpha", [], ["11"])
assert split_super_sub("alpha_11_11") == ("alpha", [], ["11","11"])
| bsd-3-clause | a41c7b0a40ed07476e8e6b0bcd448fc9 | 60.037037 | 75 | 0.456917 | 2.53149 | false | false | false | false |
mattpap/sympy-polys | sympy/functions/special/bsplines.py | 2 | 4632 | from sympy.core.basic import Basic, S, C, sympify
from sympy.core.function import expand
from sympy.functions import Piecewise, piecewise_fold
from sympy.functions.elementary.piecewise import ExprCondPair
from sympy.core.sets import Interval
def _add_splines(c, b1, d, b2):
"""Construct c*b1 + d*b2."""
if b1 == S.Zero or c == S.Zero:
return expand(piecewise_fold(d*b2))
if b2 == S.Zero or d == S.Zero:
return expand(piecewise_fold(c*b1))
new_args = []
n_intervals = len(b1.args)
assert(n_intervals==len(b2.args))
new_args.append((expand(c*b1.args[0].expr), b1.args[0].cond))
for i in range(1, n_intervals-1):
new_args.append((
expand(c*b1.args[i].expr+d*b2.args[i-1].expr),
b1.args[i].cond
))
new_args.append((expand(d*b2.args[-2].expr), b2.args[-2].cond))
new_args.append(b2.args[-1])
return Piecewise(*new_args)
def bspline_basis(d, knots, n, x, close=True):
"""The n-th B-spline at x of degree d with knots.
B-Splines are piecewise polynomials of degree d [1]. They are defined on
a set of knots, which is a sequence of integers or floats.
The 0th degree splines have a value of one on a single interval:
>>> from sympy import bspline_basis
>>> from sympy.abc import x
>>> d = 0
>>> knots = range(5)
>>> bspline_basis(d, knots, 0, x)
Piecewise((1, [0, 1]), (0, True))
For a given (d, knots) there are len(knots)-d-1 B-splines defined, that
are indexed by n (starting at 0).
Here is an example of a cubic B-spline:
>>> bspline_basis(3, range(5), 0, x)
Piecewise((x**3/6, [0, 1)), (2/3 - 2*x + 2*x**2 - x**3/2, [1, 2)), (-22/3 + 10*x - 4*x**2 + x**3/2, [2, 3)), (32/3 - 8*x + 2*x**2 - x**3/6, [3, 4]), (0, True))
By repeating knot points, you can introduce discontinuities in the
B-splines and their derivatives:
>>> d = 1
>>> knots = [0,0,2,3,4]
>>> bspline_basis(d, knots, 0, x)
Piecewise((1 - x/2, [0, 2]), (0, True))
It is quite time consuming to construct and evaluate B-splines. If you
need to evaluate a B-splines many times, it is best to lambdify them
first:
>>> from sympy import lambdify
>>> d = 3
>>> knots = range(10)
>>> b0 = bspline_basis(d, knots, 0, x)
>>> f = lambdify(x, b0)
>>> y = f(0.5)
[1] http://en.wikipedia.org/wiki/B-spline
"""
knots = [sympify(k) for k in knots]
d = int(d)
n = int(n)
n_knots = len(knots)
n_intervals = n_knots-1
if n+d+1 > n_intervals:
raise ValueError('n+d+1 must not exceed len(knots)-1')
if d==0:
result = Piecewise(
(S.One, Interval(knots[n], knots[n+1], False, True)),
(0, True)
)
elif d > 0:
denom = knots[n+d] - knots[n]
if denom != S.Zero:
A = (x - knots[n])/denom
b1 = bspline_basis(d-1, knots, n, x, close=False)
else:
b1 = A = S.Zero
denom = knots[n+d+1] - knots[n+1]
if denom != S.Zero:
B = (knots[n+d+1] - x)/denom
b2 = bspline_basis(d-1, knots, n+1, x, close=False)
else:
b2 = B = S.Zero
result = _add_splines(A, b1, B, b2)
else:
raise ValueError('degree must be non-negative: %r' % n)
if close:
final_ec_pair = result.args[-2]
final_cond = final_ec_pair.cond
final_expr = final_ec_pair.expr
new_args = final_cond.args[:3] + (False,)
new_ec_pair = ExprCondPair(final_expr, Interval(*new_args))
new_args = result.args[:-2] + (new_ec_pair, result.args[-1])
result = Piecewise(*new_args)
return result
def bspline_basis_set(d, knots, x):
"""Return the len(knots)-d-1 B-splines at x of degree d with knots.
This function returns a list of Piecewise polynomials that are the
len(knots)-d-1 B-splines of degree d for the given knots. This function
calls bspline_basis(d, knots, n, x) for different values of n.
>>> from sympy import bspline_basis_set
>>> from sympy.abc import x
>>> d = 2
>>> knots = range(5)
>>> splines = bspline_basis_set(d, knots, x)
>>> splines
[Piecewise((x**2/2, [0, 1)), (-3/2 + 3*x - x**2, [1, 2)), (9/2 - 3*x + x**2/2, [2, 3]), (0, True)), Piecewise((1/2 - x + x**2/2, [1, 2)), (-11/2 + 5*x - x**2, [2, 3)), (8 - 4*x + x**2/2, [3, 4]), (0, True))]
"""
splines = []
n_splines = len(knots)-d-1
return [bspline_basis(d, knots, i, x) for i in range(n_splines)]
| bsd-3-clause | c0b757d38d2b6bb5924c29070659e156 | 35.1875 | 215 | 0.552893 | 2.815805 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/window/xlib/__init__.py | 5 | 59054 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
__docformat__ = 'restructuredtext'
__version__ = '$Id: __init__.py 2262 2008-09-16 14:33:09Z Alex.Holkner $'
from ctypes import *
import unicodedata
import warnings
import pyglet
from pyglet.window import WindowException, NoSuchDisplayException, \
MouseCursorException, Platform, Display, Screen, MouseCursor, \
DefaultMouseCursor, ImageMouseCursor, BaseWindow, _PlatformEventHandler
from pyglet.window import event
from pyglet.window import key
from pyglet.window import mouse
from pyglet.event import EventDispatcher
from pyglet import gl
from pyglet.gl import gl_info
from pyglet.gl import glu_info
from pyglet.gl import glx
from pyglet.gl import glxext_arb
from pyglet.gl import glxext_mesa
from pyglet.gl import glx_info
import pyglet.window.xlib.xlib
from pyglet.window.xlib import cursorfont
try:
import pyglet.window.xlib.xinerama
_have_xinerama = True
except:
_have_xinerama = False
try:
import pyglet.window.xlib.xsync
_have_xsync = True
except:
_have_xsync = False
class mwmhints_t(Structure):
_fields_ = [
('flags', c_uint32),
('functions', c_uint32),
('decorations', c_uint32),
('input_mode', c_int32),
('status', c_uint32)
]
XA_CARDINAL = 6 # Xatom.h:14
# Do we have the November 2000 UTF8 extension?
_have_utf8 = hasattr(xlib._lib, 'Xutf8TextListToTextProperty')
# symbol,ctrl -> motion mapping
_motion_map = {
(key.UP, False): key.MOTION_UP,
(key.RIGHT, False): key.MOTION_RIGHT,
(key.DOWN, False): key.MOTION_DOWN,
(key.LEFT, False): key.MOTION_LEFT,
(key.RIGHT, True): key.MOTION_NEXT_WORD,
(key.LEFT, True): key.MOTION_PREVIOUS_WORD,
(key.HOME, False): key.MOTION_BEGINNING_OF_LINE,
(key.END, False): key.MOTION_END_OF_LINE,
(key.PAGEUP, False): key.MOTION_PREVIOUS_PAGE,
(key.PAGEDOWN, False): key.MOTION_NEXT_PAGE,
(key.HOME, True): key.MOTION_BEGINNING_OF_FILE,
(key.END, True): key.MOTION_END_OF_FILE,
(key.BACKSPACE, False): key.MOTION_BACKSPACE,
(key.DELETE, False): key.MOTION_DELETE,
}
# Set up error handler
def _error_handler(display, event):
# By default, all errors are silently ignored: this has a better chance
# of working than the default behaviour of quitting ;-)
#
# We've actually never seen an error that was our fault; they're always
# driver bugs (and so the reports are useless). Nevertheless, set
# environment variable PYGLET_DEBUG_X11 to 1 to get dumps of the error
# and a traceback (execution will continue).
if pyglet.options['debug_x11']:
event = event.contents
buf = c_buffer(1024)
xlib.XGetErrorText(display, event.error_code, buf, len(buf))
print 'X11 error:', buf.value
print ' serial:', event.serial
print ' request:', event.request_code
print ' minor:', event.minor_code
print ' resource:', event.resourceid
import traceback
print 'Python stack trace (innermost last):'
traceback.print_stack()
return 0
_error_handler_ptr = xlib.XErrorHandler(_error_handler)
xlib.XSetErrorHandler(_error_handler_ptr)
class XlibException(WindowException):
'''An X11-specific exception. This exception is probably a programming
error in pyglet.'''
pass
class XlibMouseCursor(MouseCursor):
drawable = False
def __init__(self, cursor):
self.cursor = cursor
class XlibPlatform(Platform):
def __init__(self):
self._displays = {}
def get_display(self, name):
if name not in self._displays:
self._displays[name] = XlibDisplayDevice(name)
return self._displays[name]
def get_default_display(self):
return self.get_display('')
class XlibDisplayDevice(Display):
    '''A single connection to an X server.

    Owns the Xlib Display pointer, the window-handle -> XlibWindow map used
    for event routing, and detects the Xinerama and XSYNC extensions at
    connection time.
    '''
    _display = None # POINTER(xlib.Display)
    _x_im = None # X input method
    # TODO close _x_im when display connection closed.
    _enable_xsync = False
    def __init__(self, name):
        super(XlibDisplayDevice, self).__init__()
        self._display = xlib.XOpenDisplay(name)
        if not self._display:
            raise NoSuchDisplayException('Cannot connect to "%s"' % name)
        self.info = glx_info.GLXInfo(self._display)
        # Also set the default GLX display for future info queries
        glx_info.set_display(self._display.contents)
        # File descriptor of the X connection; used for select()-style polling.
        self._fileno = xlib.XConnectionNumber(self._display)
        self._window_map = {}
        # Initialise XSync (used for WM resize synchronisation, see
        # _NET_WM_SYNC_REQUEST handling in XlibWindow).
        if _have_xsync:
            event_base = c_int()
            error_base = c_int()
            if xsync.XSyncQueryExtension(self._display,
                                         byref(event_base),
                                         byref(error_base)):
                major_version = c_int()
                minor_version = c_int()
                if xsync.XSyncInitialize(self._display,
                                         byref(major_version),
                                         byref(minor_version)):
                    self._enable_xsync = True
    def fileno(self):
        # Allows the display object itself to be passed to select().
        return self._fileno
    def get_screens(self):
        '''Return the list of screens, with the default screen first.

        When Xinerama is active, one XlibScreen per Xinerama head is
        returned (all sharing the default X screen id); otherwise one per
        X screen.
        '''
        x_screen = xlib.XDefaultScreen(self._display)
        if _have_xinerama and xinerama.XineramaIsActive(self._display):
            number = c_int()
            infos = xinerama.XineramaQueryScreens(self._display,
                                                  byref(number))
            infos = cast(infos,
                 POINTER(xinerama.XineramaScreenInfo * number.value)).contents
            result = []
            for info in infos:
                result.append(XlibScreen(self,
                                         x_screen,
                                         info.x_org,
                                         info.y_org,
                                         info.width,
                                         info.height,
                                         True))
            xlib.XFree(infos)
            return result
        else:
            # No xinerama
            screen_count = xlib.XScreenCount(self._display)
            result = []
            for i in range(screen_count):
                screen = xlib.XScreenOfDisplay(self._display, i)
                result.append(XlibScreen(self,
                                         i,
                                         0, 0,
                                         screen.contents.width,
                                         screen.contents.height,
                                         False))
            # Move default screen to be first in list.
            s = result.pop(x_screen)
            result.insert(0, s)
            return result
class XlibScreen(Screen):
    '''A single screen (or Xinerama head) on an X display.'''
    def __init__(self, display, x_screen_id, x, y, width, height, xinerama):
        super(XlibScreen, self).__init__(x, y, width, height)
        self.display = display
        self._x_screen_id = x_screen_id
        self._xinerama = xinerama
    def get_matching_configs(self, template):
        '''Return GL configs on this screen matching the *template* config.

        Uses glXChooseFBConfig on GLX >= 1.3, otherwise glXChooseVisual
        (with an ATI-specific workaround class).
        '''
        x_display = self.display._display
        have_13 = self.display.info.have_version(1, 3)
        if have_13:
            config_class = XlibGLConfig13
        else:
            if 'ATI' in self.display.info.get_client_vendor():
                config_class = XlibGLConfig10ATI
            else:
                config_class = XlibGLConfig10
        # Construct array of attributes
        attrs = []
        for name, value in template.get_gl_attributes():
            attr = config_class.attribute_ids.get(name, None)
            if attr and value is not None:
                attrs.extend([attr, int(value)])
        if have_13:
            attrs.extend([glx.GLX_X_RENDERABLE, True])
        else:
            attrs.extend([glx.GLX_RGBA, True])
        if len(attrs):
            # GLX attribute lists are zero-terminated.
            attrs.extend([0, 0])
            attrib_list = (c_int * len(attrs))(*attrs)
        else:
            attrib_list = None
        if have_13:
            elements = c_int()
            configs = glx.glXChooseFBConfig(x_display, self._x_screen_id,
                attrib_list, byref(elements))
            if not configs:
                return []
            configs = cast(configs,
                           POINTER(glx.GLXFBConfig * elements.value)).contents
            result = [config_class(self, c) for c in configs]
            # Can't free array until all XlibGLConfig13's are GC'd. Too much
            # hassle, live with leak. XXX
            #xlib.XFree(configs)
            return result
        else:
            try:
                return [config_class(self, attrib_list)]
            except gl.ContextException:
                return []
    def __repr__(self):
        return 'XlibScreen(screen=%d, x=%d, y=%d, ' \
               'width=%d, height=%d, xinerama=%d)' % \
            (self._x_screen_id, self.x, self.y, self.width, self.height,
             self._xinerama)
class XlibGLConfig(gl.Config):
    '''Abstract base for GLX framebuffer configurations.

    Maps pyglet config attribute names to GLX attribute constants;
    subclasses implement the GLX-1.0 and GLX-1.3 lookup paths.
    '''
    attribute_ids = {
        'buffer_size': glx.GLX_BUFFER_SIZE,
        'level': glx.GLX_LEVEL, # Not supported
        'double_buffer': glx.GLX_DOUBLEBUFFER,
        'stereo': glx.GLX_STEREO,
        'aux_buffers': glx.GLX_AUX_BUFFERS,
        'red_size': glx.GLX_RED_SIZE,
        'green_size': glx.GLX_GREEN_SIZE,
        'blue_size': glx.GLX_BLUE_SIZE,
        'alpha_size': glx.GLX_ALPHA_SIZE,
        'depth_size': glx.GLX_DEPTH_SIZE,
        'stencil_size': glx.GLX_STENCIL_SIZE,
        'accum_red_size': glx.GLX_ACCUM_RED_SIZE,
        'accum_green_size': glx.GLX_ACCUM_GREEN_SIZE,
        'accum_blue_size': glx.GLX_ACCUM_BLUE_SIZE,
        'accum_alpha_size': glx.GLX_ACCUM_ALPHA_SIZE,
    }
    def create_context(self, share):
        '''Create a GL context from this config, optionally sharing objects
        with *share*.  Raises gl.ContextException on failure.'''
        context = self._create_glx_context(share)
        # glXCreateContext error returns are compared against known GLX
        # error codes; a negative pointer value also indicates failure.
        if context == glx.GLX_BAD_CONTEXT:
            raise gl.ContextException('Invalid context share')
        elif context == glx.GLXBadFBConfig:
            raise gl.ContextException('Invalid GL configuration')
        elif context < 0:
            raise gl.ContextException('Could not create GL context')
        return XlibGLContext(self, context, share)
    def _create_glx_context(self, share):
        raise NotImplementedError('abstract')
    def is_complete(self):
        return True
    def get_visual_info(self):
        raise NotImplementedError('abstract')
class XlibGLConfig10(XlibGLConfig):
    '''GLX 1.0/1.2 configuration selected via glXChooseVisual.'''
    def __init__(self, screen, attrib_list):
        self.screen = screen
        self._display = screen.display._display
        self._visual_info = glx.glXChooseVisual(self._display,
            screen._x_screen_id, attrib_list)
        if not self._visual_info:
            raise gl.ContextException('No conforming visual exists')
        # Read back the actual attribute values the server chose.
        for name, attr in self.attribute_ids.items():
            value = c_int()
            result = glx.glXGetConfig(self._display,
                self._visual_info, attr, byref(value))
            if result >= 0:
                setattr(self, name, value.value)
        # Multisampling is not available pre-GLX-1.3.
        self.sample_buffers = 0
        self.samples = 0
    def get_visual_info(self):
        return self._visual_info.contents
    def _create_glx_context(self, share):
        if share:
            return glx.glXCreateContext(self._display, self._visual_info,
                share._context, True)
        else:
            return glx.glXCreateContext(self._display, self._visual_info,
                None, True)
class XlibGLConfig10ATI(XlibGLConfig10):
    # Workaround for ATI's GLX 1.0 driver: the stereo attribute is removed
    # from the query set and reported as unavailable.
    # NOTE(review): rationale inferred from the class name and its selection
    # in XlibScreen.get_matching_configs ('ATI' in client vendor) -- confirm
    # against pyglet history.
    attribute_ids = XlibGLConfig.attribute_ids.copy()
    del attribute_ids['stereo']
    stereo = False
class XlibGLConfig13(XlibGLConfig):
    '''GLX >= 1.3 configuration backed by a GLXFBConfig.'''
    attribute_ids = XlibGLConfig.attribute_ids.copy()
    attribute_ids.update({
        'sample_buffers': glx.GLX_SAMPLE_BUFFERS,
        'samples': glx.GLX_SAMPLES,
        # Not supported in current pyglet API:
        'render_type': glx.GLX_RENDER_TYPE,
        'config_caveat': glx.GLX_CONFIG_CAVEAT,
        'transparent_type': glx.GLX_TRANSPARENT_TYPE,
        'transparent_index_value': glx.GLX_TRANSPARENT_INDEX_VALUE,
        'transparent_red_value': glx.GLX_TRANSPARENT_RED_VALUE,
        'transparent_green_value': glx.GLX_TRANSPARENT_GREEN_VALUE,
        'transparent_blue_value': glx.GLX_TRANSPARENT_BLUE_VALUE,
        'transparent_alpha_value': glx.GLX_TRANSPARENT_ALPHA_VALUE,
        # Used internally
        'x_renderable': glx.GLX_X_RENDERABLE,
    })
    def __init__(self, screen, fbconfig):
        super(XlibGLConfig13, self).__init__()
        self.screen = screen
        self._display = screen.display._display
        self._fbconfig = fbconfig
        # Read back the actual attribute values of this FBConfig.
        for name, attr in self.attribute_ids.items():
            value = c_int()
            result = glx.glXGetFBConfigAttrib(
                self._display, self._fbconfig, attr, byref(value))
            if result >= 0:
                setattr(self, name, value.value)
    def get_visual_info(self):
        return glx.glXGetVisualFromFBConfig(
            self._display, self._fbconfig).contents
    def _create_glx_context(self, share):
        if share:
            return glx.glXCreateNewContext(self._display, self._fbconfig,
                glx.GLX_RGBA_TYPE, share._context, True)
        else:
            return glx.glXCreateNewContext(self._display, self._fbconfig,
                glx.GLX_RGBA_TYPE, None, True)
class XlibGLContext(gl.Context):
    '''A GLX rendering context.'''
    def __init__(self, config, context, share):
        super(XlibGLContext, self).__init__(share)
        self.config = config
        # context: native GLXContext handle from glXCreateContext /
        # glXCreateNewContext.
        self._context = context
        self._x_display = config.screen.display._display
    def destroy(self):
        super(XlibGLContext, self).destroy()
        glx.glXDestroyContext(self._x_display, self._context)
    def is_direct(self):
        # True when rendering bypasses the X server (direct rendering).
        return glx.glXIsDirect(self._x_display, self._context)
# Platform event data is single item, so use platform event handler directly.
# XlibEventHandler(message) is used as a decorator on XlibWindow methods to
# register them for a given X event type.
XlibEventHandler = _PlatformEventHandler
class XlibWindow(BaseWindow):
    '''An X11 top-level window with an attached GLX drawable.'''
    _x_display = None # X display connection
    _x_screen_id = None # X screen index
    _x_ic = None # X input context
    _glx_context = None # GLX context handle
    _glx_window = None # GLX window handle
    _window = None # Xlib window handle
    _minimum_size = None
    _maximum_size = None
    _x = 0
    _y = 0 # Last known window position
    _width = 0
    _height = 0 # Last known window size
    _mouse_exclusive_client = None # x,y of "real" mouse during exclusive
    _mouse_buttons = [False] * 6 # State of each xlib button
    _keyboard_exclusive = False
    _active = True
    _applied_mouse_exclusive = False
    _applied_keyboard_exclusive = False
    _mapped = False
    _lost_context = False
    _lost_context_state = False
    _enable_xsync = False
    _current_sync_value = None
    _current_sync_valid = False
    _needs_resize = False # True when resize event has been received but not
                          # dispatched
    # All X event types except PointerMotionHint/ResizeRedirect
    # (0x1ffffff is a mask covering every event bit).
    _default_event_mask = (0x1ffffff
        & ~xlib.PointerMotionHintMask
        & ~xlib.ResizeRedirectMask)
def __init__(self, *args, **kwargs):
# Bind event handlers
self._event_handlers = {}
for name in self._platform_event_names:
if not hasattr(self, name):
continue
func = getattr(self, name)
for message in func._platform_event_data:
self._event_handlers[message] = func
super(XlibWindow, self).__init__(*args, **kwargs)
    def _recreate(self, changes):
        '''Destroy and rebuild the native window in response to *changes*
        (a set of changed attribute names from BaseWindow).'''
        # If flipping to/from fullscreen and using override_redirect (we
        # always are, _NET_WM_FULLSCREEN doesn't work), need to recreate the
        # window.
        #
        # A possible improvement could be to just hide the top window,
        # destroy the GLX window, and reshow it again when leaving fullscreen.
        # This would prevent the floating window from being moved by the
        # WM.
        if 'fullscreen' in changes or 'resizable' in changes:
            # clear out the GLX context
            self.switch_to()
            gl.glFlush()
            glx.glXMakeCurrent(self._x_display, 0, None)
            if self._glx_window:
                glx.glXDestroyWindow(self._x_display, self._glx_window)
            xlib.XDestroyWindow(self._x_display, self._window)
            self._glx_window = None
            # Remove from the display's event-routing map.
            del self.display._window_map[self._window]
            self._window = None
            self._mapped = False
        # TODO: detect state loss only by examining context share.
        if 'context' in changes:
            self._lost_context = True
            self._lost_context_state = True
        self._create()
    def _create(self):
        '''Create (or re-create) the native X window, GLX bindings, WM
        properties, style hints and the X input context.'''
        # Unmap existing window if necessary while we fiddle with it.
        if self._window and self._mapped:
            self._unmap()
        self.context.window = self
        self._x_display = self.config._display
        self._x_screen_id = self.screen._x_screen_id
        self._glx_context = self.context._context
        self._glx_1_3 = self.display.info.have_version(1, 3)
        # Probe swap/sync extensions used by flip() and set_vsync().
        self._have_SGI_video_sync = \
            self.display.info.have_extension('GLX_SGI_video_sync')
        self._have_SGI_swap_control = \
            self.display.info.have_extension('GLX_SGI_swap_control')
        self._have_MESA_swap_control = \
            self.display.info.have_extension('GLX_MESA_swap_control')
        # In order of preference:
        # 1. GLX_MESA_swap_control (more likely to work where video_sync will
        #    not)
        # 2. GLX_SGI_video_sync (does not work on Intel 945GM, but that has
        #    MESA)
        # 3. GLX_SGI_swap_control (cannot be disabled once enabled).
        self._use_video_sync = (self._have_SGI_video_sync and
                                not self._have_MESA_swap_control)
        # Create X window if not already existing.
        if not self._window:
            root = xlib.XRootWindow(self._x_display, self._x_screen_id)
            visual_info = self.config.get_visual_info()
            visual = visual_info.visual
            visual_id = xlib.XVisualIDFromVisual(visual)
            default_visual = xlib.XDefaultVisual(
                self._x_display, self._x_screen_id)
            default_visual_id = xlib.XVisualIDFromVisual(default_visual)
            window_attributes = xlib.XSetWindowAttributes()
            # A colormap matching the GL visual is needed when it differs
            # from the screen's default visual.
            if visual_id != default_visual_id:
                window_attributes.colormap = xlib.XCreateColormap(
                    self._x_display, root, visual, xlib.AllocNone)
            else:
                window_attributes.colormap = xlib.XDefaultColormap(
                    self._x_display, self._x_screen_id)
            window_attributes.bit_gravity = xlib.NorthWestGravity
            # Issue 287: Compiz on Intel/Mesa doesn't draw window decoration
            #            unless CWBackPixel is given in mask. Should have
            #            no effect on other systems, so it's set
            #            unconditionally.
            mask = xlib.CWColormap | xlib.CWBitGravity | xlib.CWBackPixel
            self._window = xlib.XCreateWindow(self._x_display, root,
                0, 0, self._width, self._height, 0, visual_info.depth,
                xlib.InputOutput, visual, mask,
                byref(window_attributes))
            # Register for event routing by the display.
            self.display._window_map[self._window] = self
            # Setting null background pixmap disables drawing the background,
            # preventing flicker while resizing (in theory).
            #
            # Issue 287: Compiz on Intel/Mesa doesn't draw window decoration if
            #            this is called. As it doesn't seem to have any
            #            effect anyway, it's just commented out.
            #xlib.XSetWindowBackgroundPixmap(self._x_display, self._window, 0)
            self._enable_xsync = (pyglet.options['xsync'] and
                                  self.display._enable_xsync and
                                  self.config.double_buffer)
            # Set supported protocols
            protocols = []
            protocols.append(xlib.XInternAtom(self._x_display,
                                              'WM_DELETE_WINDOW', False))
            if self._enable_xsync:
                protocols.append(xlib.XInternAtom(self._x_display,
                                                  '_NET_WM_SYNC_REQUEST',
                                                  False))
            protocols = (c_ulong * len(protocols))(*protocols)
            xlib.XSetWMProtocols(self._x_display, self._window,
                                 protocols, len(protocols))
            # Create window resize sync counter
            if self._enable_xsync:
                value = xsync.XSyncValue()
                self._sync_counter = xlib.XID(
                    xsync.XSyncCreateCounter(self._x_display, value))
                atom = xlib.XInternAtom(self._x_display,
                                        '_NET_WM_SYNC_REQUEST_COUNTER', False)
                ptr = pointer(self._sync_counter)
                xlib.XChangeProperty(self._x_display, self._window,
                                     atom, XA_CARDINAL, 32,
                                     xlib.PropModeReplace,
                                     cast(ptr, POINTER(c_ubyte)), 1)
        # Set window attributes
        attributes = xlib.XSetWindowAttributes()
        attributes_mask = 0
        # Bypass the window manager in fullscreen. This is the only reliable
        # technique (over _NET_WM_STATE_FULLSCREEN, Motif, KDE and Gnome
        # hints) that is pretty much guaranteed to work. Unfortunately
        # we run into window activation and focus problems that require
        # attention. Search for "override_redirect" for all occurences.
        attributes.override_redirect = self._fullscreen
        attributes_mask |= xlib.CWOverrideRedirect
        if self._fullscreen:
            xlib.XMoveResizeWindow(self._x_display, self._window,
                self.screen.x, self.screen.y,
                self.screen.width, self.screen.height)
        else:
            xlib.XResizeWindow(self._x_display, self._window,
                self._width, self._height)
        xlib.XChangeWindowAttributes(self._x_display, self._window,
            attributes_mask, byref(attributes))
        # Set style
        styles = {
            self.WINDOW_STYLE_DEFAULT: '_NET_WM_WINDOW_TYPE_NORMAL',
            self.WINDOW_STYLE_DIALOG: '_NET_WM_WINDOW_TYPE_DIALOG',
            self.WINDOW_STYLE_TOOL: '_NET_WM_WINDOW_TYPE_UTILITY',
        }
        if self._style in styles:
            self._set_atoms_property('_NET_WM_WINDOW_TYPE',
                                     (styles[self._style],))
        elif self._style == self.WINDOW_STYLE_BORDERLESS:
            # Remove decorations via the Motif WM hints property.
            MWM_HINTS_DECORATIONS = 1 << 1
            PROP_MWM_HINTS_ELEMENTS = 5
            mwmhints = mwmhints_t()
            mwmhints.flags = MWM_HINTS_DECORATIONS
            mwmhints.decorations = 0
            name = xlib.XInternAtom(self._x_display, '_MOTIF_WM_HINTS', False)
            xlib.XChangeProperty(self._x_display, self._window,
                name, name, 32, xlib.PropModeReplace,
                cast(pointer(mwmhints), POINTER(c_ubyte)),
                PROP_MWM_HINTS_ELEMENTS)
        # Set resizeable
        if not self._resizable:
            # Pinning min == max size prevents user resizing.
            self.set_minimum_size(self._width, self._height)
            self.set_maximum_size(self._width, self._height)
        # Set caption
        self.set_caption(self._caption)
        # Create input context. A good but very outdated reference for this
        # is http://www.sbin.org/doc/Xlib/chapt_11.html
        if _have_utf8 and not self._x_ic:
            if not self.display._x_im:
                xlib.XSetLocaleModifiers('@im=none')
                self.display._x_im = \
                    xlib.XOpenIM(self._x_display, None, None, None)
            xlib.XFlush(self._x_display);
            # Need to set argtypes on this function because it's vararg,
            # and ctypes guesses wrong.
            xlib.XCreateIC.argtypes = [xlib.XIM,
                                       c_char_p, c_int,
                                       c_char_p, xlib.Window,
                                       c_char_p, xlib.Window,
                                       c_void_p]
            self._x_ic = xlib.XCreateIC(self.display._x_im,
                'inputStyle', xlib.XIMPreeditNothing|xlib.XIMStatusNothing,
                'clientWindow', self._window,
                'focusWindow', self._window,
                None)
            filter_events = c_ulong()
            xlib.XGetICValues(self._x_ic,
                              'filterEvents', byref(filter_events),
                              None)
            # Also listen for events the input method needs to filter.
            self._default_event_mask |= filter_events.value
            xlib.XSetICFocus(self._x_ic)
        self.switch_to()
        if self._visible:
            self.set_visible(True)
        self.set_mouse_platform_visible()
def _map(self):
if self._mapped:
return
# Map the window, wait for map event before continuing.
xlib.XSelectInput(
self._x_display, self._window, xlib.StructureNotifyMask)
xlib.XMapRaised(self._x_display, self._window)
e = xlib.XEvent()
while True:
xlib.XNextEvent(self._x_display, e)
if e.type == xlib.MapNotify:
break
xlib.XSelectInput(
self._x_display, self._window, self._default_event_mask)
self._mapped = True
if self._fullscreen:
# Possibly an override_redirect issue.
self.activate()
self.dispatch_event('on_resize', self._width, self._height)
self.dispatch_event('on_show')
self.dispatch_event('on_expose')
def _unmap(self):
if not self._mapped:
return
xlib.XSelectInput(
self._x_display, self._window, xlib.StructureNotifyMask)
xlib.XUnmapWindow(self._x_display, self._window)
e = xlib.XEvent()
while True:
xlib.XNextEvent(self._x_display, e)
if e.type == xlib.UnmapNotify:
break
xlib.XSelectInput(
self._x_display, self._window, self._default_event_mask)
self._mapped = False
def _get_root(self):
attributes = xlib.XWindowAttributes()
xlib.XGetWindowAttributes(self._x_display, self._window,
byref(attributes))
return attributes.root
    def close(self):
        '''Destroy the GLX drawable, the X window and the input context.

        Safe to call more than once; subsequent calls are no-ops.
        '''
        if not self._window:
            return
        # clear out the GLX context. Can fail if current context already
        # destroyed (on exit, say).
        try:
            gl.glFlush()
        except gl.GLException:
            pass
        if self._glx_1_3:
            glx.glXMakeContextCurrent(self._x_display, 0, 0, None)
        else:
            glx.glXMakeCurrent(self._x_display, 0, None)
        self._unmap()
        if self._glx_window:
            glx.glXDestroyWindow(self._x_display, self._glx_window)
        if self._window:
            xlib.XDestroyWindow(self._x_display, self._window)
        # Drop from the display's event-routing map before clearing handles.
        del self.display._window_map[self._window]
        self._window = None
        self._glx_window = None
        if _have_utf8:
            xlib.XDestroyIC(self._x_ic)
            self._x_ic = None
        super(XlibWindow, self).close()
    def switch_to(self):
        '''Make this window's GL context current, creating the GLX window
        lazily on GLX >= 1.3.'''
        if self._glx_1_3:
            if not self._glx_window:
                self._glx_window = glx.glXCreateWindow(self._x_display,
                    self._config._fbconfig, self._window, None)
            glx.glXMakeContextCurrent(self._x_display,
                self._glx_window, self._glx_window, self._glx_context)
        else:
            glx.glXMakeCurrent(self._x_display, self._window, self._glx_context)
        # Re-apply swap interval; it is per-context on some drivers.
        self.set_vsync(self._vsync)
        self._context.set_current()
        gl_info.set_active_context()
        glu_info.set_active_context()
    def flip(self):
        '''Swap buffers, optionally waiting for vertical retrace via
        GLX_SGI_video_sync (see _create for the extension preference).'''
        self.draw_mouse_cursor()
        if self._vsync and self._have_SGI_video_sync and self._use_video_sync:
            count = c_uint()
            glxext_arb.glXGetVideoSyncSGI(byref(count))
            # Block until the next retrace after the current count.
            glxext_arb.glXWaitVideoSyncSGI(
                2, (count.value + 1) % 2, byref(count))
        if self._glx_1_3:
            if not self._glx_window:
                self._glx_window = glx.glXCreateWindow(self._x_display,
                    self._config._fbconfig, self._window, None)
            glx.glXSwapBuffers(self._x_display, self._glx_window)
        else:
            glx.glXSwapBuffers(self._x_display, self._window)
        # Acknowledge any pending _NET_WM_SYNC_REQUEST resize.
        self._sync_resize()
def set_vsync(self, vsync):
if pyglet.options['vsync'] is not None:
vsync = pyglet.options['vsync']
self._vsync = vsync
if not self._use_video_sync:
interval = vsync and 1 or 0
if self._have_MESA_swap_control:
glxext_mesa.glXSwapIntervalMESA(interval)
elif self._have_SGI_swap_control and interval:
# SGI_swap_control interval cannot be set to 0
glxext_arb.glXSwapIntervalSGI(interval)
def set_caption(self, caption):
if caption is None:
caption = ''
self._caption = caption
self._set_text_property('WM_NAME', caption, allow_utf8=False)
self._set_text_property('WM_ICON_NAME', caption, allow_utf8=False)
self._set_text_property('_NET_WM_NAME', caption)
self._set_text_property('_NET_WM_ICON_NAME', caption)
    def get_caption(self):
        '''Return the caption last set via set_caption.'''
        return self._caption
    def set_size(self, width, height):
        '''Resize the window; raises WindowException when fullscreen.'''
        if self._fullscreen:
            raise WindowException('Cannot set size of fullscreen window.')
        self._width = width
        self._height = height
        if not self._resizable:
            # Keep min == max so the WM forbids user resizing.
            self.set_minimum_size(width, height)
            self.set_maximum_size(width, height)
        xlib.XResizeWindow(self._x_display, self._window, width, height)
        self.dispatch_event('on_resize', width, height)
    def get_size(self):
        '''Return the last known (width, height) of the client area.'''
        # XGetGeometry and XWindowAttributes seem to always return the
        # original size of the window, which is wrong after the user
        # has resized it.
        # XXX this is probably fixed now, with fix of resize.
        return self._width, self._height
    def set_location(self, x, y):
        '''Move the window so its frame's top-left is at (x, y).'''
        # Assume the window manager has reparented our top-level window
        # only once, in which case attributes.x/y give the offset from
        # the frame to the content window. Better solution would be
        # to use _NET_FRAME_EXTENTS, where supported.
        attributes = xlib.XWindowAttributes()
        xlib.XGetWindowAttributes(self._x_display, self._window,
                                  byref(attributes))
        # XXX at least under KDE's WM these attrs are both 0
        x -= attributes.x
        y -= attributes.y
        xlib.XMoveWindow(self._x_display, self._window, x, y)
def get_location(self):
child = xlib.Window()
x = c_int()
y = c_int()
xlib.XTranslateCoordinates(self._x_display,
self._window,
self._get_root(),
0, 0,
byref(x),
byref(y),
byref(child))
return x.value, y.value
    def activate(self):
        '''Give this window the keyboard focus.'''
        xlib.XSetInputFocus(self._x_display, self._window,
                            xlib.RevertToParent, xlib.CurrentTime)
    def set_visible(self, visible=True):
        '''Show or hide the window by mapping/unmapping it.'''
        if visible:
            self._map()
        else:
            self._unmap()
        self._visible = visible
    def set_minimum_size(self, width, height):
        '''Set the WM-enforced minimum client size.'''
        self._minimum_size = width, height
        self._set_wm_normal_hints()
    def set_maximum_size(self, width, height):
        '''Set the WM-enforced maximum client size.'''
        self._maximum_size = width, height
        self._set_wm_normal_hints()
    def minimize(self):
        '''Iconify (minimise) the window.'''
        xlib.XIconifyWindow(self._x_display, self._window, self._x_screen_id)
    def maximize(self):
        '''Maximise the window in both directions via EWMH state atoms.'''
        self._set_wm_state('_NET_WM_STATE_MAXIMIZED_HORZ',
                           '_NET_WM_STATE_MAXIMIZED_VERT')
def set_mouse_platform_visible(self, platform_visible=None):
if platform_visible is None:
platform_visible = self._mouse_visible and \
not self._mouse_cursor.drawable
if not platform_visible:
# Hide pointer by creating an empty cursor
black = xlib.XBlackPixel(self._x_display, self._x_screen_id)
black = xlib.XColor()
bmp = xlib.XCreateBitmapFromData(self._x_display, self._window,
c_buffer(8), 8, 8)
cursor = xlib.XCreatePixmapCursor(self._x_display, bmp, bmp,
black, black, 0, 0)
xlib.XDefineCursor(self._x_display, self._window, cursor)
xlib.XFreeCursor(self._x_display, cursor)
xlib.XFreePixmap(self._x_display, bmp)
else:
# Restore cursor
if isinstance(self._mouse_cursor, XlibMouseCursor):
xlib.XDefineCursor(self._x_display, self._window,
self._mouse_cursor.cursor)
else:
xlib.XUndefineCursor(self._x_display, self._window)
    def _update_exclusivity(self):
        '''Apply the requested mouse/keyboard exclusivity, grabbing or
        ungrabbing the pointer and keyboard as needed.

        Exclusivity is only effective while the window is active; this is
        re-run on focus changes.
        '''
        mouse_exclusive = self._active and self._mouse_exclusive
        keyboard_exclusive = self._active and self._keyboard_exclusive
        if mouse_exclusive != self._applied_mouse_exclusive:
            if mouse_exclusive:
                self.set_mouse_platform_visible(False)
                # Restrict to client area
                xlib.XGrabPointer(self._x_display, self._window,
                    True,
                    0,
                    xlib.GrabModeAsync,
                    xlib.GrabModeAsync,
                    self._window,
                    0,
                    xlib.CurrentTime)
                # Move pointer to center of window
                # ((Python 2) integer division gives whole-pixel centre)
                x = self._width / 2
                y = self._height / 2
                self._mouse_exclusive_client = x, y
                xlib.XWarpPointer(self._x_display,
                    0, # src window
                    self._window, # dst window
                    0, 0, # src x, y
                    0, 0, # src w, h
                    x, y)
            else:
                # Unclip
                xlib.XUngrabPointer(self._x_display, xlib.CurrentTime)
                self.set_mouse_platform_visible()
            self._applied_mouse_exclusive = mouse_exclusive
        if keyboard_exclusive != self._applied_keyboard_exclusive:
            if keyboard_exclusive:
                xlib.XGrabKeyboard(self._x_display,
                    self._window,
                    False,
                    xlib.GrabModeAsync,
                    xlib.GrabModeAsync,
                    xlib.CurrentTime)
            else:
                xlib.XUngrabKeyboard(self._x_display, xlib.CurrentTime)
            self._applied_keyboard_exclusive = keyboard_exclusive
def set_exclusive_mouse(self, exclusive=True):
if exclusive == self._mouse_exclusive:
return
self._mouse_exclusive = exclusive
self._update_exclusivity()
def set_exclusive_keyboard(self, exclusive=True):
if exclusive == self._keyboard_exclusive:
return
self._keyboard_exclusive = exclusive
self._update_exclusivity()
    def get_system_mouse_cursor(self, name):
        '''Return a platform cursor for the symbolic cursor *name*.

        Raises MouseCursorException for unknown names.
        '''
        if name == self.CURSOR_DEFAULT:
            return DefaultMouseCursor()
        # NQR means default shape is not pretty... surely there is another
        # cursor font?
        cursor_shapes = {
            self.CURSOR_CROSSHAIR: cursorfont.XC_crosshair,
            self.CURSOR_HAND: cursorfont.XC_hand2,
            self.CURSOR_HELP: cursorfont.XC_question_arrow, # NQR
            self.CURSOR_NO: cursorfont.XC_pirate, # NQR
            self.CURSOR_SIZE: cursorfont.XC_fleur,
            self.CURSOR_SIZE_UP: cursorfont.XC_top_side,
            self.CURSOR_SIZE_UP_RIGHT: cursorfont.XC_top_right_corner,
            self.CURSOR_SIZE_RIGHT: cursorfont.XC_right_side,
            self.CURSOR_SIZE_DOWN_RIGHT: cursorfont.XC_bottom_right_corner,
            self.CURSOR_SIZE_DOWN: cursorfont.XC_bottom_side,
            self.CURSOR_SIZE_DOWN_LEFT: cursorfont.XC_bottom_left_corner,
            self.CURSOR_SIZE_LEFT: cursorfont.XC_left_side,
            self.CURSOR_SIZE_UP_LEFT: cursorfont.XC_top_left_corner,
            self.CURSOR_SIZE_UP_DOWN: cursorfont.XC_sb_v_double_arrow,
            self.CURSOR_SIZE_LEFT_RIGHT: cursorfont.XC_sb_h_double_arrow,
            self.CURSOR_TEXT: cursorfont.XC_xterm,
            self.CURSOR_WAIT: cursorfont.XC_watch,
            self.CURSOR_WAIT_ARROW: cursorfont.XC_watch, # NQR
        }
        if name not in cursor_shapes:
            raise MouseCursorException('Unknown cursor name "%s"' % name)
        cursor = xlib.XCreateFontCursor(self._x_display, cursor_shapes[name])
        return XlibMouseCursor(cursor)
    def set_icon(self, *images):
        '''Set the window icon(s) via the _NET_WM_ICON property.

        Each image is serialised as (width, height) followed by BGRA/ARGB
        pixel data, packed as an array of C longs.
        '''
        # Careful! XChangeProperty takes an array of long when data type
        # is 32-bit (but long can be 64 bit!), so pad high bytes of format if
        # necessary.
        import sys
        format = {
            ('little', 4): 'BGRA',
            ('little', 8): 'BGRAAAAA',
            ('big', 4): 'ARGB',
            ('big', 8): 'AAAAARGB'
        }[(sys.byteorder, sizeof(c_ulong))]
        data = ''
        for image in images:
            image = image.get_image_data()
            # Negative pitch: rows top-to-bottom as X expects.
            pitch = -(image.width * len(format))
            s = c_buffer(sizeof(c_ulong) * 2)
            memmove(s, cast((c_ulong * 2)(image.width, image.height),
                            POINTER(c_ubyte)), len(s))
            data += s.raw + image.get_data(format, pitch)
        buffer = (c_ubyte * len(data))()
        memmove(buffer, data, len(data))
        atom = xlib.XInternAtom(self._x_display, '_NET_WM_ICON', False)
        # (Python 2) integer division gives the element count in longs.
        xlib.XChangeProperty(self._x_display, self._window, atom, XA_CARDINAL,
            32, xlib.PropModeReplace, buffer, len(data)/sizeof(c_ulong))
# Private utility
    def _set_wm_normal_hints(self):
        '''Push the current min/max size constraints to the WM via
        WM_NORMAL_HINTS.'''
        hints = xlib.XAllocSizeHints().contents
        if self._minimum_size:
            hints.flags |= xlib.PMinSize
            hints.min_width, hints.min_height = self._minimum_size
        if self._maximum_size:
            hints.flags |= xlib.PMaxSize
            hints.max_width, hints.max_height = self._maximum_size
        xlib.XSetWMNormalHints(self._x_display, self._window, byref(hints))
    def _set_text_property(self, name, value, allow_utf8=True):
        '''Set a text window property (e.g. WM_NAME) to *value*.

        Uses a UTF-8 text property where supported and permitted, falling
        back to a Latin-1/ASCII string list otherwise.
        '''
        atom = xlib.XInternAtom(self._x_display, name, False)
        if not atom:
            raise XlibException('Undefined atom "%s"' % name)
        # (Python 2) accept both byte and unicode strings.
        assert type(value) in (str, unicode)
        property = xlib.XTextProperty()
        if _have_utf8 and allow_utf8:
            buf = create_string_buffer(value.encode('utf8'))
            result = xlib.Xutf8TextListToTextProperty(self._x_display,
                cast(pointer(buf), c_char_p), 1, xlib.XUTF8StringStyle,
                byref(property))
            if result < 0:
                raise XlibException('Could not create UTF8 text property')
        else:
            buf = create_string_buffer(value.encode('ascii', 'ignore'))
            result = xlib.XStringListToTextProperty(
                cast(pointer(buf), c_char_p), 1, byref(property))
            if result < 0:
                raise XlibException('Could not create text property')
        xlib.XSetTextProperty(self._x_display,
            self._window, byref(property), atom)
        # XXX <rj> Xlib doesn't like us freeing this
        #xlib.XFree(property.value)
def _set_atoms_property(self, name, values, mode=xlib.PropModeReplace):
name_atom = xlib.XInternAtom(self._x_display, name, False)
atoms = []
for value in values:
atoms.append(xlib.XInternAtom(self._x_display, value, False))
atom_type = xlib.XInternAtom(self._x_display, 'ATOM', False)
if len(atoms):
atoms_ar = (xlib.Atom * len(atoms))(*atoms)
xlib.XChangeProperty(self._x_display, self._window,
name_atom, atom_type, 32, mode,
cast(pointer(atoms_ar), POINTER(c_ubyte)), len(atoms))
else:
xlib.XDeleteProperty(self._x_display, self._window, net_wm_state)
    def _set_wm_state(self, *states):
        '''Add the given _NET_WM_STATE_* atoms to the window state and
        notify the window manager per the EWMH spec.'''
        # Set property
        net_wm_state = xlib.XInternAtom(self._x_display, '_NET_WM_STATE', False)
        atoms = []
        for state in states:
            atoms.append(xlib.XInternAtom(self._x_display, state, False))
        atom_type = xlib.XInternAtom(self._x_display, 'ATOM', False)
        if len(atoms):
            atoms_ar = (xlib.Atom * len(atoms))(*atoms)
            xlib.XChangeProperty(self._x_display, self._window,
                net_wm_state, atom_type, 32, xlib.PropModePrepend,
                cast(pointer(atoms_ar), POINTER(c_ubyte)), len(atoms))
        else:
            xlib.XDeleteProperty(self._x_display, self._window, net_wm_state)
        # Nudge the WM: mapped windows require a client message to the root
        # window rather than a direct property change.
        e = xlib.XEvent()
        e.xclient.type = xlib.ClientMessage
        e.xclient.message_type = net_wm_state
        e.xclient.display = cast(self._x_display, POINTER(xlib.Display))
        e.xclient.window = self._window
        e.xclient.format = 32
        e.xclient.data.l[0] = xlib.PropModePrepend
        for i, atom in enumerate(atoms):
            e.xclient.data.l[i + 1] = atom
        xlib.XSendEvent(self._x_display, self._get_root(),
            False, xlib.SubstructureRedirectMask, byref(e))
# Event handling
    def dispatch_events(self):
        '''Drain pending X events for this window and dispatch them, then
        emit any deferred resize/expose.'''
        self.dispatch_pending_events()
        self._allow_dispatch_event = True
        e = xlib.XEvent()
        # Cache these in case window is closed from an event handler
        _x_display = self._x_display
        _window = self._window
        # Check for the events specific to this window
        while xlib.XCheckWindowEvent(_x_display, _window,
                                     0x1ffffff, byref(e)):
            # Key events are filtered by the xlib window event
            # handler so they get a shot at the prefiltered event.
            if e.xany.type not in (xlib.KeyPress, xlib.KeyRelease):
                if xlib.XFilterEvent(e, 0):
                    continue
            self.dispatch_platform_event(e)
        # Generic events for this window (the window close event).
        # (ClientMessage is not selectable via an event mask, so it needs a
        # separate typed check.)
        while xlib.XCheckTypedWindowEvent(_x_display, _window,
                                          xlib.ClientMessage, byref(e)):
            self.dispatch_platform_event(e)
        if self._needs_resize:
            self.dispatch_event('on_resize', self._width, self._height)
            self.dispatch_event('on_expose')
            self._needs_resize = False
        self._allow_dispatch_event = False
def dispatch_pending_events(self):
while self._event_queue:
EventDispatcher.dispatch_event(self, *self._event_queue.pop(0))
# Dispatch any context-related events
if self._lost_context:
self._lost_context = False
EventDispatcher.dispatch_event(self, 'on_context_lost')
if self._lost_context_state:
self._lost_context_state = False
EventDispatcher.dispatch_event(self, 'on_context_state_lost')
def dispatch_platform_event(self, e):
event_handler = self._event_handlers.get(e.type)
if event_handler:
event_handler(e)
@staticmethod
def _translate_modifiers(state):
modifiers = 0
if state & xlib.ShiftMask:
modifiers |= key.MOD_SHIFT
if state & xlib.ControlMask:
modifiers |= key.MOD_CTRL
if state & xlib.LockMask:
modifiers |= key.MOD_CAPSLOCK
if state & xlib.Mod1Mask:
modifiers |= key.MOD_ALT
if state & xlib.Mod2Mask:
modifiers |= key.MOD_NUMLOCK
if state & xlib.Mod4Mask:
modifiers |= key.MOD_WINDOWS
if state & xlib.Mod5Mask:
modifiers |= key.MOD_SCROLLLOCK
return modifiers
# Event handlers
'''
def _event_symbol(self, event):
# pyglet.self.key keysymbols are identical to X11 keysymbols, no
# need to map the keysymbol.
symbol = xlib.XKeycodeToKeysym(self._x_display, event.xkey.keycode, 0)
if symbol == 0:
# XIM event
return None
elif symbol not in key._key_names.keys():
symbol = key.user_key(event.xkey.keycode)
return symbol
'''
    def _event_text_symbol(self, ev):
        '''Return (text, symbol) for a key event.

        *text* is the composed unicode text (or None), *symbol* the pyglet
        key symbol (or None).  Runs the event through the X input method
        (XIM) when available so dead keys and compose sequences work.
        '''
        text = None
        symbol = xlib.KeySym()
        buffer = create_string_buffer(128)
        # Look up raw keysym before XIM filters it (default for keypress and
        # keyrelease)
        count = xlib.XLookupString(ev.xkey,
                                   buffer, len(buffer) - 1,
                                   byref(symbol), None)
        # Give XIM a shot
        filtered = xlib.XFilterEvent(ev, ev.xany.window)
        if ev.type == xlib.KeyPress and not filtered:
            status = c_int()
            if _have_utf8:
                encoding = 'utf8'
                count = xlib.Xutf8LookupString(self._x_ic,
                                               ev.xkey,
                                               buffer, len(buffer) - 1,
                                               byref(symbol), byref(status))
                if status.value == xlib.XBufferOverflow:
                    raise NotImplementedError('TODO: XIM buffer resize')
            else:
                encoding = 'ascii'
                count = xlib.XLookupString(ev.xkey,
                                           buffer, len(buffer) - 1,
                                           byref(symbol), None)
                if count:
                    status.value = xlib.XLookupBoth
            if status.value & (xlib.XLookupChars | xlib.XLookupBoth):
                text = buffer.value[:count].decode(encoding)
            # Don't treat Unicode command codepoints as text, except Return.
            if text and unicodedata.category(text) == 'Cc' and text != '\r':
                text = None
        symbol = symbol.value
        # If the event is a XIM filtered event, the keysym will be virtual
        # (e.g., aacute instead of A after a dead key). Drop it, we don't
        # want these kind of key events.
        if ev.xkey.keycode == 0 and not filtered:
            symbol = None
        # pyglet.self.key keysymbols are identical to X11 keysymbols, no
        # need to map the keysymbol. For keysyms outside the pyglet set, map
        # raw key code to a user key.
        if symbol and symbol not in key._key_names and ev.xkey.keycode:
            symbol = key.user_key(ev.xkey.keycode)
        if filtered:
            # The event was filtered, text must be ignored, but the symbol is
            # still good.
            return None, symbol
        return text, symbol
def _event_text_motion(self, symbol, modifiers):
if modifiers & key.MOD_ALT:
return None
ctrl = modifiers & key.MOD_CTRL != 0
return _motion_map.get((symbol, ctrl), None)
@XlibEventHandler(xlib.KeyPress)
@XlibEventHandler(xlib.KeyRelease)
# Dispatch on_key_press/on_key_release plus the derived on_text /
# on_text_motion events. X11 reports key auto-repeat as a
# KeyRelease+KeyPress pair; the KeyRelease branch peeks ahead in the
# event queue to detect that and re-dispatch only the text events.
def _event_key(self, ev):
if ev.type == xlib.KeyRelease:
# Look in the queue for a matching KeyPress with same timestamp,
# indicating an auto-repeat rather than actual key event.
saved = []
while True:
auto_event = xlib.XEvent()
result = xlib.XCheckWindowEvent(self._x_display,
self._window, xlib.KeyPress|xlib.KeyRelease,
byref(auto_event))
if not result:
break
saved.append(auto_event)
if auto_event.type == xlib.KeyRelease:
# just save this off for restoration back to the queue
continue
if ev.xkey.keycode == auto_event.xkey.keycode:
# Found a key repeat: dispatch EVENT_TEXT* event
text, symbol = self._event_text_symbol(auto_event)
modifiers = self._translate_modifiers(ev.xkey.state)
modifiers_ctrl = modifiers & (key.MOD_CTRL | key.MOD_ALT)
motion = self._event_text_motion(symbol, modifiers)
if motion:
if modifiers & key.MOD_SHIFT:
self.dispatch_event(
'on_text_motion_select', motion)
else:
self.dispatch_event('on_text_motion', motion)
elif text and not modifiers_ctrl:
self.dispatch_event('on_text', text)
# Discard the matching KeyPress (last saved event) so the
# repeat is not processed again, then restore the rest.
ditched = saved.pop()
for auto_event in reversed(saved):
xlib.XPutBackEvent(self._x_display, byref(auto_event))
return
else:
# Key code of press did not match, therefore no repeating
# is going on, stop searching.
break
# Whoops, put the events back, it's for real.
for auto_event in reversed(saved):
xlib.XPutBackEvent(self._x_display, byref(auto_event))
text, symbol = self._event_text_symbol(ev)
modifiers = self._translate_modifiers(ev.xkey.state)
modifiers_ctrl = modifiers & (key.MOD_CTRL | key.MOD_ALT)
motion = self._event_text_motion(symbol, modifiers)
if ev.type == xlib.KeyPress:
if symbol:
self.dispatch_event('on_key_press', symbol, modifiers)
if motion:
if modifiers & key.MOD_SHIFT:
self.dispatch_event('on_text_motion_select', motion)
else:
self.dispatch_event('on_text_motion', motion)
elif text and not modifiers_ctrl:
self.dispatch_event('on_text', text)
elif ev.type == xlib.KeyRelease:
if symbol:
self.dispatch_event('on_key_release', symbol, modifiers)
@XlibEventHandler(xlib.MotionNotify)
# Dispatch on_mouse_drag (any button held) or on_mouse_motion.
# y is flipped because pyglet's origin is bottom-left while X11's is
# top-left. In exclusive-mouse mode the pointer is warped back to the
# centre, and the synthetic event that warp generates is ignored.
def _event_motionnotify(self, ev):
x = ev.xmotion.x
y = self.height - ev.xmotion.y
dx = x - self._mouse_x
dy = y - self._mouse_y
if self._applied_mouse_exclusive and \
(ev.xmotion.x, ev.xmotion.y) == self._mouse_exclusive_client:
# Ignore events caused by XWarpPointer
self._mouse_x = x
self._mouse_y = y
return
if self._applied_mouse_exclusive:
# Reset pointer position
ex, ey = self._mouse_exclusive_client
xlib.XWarpPointer(self._x_display,
0,
self._window,
0, 0,
0, 0,
ex, ey)
self._mouse_x = x
self._mouse_y = y
self._mouse_in_window = True
buttons = 0
if ev.xmotion.state & xlib.Button1MotionMask:
buttons |= mouse.LEFT
if ev.xmotion.state & xlib.Button2MotionMask:
buttons |= mouse.MIDDLE
if ev.xmotion.state & xlib.Button3MotionMask:
buttons |= mouse.RIGHT
if buttons:
# Drag event
modifiers = self._translate_modifiers(ev.xmotion.state)
self.dispatch_event('on_mouse_drag',
x, y, dx, dy, buttons, modifiers)
else:
# Motion event
self.dispatch_event('on_mouse_motion', x, y, dx, dy)
@XlibEventHandler(xlib.ClientMessage)
# Handle window-manager client messages: WM_DELETE_WINDOW becomes
# on_close; _NET_WM_SYNC_REQUEST stores the 64-bit sync counter value
# (hi/lo 32-bit halves) for the XSync resize protocol (see _sync_resize).
def _event_clientmessage(self, ev):
atom = ev.xclient.data.l[0]
if atom == xlib.XInternAtom(ev.xclient.display,
'WM_DELETE_WINDOW', False):
self.dispatch_event('on_close')
elif (self._enable_xsync and
atom == xlib.XInternAtom(ev.xclient.display,
'_NET_WM_SYNC_REQUEST', False)):
lo = ev.xclient.data.l[2]
hi = ev.xclient.data.l[3]
self._current_sync_value = xsync.XSyncValue(hi, lo)
# Complete the _NET_WM_SYNC_REQUEST handshake: after a resize has been
# processed, update the XSync counter with the value saved by
# _event_clientmessage so the WM knows the frame was drawn.
def _sync_resize(self):
if self._enable_xsync and self._current_sync_valid:
if xsync.XSyncValueIsZero(self._current_sync_value):
self._current_sync_valid = False
return
xsync.XSyncSetCounter(self._x_display,
self._sync_counter,
self._current_sync_value)
self._current_sync_value = None
self._current_sync_valid = False
@XlibEventHandler(xlib.ButtonPress)
@XlibEventHandler(xlib.ButtonRelease)
# Dispatch mouse press/release; X buttons 4/5 are the scroll wheel and
# become on_mouse_scroll instead of press events.
def _event_button(self, ev):
x = ev.xbutton.x
y = self.height - ev.xbutton.y
button = 1 << (ev.xbutton.button - 1) # 1, 2, 3 -> 1, 2, 4
modifiers = self._translate_modifiers(ev.xbutton.state)
if ev.type == xlib.ButtonPress:
# override_redirect issue: manually activate this window if
# fullscreen.
if self._fullscreen and not self._active:
self.activate()
if ev.xbutton.button == 4:
self.dispatch_event('on_mouse_scroll', x, y, 0, 1)
elif ev.xbutton.button == 5:
self.dispatch_event('on_mouse_scroll', x, y, 0, -1)
elif ev.xbutton.button < len(self._mouse_buttons):
self._mouse_buttons[ev.xbutton.button] = True
self.dispatch_event('on_mouse_press',
x, y, button, modifiers)
else:
# Buttons 1-3 only; scroll-wheel "buttons" have no release state.
if ev.xbutton.button < 4:
self._mouse_buttons[ev.xbutton.button] = False
self.dispatch_event('on_mouse_release',
x, y, button, modifiers)
@XlibEventHandler(xlib.Expose)
def _event_expose(self, ev):
# Ignore all expose events except the last one. We could be told
# about exposure rects - but I don't see the point since we're
# working with OpenGL and we'll just redraw the whole scene.
# (xexpose.count is the number of Expose events still to follow.)
if ev.xexpose.count > 0: return
self.dispatch_event('on_expose')
@XlibEventHandler(xlib.EnterNotify)
# Pointer entered the window: resync the held-button state and mouse
# position from the crossing event, then dispatch on_mouse_enter.
def _event_enternotify(self, ev):
# figure active mouse buttons
# XXX ignore modifier state?
state = ev.xcrossing.state
self._mouse_buttons[1] = state & xlib.Button1Mask
self._mouse_buttons[2] = state & xlib.Button2Mask
self._mouse_buttons[3] = state & xlib.Button3Mask
self._mouse_buttons[4] = state & xlib.Button4Mask
self._mouse_buttons[5] = state & xlib.Button5Mask
# mouse position
x = self._mouse_x = ev.xcrossing.x
y = self._mouse_y = self.height - ev.xcrossing.y
self._mouse_in_window = True
# XXX there may be more we could do here
self.dispatch_event('on_mouse_enter', x, y)
@XlibEventHandler(xlib.LeaveNotify)
# Pointer left the window: record the exit position (y flipped to
# bottom-left origin) and dispatch on_mouse_leave.
def _event_leavenotify(self, ev):
x = self._mouse_x = ev.xcrossing.x
y = self._mouse_y = self.height - ev.xcrossing.y
self._mouse_in_window = False
self.dispatch_event('on_mouse_leave', x, y)
@XlibEventHandler(xlib.ConfigureNotify)
# Window moved and/or resized. Resize is deferred via _needs_resize;
# moves dispatch on_move immediately. Also marks the pending XSync
# value valid so _sync_resize can acknowledge the WM.
def _event_configurenotify(self, ev):
if self._enable_xsync and self._current_sync_value:
self._current_sync_valid = True
self.switch_to()
w, h = ev.xconfigure.width, ev.xconfigure.height
x, y = ev.xconfigure.x, ev.xconfigure.y
if self._width != w or self._height != h:
self._width = w
self._height = h
self._needs_resize = True
if self._x != x or self._y != y:
self.dispatch_event('on_move', x, y)
self._x = x
self._y = y
@XlibEventHandler(xlib.FocusIn)
# Window gained keyboard focus: re-apply mouse/keyboard exclusivity,
# dispatch on_activate, and give the X input context focus.
def _event_focusin(self, ev):
self._active = True
self._update_exclusivity()
self.dispatch_event('on_activate')
xlib.XSetICFocus(self._x_ic)
@XlibEventHandler(xlib.FocusOut)
# Window lost keyboard focus: mirror of _event_focusin.
def _event_focusout(self, ev):
self._active = False
self._update_exclusivity()
self.dispatch_event('on_deactivate')
xlib.XUnsetICFocus(self._x_ic)
@XlibEventHandler(xlib.MapNotify)
# Window became visible (mapped).
def _event_mapnotify(self, ev):
self._mapped = True
self.dispatch_event('on_show')
@XlibEventHandler(xlib.UnmapNotify)
# Window was hidden (unmapped).
def _event_unmapnotify(self, ev):
self._mapped = False
self.dispatch_event('on_hide')
| bsd-3-clause | af55512565ad56b4018f0cfb45a217e6 | 38.212483 | 80 | 0.563247 | 3.931691 | false | false | false | false |
mattpap/sympy-polys | sympy/mpmath/functions/functions.py | 1 | 11974 | class SpecialFunctions(object):
"""
This class implements special functions using high-level code.
Elementary and some other functions (e.g. gamma function, basecase
hypergeometric series) are assumed to be predefined by the context as
"builtins" or "low-level" functions.
"""
defined_functions = {}
# The series for the Jacobi theta functions converge for |q| < 1;
# in the current implementation they throw a ValueError for
# abs(q) > THETA_Q_LIM
THETA_Q_LIM = 1 - 10**-7
def __init__(self):
cls = self.__class__
for name in cls.defined_functions:
f, wrap = cls.defined_functions[name]
cls._wrap_specfun(name, f, wrap)
self.mpq_1 = self._mpq((1,1))
self.mpq_0 = self._mpq((0,1))
self.mpq_1_2 = self._mpq((1,2))
self.mpq_3_2 = self._mpq((3,2))
self.mpq_1_4 = self._mpq((1,4))
self.mpq_1_16 = self._mpq((1,16))
self.mpq_3_16 = self._mpq((3,16))
self.mpq_5_2 = self._mpq((5,2))
self.mpq_3_4 = self._mpq((3,4))
self.mpq_7_4 = self._mpq((7,4))
self.mpq_5_4 = self._mpq((5,4))
self._aliases.update({
'phase' : 'arg',
'conjugate' : 'conj',
'nthroot' : 'root',
'polygamma' : 'psi',
'hurwitz' : 'zeta',
#'digamma' : 'psi0',
#'trigamma' : 'psi1',
#'tetragamma' : 'psi2',
#'pentagamma' : 'psi3',
'fibonacci' : 'fib',
'factorial' : 'fac',
})
# Default -- do nothing
@classmethod
def _wrap_specfun(cls, name, f, wrap):
setattr(cls, name, f)
# Optional fast versions of common functions in common cases.
# If not overridden, default (generic hypergeometric series)
# implementations will be used
def _besselj(ctx, n, z): raise NotImplementedError
def _erf(ctx, z): raise NotImplementedError
def _erfc(ctx, z): raise NotImplementedError
def _gamma_upper_int(ctx, z, a): raise NotImplementedError
def _expint_int(ctx, n, z): raise NotImplementedError
def _zeta(ctx, s): raise NotImplementedError
def _zetasum_fast(ctx, s, a, n, derivatives, reflect): raise NotImplementedError
def _ei(ctx, z): raise NotImplementedError
def _e1(ctx, z): raise NotImplementedError
def _ci(ctx, z): raise NotImplementedError
def _si(ctx, z): raise NotImplementedError
def _altzeta(ctx, s): raise NotImplementedError
# Registration decorator: records f for installation on SpecialFunctions
# subclasses with wrapping enabled. Deliberately returns None -- the
# module-level name is not meant to be called directly.
def defun_wrapped(f):
SpecialFunctions.defined_functions[f.__name__] = f, True
# Registration decorator like defun_wrapped, but without wrapping.
def defun(f):
SpecialFunctions.defined_functions[f.__name__] = f, False
# Install f directly on the SpecialFunctions class (no per-subclass wrap).
def defun_static(f):
setattr(SpecialFunctions, f.__name__, f)
@defun_wrapped
def cot(ctx, z): return ctx.one / ctx.tan(z)  # cotangent: 1/tan(z)
@defun_wrapped
def sec(ctx, z): return ctx.one / ctx.cos(z)  # secant: 1/cos(z)
@defun_wrapped
def csc(ctx, z): return ctx.one / ctx.sin(z)  # cosecant: 1/sin(z)
@defun_wrapped
def coth(ctx, z): return ctx.one / ctx.tanh(z)  # hyperbolic cotangent
@defun_wrapped
def sech(ctx, z): return ctx.one / ctx.cosh(z)  # hyperbolic secant
@defun_wrapped
def csch(ctx, z): return ctx.one / ctx.sinh(z)  # hyperbolic cosecant
@defun_wrapped
def acot(ctx, z): return ctx.atan(ctx.one / z)  # inverse cotangent
@defun_wrapped
def asec(ctx, z): return ctx.acos(ctx.one / z)  # inverse secant
@defun_wrapped
def acsc(ctx, z): return ctx.asin(ctx.one / z)  # inverse cosecant
@defun_wrapped
def acoth(ctx, z): return ctx.atanh(ctx.one / z)  # inverse hyperbolic cotangent
@defun_wrapped
def asech(ctx, z): return ctx.acosh(ctx.one / z)  # inverse hyperbolic secant
@defun_wrapped
def acsch(ctx, z): return ctx.asinh(ctx.one / z)  # inverse hyperbolic cosecant
@defun
# Sign of x: 0/nan pass through; real x -> +/-1; complex x -> x/|x|
# (the unit-modulus direction of x).
def sign(ctx, x):
x = ctx.convert(x)
if not x or ctx.isnan(x):
return x
if ctx._is_real_type(x):
if x > 0:
return ctx.one
else:
return -ctx.one
return x / abs(x)
@defun
# Arithmetic-geometric mean of a and b; agm(a) == agm(a, 1) uses the
# specialized one-argument routine.
def agm(ctx, a, b=1):
if b == 1:
return ctx.agm1(a)
a = ctx.convert(a)
b = ctx.convert(b)
return ctx._agm(a, b)
@defun_wrapped
# Unnormalized sinc: sin(x)/x, with sinc(0) = 1 and sinc(+-inf) = 0.
# "x+1" at zero preserves the type/precision of x.
def sinc(ctx, x):
if ctx.isinf(x):
return 1/x
if not x:
return x+1
return ctx.sin(x)/x
@defun_wrapped
# Normalized sinc: sin(pi*x)/(pi*x), with sincpi(0) = 1.
def sincpi(ctx, x):
if ctx.isinf(x):
return 1/x
if not x:
return x+1
return ctx.sinpi(x)/(ctx.pi*x)
# TODO: tests; improve implementation
@defun_wrapped
# exp(x) - 1, computed accurately for small x where exp(x) loses the
# significant digits to cancellation.
def expm1(ctx, x):
if not x:
return ctx.zero
# exp(x) - 1 ~ x
if ctx.mag(x) < -ctx.prec:
return x + 0.5*x**2
# TODO: accurately eval the smaller of the real/imag parts
return ctx.sum_accurately(lambda: iter([ctx.exp(x),-1]),1)
@defun_wrapped
# x**y - 1, guarding against catastrophic cancellation when x**y is
# close to 1 (small y, or x close to a root of unity).
def powm1(ctx, x, y):
mag = ctx.mag
one = ctx.one
w = x**y - one
M = mag(w)
# Only moderate cancellation
if M > -8:
return w
# Check for the only possible exact cases
if not w:
if (not y) or (x in (1, -1, 1j, -1j) and ctx.isint(y)):
return w
x1 = x - one
magy = mag(y)
lnx = ctx.ln(x)
# Small y: x^y - 1 ~ log(x)*y + O(log(x)^2 * y^2)
if magy + mag(lnx) < -ctx.prec:
return lnx*y + (lnx*y)**2/2
# TODO: accurately eval the smaller of the real/imag part
return ctx.sum_accurately(lambda: iter([x**y, -1]), 1)
@defun
# k-th n-th root of unity, exp(2*pi*i*k/n), with the four axis-aligned
# cases (1, -1, i, -i) returned exactly.
def _rootof1(ctx, k, n):
k = int(k)
n = int(n)
k %= n
if not k:
return ctx.one
elif 2*k == n:
return -ctx.one
elif 4*k == n:
return ctx.j
elif 4*k == 3*n:
return -ctx.j
return ctx.expjpi(2*ctx.mpf(k)/n)
@defun
# k-th branch of the principal n-th root of x. k=0 is the principal
# root; other branches multiply by a root of unity at raised precision.
def root(ctx, x, n, k=0):
n = int(n)
x = ctx.convert(x)
if k:
# Special case: there is an exact real root
if (n & 1 and 2*k == n-1) and (not ctx.im(x)) and (ctx.re(x) < 0):
return -ctx.root(-x, n)
# Multiply by root of unity
prec = ctx.prec
try:
ctx.prec += 10
v = ctx.root(x, n, 0) * ctx._rootof1(k, n)
finally:
ctx.prec = prec
return +v
return ctx._nthroot(x, n)
@defun
# All n-th roots of unity (or only the primitive ones, i.e. those with
# gcd(k, n) == 1), computed at raised precision and rounded at the end.
def unitroots(ctx, n, primitive=False):
gcd = ctx._gcd
prec = ctx.prec
try:
ctx.prec += 10
if primitive:
v = [ctx._rootof1(k,n) for k in range(n) if gcd(k,n) == 1]
else:
# TODO: this can be done *much* faster
v = [ctx._rootof1(k,n) for k in range(n)]
finally:
ctx.prec = prec
return [+x for x in v]
@defun
# Complex argument (phase) of x, in (-pi, pi], via atan2(im, re).
def arg(ctx, x):
x = ctx.convert(x)
re = ctx._re(x)
im = ctx._im(x)
return ctx.atan2(im, re)
@defun
def fabs(ctx, x):
    """Return the absolute value of ``x`` (converted to a context number)."""
    converted = ctx.convert(x)
    return abs(converted)
@defun
# Real part of x. Falls back to x itself when the type has no .real.
def re(ctx, x):
x = ctx.convert(x)
if hasattr(x, "real"): # py2.5 doesn't have .real/.imag for all numbers
return x.real
return x
@defun
# Imaginary part of x; zero when the type has no .imag attribute.
def im(ctx, x):
x = ctx.convert(x)
if hasattr(x, "imag"): # py2.5 doesn't have .real/.imag for all numbers
return x.imag
return ctx.zero
@defun
# Complex conjugate of x; types without conjugate() pass through.
def conj(ctx, x):
x = ctx.convert(x)
try:
return x.conjugate()
except AttributeError:
return x
@defun
def polar(ctx, z):
    """Return the polar form ``(r, phi)`` of ``z``, where ``r = |z|`` and
    ``phi = arg(z)``."""
    radius = ctx.fabs(z)
    angle = ctx.arg(z)
    return (radius, angle)
@defun_wrapped
# Convert polar (r, phi) to a complex number r*(cos(phi) + i*sin(phi)).
def rect(ctx, r, phi):
return r * ctx.mpc(*ctx.cos_sin(phi))
@defun
# Logarithm of x; natural log by default, base b via ln(x)/ln(b) at
# slightly raised working precision.
def log(ctx, x, b=None):
if b is None:
return ctx.ln(x)
wp = ctx.prec + 20
return ctx.ln(x, prec=wp) / ctx.ln(b, prec=wp)
@defun
def log10(ctx, x):
    """Return the common (base-10) logarithm of ``x``."""
    return ctx.log(x, 10)
@defun
# Remainder x mod y using the context types' % operator.
def fmod(ctx, x, y):
return ctx.convert(x) % ctx.convert(y)
@defun
def degrees(ctx, x):
    """Convert angle ``x`` from radians to degrees."""
    return x / ctx.degree
@defun
def radians(ctx, x):
    """Convert angle ``x`` from degrees to radians."""
    return x * ctx.degree
@defun_wrapped
# Branch k of the Lambert W function (solution of w*exp(w) = z).
# Strategy: pick an initial guess (fp solver, or asymptotic/branch-point
# approximations near the singularities at z = -1/e and z = 0), then
# refine with Halley iteration.
def lambertw(ctx, z, k=0):
k = int(k)
if ctx.isnan(z):
return z
ctx.prec += 20
mag = ctx.mag(z)
# Start from fp approximation
if ctx is ctx._mp and abs(mag) < 900 and abs(k) < 10000 and \
abs(z+0.36787944117144) > 0.01:
w = ctx._fp.lambertw(z, k)
else:
absz = abs(z)
# We must be extremely careful near the singularities at -1/e and 0
u = ctx.exp(-1)
if absz <= u:
if not z:
# w(0,0) = 0; for all other branches we hit the pole
if not k:
return z
return ctx.ninf
if not k:
w = z
# For small real z < 0, the -1 branch aves roughly like log(-z)
elif k == -1 and not ctx.im(z) and ctx.re(z) < 0:
w = ctx.ln(-z)
# Use a simple asymptotic approximation.
else:
w = ctx.ln(z)
# The branches are roughly logarithmic. This approximation
# gets better for large |k|; need to check that this always
# works for k ~= -1, 0, 1.
if k: w += k * 2*ctx.pi*ctx.j
elif k == 0 and ctx.im(z) and absz <= 0.7:
# Both the W(z) ~= z and W(z) ~= ln(z) approximations break
# down around z ~= -0.5 (converging to the wrong branch), so patch
# with a constant approximation (adjusted for sign)
if abs(z+0.5) < 0.1:
if ctx.im(z) > 0:
w = ctx.mpc(0.7+0.7j)
else:
w = ctx.mpc(0.7-0.7j)
else:
w = z
else:
if z == ctx.inf:
if k == 0:
return z
else:
return z + 2*k*ctx.pi*ctx.j
if z == ctx.ninf:
return (-z) + (2*k+1)*ctx.pi*ctx.j
# Simple asymptotic approximation as above
w = ctx.ln(z)
if k:
w += k * 2*ctx.pi*ctx.j
# Use Halley iteration to solve w*exp(w) = z
two = ctx.mpf(2)
weps = ctx.ldexp(ctx.eps, 15)
for i in xrange(100):
ew = ctx.exp(w)
wew = w*ew
wewz = wew-z
wn = w - wewz/(wew+ew-(w+two)*wewz/(two*w+two))
if abs(wn-w) < weps*abs(wn):
return wn
else:
w = wn
ctx.warn("Lambert W iteration failed to converge for %s" % z)
return wn
@defun_wrapped
# Bell polynomial B_n(x) (Bell numbers for x = 1), evaluated via the
# exponential generating series exp(-x) * sum(k^n x^k / k!).
def bell(ctx, n, x=1):
x = ctx.convert(x)
if not n:
if ctx.isnan(x):
return x
return type(x)(1)
if ctx.isinf(x) or ctx.isinf(n) or ctx.isnan(x) or ctx.isnan(n):
return x**n
if n == 1: return x
if n == 2: return x*(x+1)
if x == 0: return ctx.sincpi(n)
return _polyexp(ctx, n, x, True) / ctx.exp(x)
# Helper summing sum_{k>=1} k^n x^k / k! accurately (with an optional
# extra sincpi(n) leading term used by bell()).
def _polyexp(ctx, n, x, extra=False):
def _terms():
if extra:
yield ctx.sincpi(n)
t = x
k = 1
while 1:
yield k**n * t
k += 1
t = t*x/k
return ctx.sum_accurately(_terms, check_step=4)
@defun_wrapped
# Polyexponential function E_s(z) = sum_{k>=1} k^s z^k / k!, with
# closed forms for s = 0, 1, 2 and special values.
def polyexp(ctx, s, z):
if ctx.isinf(z) or ctx.isinf(s) or ctx.isnan(z) or ctx.isnan(s):
return z**s
if z == 0: return z*s
if s == 0: return ctx.expm1(z)
if s == 1: return ctx.exp(z)*z
if s == 2: return ctx.exp(z)*z*(z+1)
return _polyexp(ctx, s, z)
@defun_wrapped
# n-th cyclotomic polynomial evaluated at z, using the Moebius divisor
# product prod_{d|n} (z^d - 1)^mu(n/d), with zero/pole factors at roots
# of unity cancelled pairwise.
def cyclotomic(ctx, n, z):
n = int(n)
assert n >= 0
p = ctx.one
if n == 0:
return p
if n == 1:
return z - p
if n == 2:
return z + p
# Use divisor product representation. Unfortunately, this sometimes
# includes singularities for roots of unity, which we have to cancel out.
# Matching zeros/poles pairwise, we have (1-z^a)/(1-z^b) ~ a/b + O(z-1).
a_prod = 1
b_prod = 1
num_zeros = 0
num_poles = 0
for d in range(1,n+1):
if not n % d:
w = ctx.moebius(n//d)
# Use powm1 because it is important that we get 0 only
# if it really is exactly 0
b = -ctx.powm1(z, d)
if b:
p *= b**w
else:
if w == 1:
a_prod *= d
num_zeros += 1
elif w == -1:
b_prod *= d
num_poles += 1
#print n, num_zeros, num_poles
if num_zeros:
if num_zeros > num_poles:
p *= 0
else:
p *= a_prod
p /= b_prod
return p
| bsd-3-clause | 9f2af02019693eddd8877e72177a0fbe | 26.090498 | 84 | 0.52756 | 2.991257 | false | false | false | false |
mattpap/sympy-polys | sympy/polys/tests/test_galoistools.py | 2 | 22630 |
from sympy.polys.galoistools import (
gf_crt, gf_crt1, gf_crt2, gf_int,
gf_degree, gf_strip, gf_trunc, gf_normal,
gf_from_dict, gf_to_dict,
gf_from_int_poly, gf_to_int_poly,
gf_neg, gf_add_ground, gf_sub_ground, gf_mul_ground, gf_exquo_ground,
gf_add, gf_sub, gf_add_mul, gf_sub_mul, gf_mul, gf_sqr,
gf_div, gf_rem, gf_quo, gf_exquo,
gf_lshift, gf_rshift, gf_expand,
gf_pow, gf_pow_mod,
gf_gcd, gf_gcdex,
gf_LC, gf_TC, gf_monic,
gf_eval, gf_multi_eval,
gf_compose, gf_compose_mod,
gf_trace_map,
gf_diff, gf_random,
gf_irreducible, gf_irreducible_p,
gf_irred_p_ben_or, gf_irred_p_rabin,
gf_sqf_list, gf_sqf_part, gf_sqf_p,
gf_Qmatrix, gf_Qbasis,
gf_ddf_zassenhaus, gf_ddf_shoup,
gf_edf_zassenhaus, gf_edf_shoup,
gf_berlekamp, gf_zassenhaus, gf_shoup,
gf_factor_sqf, gf_factor,
)
from sympy.polys.polyerrors import (
ExactQuotientFailed,
)
from sympy.polys.algebratools import ZZ
from sympy import pi, nextprime, raises
def test_gf_crt():
# Chinese Remainder Theorem: direct solve, precomputation, and
# solve-with-precomputation all agree.
U = [49, 76, 65]
M = [99, 97, 95]
p = 912285
u = 639985
assert gf_crt(U, M, ZZ) == u
E = [9215, 9405, 9603]
S = [62, 24, 12]
assert gf_crt1(M, ZZ) == (p, E, S)
assert gf_crt2(U, M, p, E, S, ZZ) == u
def test_gf_int():
# Symmetric representation of residues mod 5: range (-2, 2].
assert gf_int(0, 5) == 0
assert gf_int(1, 5) == 1
assert gf_int(2, 5) == 2
assert gf_int(3, 5) ==-2
assert gf_int(4, 5) ==-1
assert gf_int(5, 5) == 0
def test_gf_degree():
# Degree of a dense polynomial; empty (zero) polynomial has degree -1.
assert gf_degree([]) == -1
assert gf_degree([1]) == 0
assert gf_degree([1,0]) == 1
assert gf_degree([1,0,0,0,1]) == 4
def test_gf_strip():
# Remove leading zero coefficients.
assert gf_strip([]) == []
assert gf_strip([0]) == []
assert gf_strip([0,0,0]) == []
assert gf_strip([1]) == [1]
assert gf_strip([0,1]) == [1]
assert gf_strip([0,0,0,1]) == [1]
assert gf_strip([1,2,0]) == [1,2,0]
assert gf_strip([0,1,2,0]) == [1,2,0]
assert gf_strip([0,0,0,1,2,0]) == [1,2,0]
def test_gf_trunc():
# Reduce all coefficients mod p and strip leading zeros.
assert gf_trunc([], 11) == []
assert gf_trunc([1], 11) == [1]
assert gf_trunc([22], 11) == []
assert gf_trunc([12], 11) == [1]
assert gf_trunc([11,22,17,1,0], 11) == [6,1,0]
assert gf_trunc([12,23,17,1,0], 11) == [1,1,6,1,0]
def test_gf_normal():
# Normalization: coefficients converted to the ground domain and truncated.
assert gf_normal([11,22,17,1,0], 11, ZZ) == [6,1,0]
def test_gf_from_to_dict():
# Round-trip between sparse dict and dense list representations;
# gf_to_dict uses symmetric residues by default.
f = {11: 12, 6: 2, 0: 25}
F = {11: 1, 6: 2, 0: 3}
g = [1,0,0,0,0,2,0,0,0,0,0,3]
assert gf_from_dict(f, 11, ZZ) == g
assert gf_to_dict(g, 11) == F
f = {11: -5, 4: 0, 3: 1, 0: 12}
F = {11: -5, 3: 1, 0: 1}
g = [6,0,0,0,0,0,0,0,1,0,0,1]
assert gf_from_dict(f, 11, ZZ) == g
assert gf_to_dict(g, 11) == F
assert gf_to_dict([10], 11, symmetric=True) == {0: -1}
assert gf_to_dict([10], 11, symmetric=False) == {0: 10}
def test_gf_from_to_int_poly():
# Conversion between integer coefficient lists and GF(p) polynomials.
assert gf_from_int_poly([1,0,7,2,20], 5) == [1,0,2,2,0]
assert gf_to_int_poly([1,0,4,2,3], 5) == [1,0,-1,2,-2]
assert gf_to_int_poly([10], 11, symmetric=True) == [-1]
assert gf_to_int_poly([10], 11, symmetric=False) == [10]
def test_gf_LC():
# Leading coefficient; 0 for the zero polynomial.
assert gf_LC([], ZZ) == 0
assert gf_LC([1], ZZ) == 1
assert gf_LC([1,2], ZZ) == 1
def test_gf_TC():
# Trailing (constant) coefficient; 0 for the zero polynomial.
assert gf_TC([], ZZ) == 0
assert gf_TC([1], ZZ) == 1
assert gf_TC([1,2], ZZ) == 2
def test_gf_monic():
# Returns (leading coefficient, monic associate).
assert gf_monic([], 11, ZZ) == (0, [])
assert gf_monic([1], 11, ZZ) == (1, [1])
assert gf_monic([2], 11, ZZ) == (2, [1])
assert gf_monic([1,2,3,4], 11, ZZ) == (1, [1,2,3,4])
assert gf_monic([2,3,4,5], 11, ZZ) == (2, [1,7,2,8])
def test_gf_arith():
# Basic GF(p) arithmetic: negation, ground ops, add/sub, fused
# add_mul/sub_mul, multiplication and squaring.
assert gf_neg([], 11, ZZ) == []
assert gf_neg([1], 11, ZZ) == [10]
assert gf_neg([1,2,3], 11, ZZ) == [10,9,8]
assert gf_add_ground([], 0, 11, ZZ) == []
assert gf_sub_ground([], 0, 11, ZZ) == []
assert gf_add_ground([], 3, 11, ZZ) == [3]
assert gf_sub_ground([], 3, 11, ZZ) == [8]
assert gf_add_ground([1], 3, 11, ZZ) == [4]
assert gf_sub_ground([1], 3, 11, ZZ) == [9]
assert gf_add_ground([8], 3, 11, ZZ) == []
assert gf_sub_ground([3], 3, 11, ZZ) == []
assert gf_add_ground([1,2,3], 3, 11, ZZ) == [1,2,6]
assert gf_sub_ground([1,2,3], 3, 11, ZZ) == [1,2,0]
assert gf_mul_ground([], 0, 11, ZZ) == []
assert gf_mul_ground([], 1, 11, ZZ) == []
assert gf_mul_ground([1], 0, 11, ZZ) == []
assert gf_mul_ground([1], 1, 11, ZZ) == [1]
assert gf_mul_ground([1,2,3], 0, 11, ZZ) == []
assert gf_mul_ground([1,2,3], 1, 11, ZZ) == [1,2,3]
assert gf_mul_ground([1,2,3], 7, 11, ZZ) == [7,3,10]
assert gf_add([], [], 11, ZZ) == []
assert gf_add([1], [], 11, ZZ) == [1]
assert gf_add([], [1], 11, ZZ) == [1]
assert gf_add([1], [1], 11, ZZ) == [2]
assert gf_add([1], [2], 11, ZZ) == [3]
assert gf_add([1,2], [1], 11, ZZ) == [1,3]
assert gf_add([1], [1,2], 11, ZZ) == [1,3]
assert gf_add([1,2,3], [8,9,10], 11, ZZ) == [9,0,2]
assert gf_sub([], [], 11, ZZ) == []
assert gf_sub([1], [], 11, ZZ) == [1]
assert gf_sub([], [1], 11, ZZ) == [10]
assert gf_sub([1], [1], 11, ZZ) == []
assert gf_sub([1], [2], 11, ZZ) == [10]
assert gf_sub([1,2], [1], 11, ZZ) == [1,1]
assert gf_sub([1], [1,2], 11, ZZ) == [10,10]
assert gf_sub([3,2,1], [8,9,10], 11, ZZ) == [6,4,2]
assert gf_add_mul([1,5,6], [7,3], [8,0,6,1], 11, ZZ) == [1,2,10,8,9]
assert gf_sub_mul([1,5,6], [7,3], [8,0,6,1], 11, ZZ) == [10,9,3,2,3]
assert gf_mul([], [], 11, ZZ) == []
assert gf_mul([], [1], 11, ZZ) == []
assert gf_mul([1], [], 11, ZZ) == []
assert gf_mul([1], [1], 11, ZZ) == [1]
assert gf_mul([5], [7], 11, ZZ) == [2]
assert gf_mul([3,0,0,6,1,2], [4,0,1,0], 11, ZZ) == [1,0,3,2,4,3,1,2,0]
assert gf_mul([4,0,1,0], [3,0,0,6,1,2], 11, ZZ) == [1,0,3,2,4,3,1,2,0]
assert gf_mul([2,0,0,1,7], [2,0,0,1,7], 11, ZZ) == [4,0,0,4,6,0,1,3,5]
assert gf_sqr([], 11, ZZ) == []
assert gf_sqr([2], 11, ZZ) == [4]
assert gf_sqr([1,2], 11, ZZ) == [1,4,4]
assert gf_sqr([2,0,0,1,7], 11, ZZ) == [4,0,0,4,6,0,1,3,5]
def test_gf_division():
# Division with remainder; division by zero raises; gf_quo demands an
# exact quotient and fails otherwise.
raises(ZeroDivisionError, "gf_div([1,2,3], [], 11, ZZ)")
raises(ZeroDivisionError, "gf_rem([1,2,3], [], 11, ZZ)")
raises(ZeroDivisionError, "gf_quo([1,2,3], [], 11, ZZ)")
raises(ZeroDivisionError, "gf_exquo([1,2,3], [], 11, ZZ)")
assert gf_div([1], [1,2,3], 7, ZZ) == ([], [1])
assert gf_exquo([1], [1,2,3], 7, ZZ) == []
assert gf_rem([1], [1,2,3], 7, ZZ) == [1]
f, g, q, r = [5,4,3,2,1,0], [1,2,3], [5,1,0,6], [3,3]
assert gf_div(f, g, 7, ZZ) == (q, r)
assert gf_exquo(f, g, 7, ZZ) == q
assert gf_rem(f, g, 7, ZZ) == r
raises(ExactQuotientFailed, "gf_quo(f, g, 7, ZZ)")
f, g, q, r = [5,4,3,2,1,0], [1,2,3,0], [5,1,0], [6,1,0]
assert gf_div(f, g, 7, ZZ) == (q, r)
assert gf_exquo(f, g, 7, ZZ) == q
assert gf_rem(f, g, 7, ZZ) == r
raises(ExactQuotientFailed, "gf_quo(f, g, 7, ZZ)")
assert gf_quo([1,2,1], [1,1], 11, ZZ) == [1,1]
def test_gf_shift():
# Multiplication/division by x**n; rshift returns (quotient, remainder).
f = [1,2,3,4,5]
assert gf_lshift([], 5, ZZ) == []
assert gf_rshift([], 5, ZZ) == ([], [])
assert gf_lshift(f, 1, ZZ) == [1,2,3,4,5,0]
assert gf_lshift(f, 2, ZZ) == [1,2,3,4,5,0,0]
assert gf_rshift(f, 0, ZZ) == (f, [])
assert gf_rshift(f, 1, ZZ) == ([1,2,3,4], [5])
assert gf_rshift(f, 3, ZZ) == ([1,2], [3,4,5])
assert gf_rshift(f, 5, ZZ) == ([], f)
def test_gf_expand():
# Expand a factorization (optionally with a leading coefficient).
F = [([1,1], 2), ([1,2], 3)]
assert gf_expand(F, 11, ZZ) == [1,8,3,5,6,8]
assert gf_expand((4, F), 11, ZZ) == [4,10,1,9,2,10]
def test_gf_powering():
# Repeated-squaring exponentiation, plain and modulo a polynomial.
assert gf_pow([1,0,0,1,8], 0, 11, ZZ) == [1]
assert gf_pow([1,0,0,1,8], 1, 11, ZZ) == [1, 0, 0, 1, 8]
assert gf_pow([1,0,0,1,8], 2, 11, ZZ) == [1, 0, 0, 2, 5, 0, 1, 5, 9]
assert gf_pow([1,0,0,1,8], 5, 11, ZZ) == \
[1, 0, 0, 5, 7, 0, 10, 6, 2, 10, 9, 6, 10, 6, 6, 0, 5, 2, 5, 9, 10]
assert gf_pow([1,0,0,1,8], 8, 11, ZZ) == \
[1, 0, 0, 8, 9, 0, 6, 8, 10, 1, 2, 5, 10, 7, 7, 9, 1, 2, 0, 0, 6, 2,
5, 2, 5, 7, 7, 9, 10, 10, 7, 5, 5]
assert gf_pow([1,0,0,1,8], 45, 11, ZZ) == \
[ 1, 0, 0, 1, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 4, 10, 0, 0, 0, 0, 0, 0,
10, 0, 0, 10, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6, 0, 0, 6, 4, 0, 0, 0, 0, 0, 0, 8, 0, 0, 8, 9, 0, 0, 0, 0, 0, 0,
10, 0, 0, 10, 3, 0, 0, 0, 0, 0, 0, 4, 0, 0, 4, 10, 0, 0, 0, 0, 0, 0,
8, 0, 0, 8, 9, 0, 0, 0, 0, 0, 0, 9, 0, 0, 9, 6, 0, 0, 0, 0, 0, 0,
3, 0, 0, 3, 2, 0, 0, 0, 0, 0, 0, 10, 0, 0, 10, 3, 0, 0, 0, 0, 0, 0,
10, 0, 0, 10, 3, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 5, 0, 0, 0, 0, 0, 0,
4, 0, 0, 4, 10]
assert gf_pow_mod([1,0,0,1,8], 0, [2,0,7], 11, ZZ) == [1]
assert gf_pow_mod([1,0,0,1,8], 1, [2,0,7], 11, ZZ) == [1,1]
assert gf_pow_mod([1,0,0,1,8], 2, [2,0,7], 11, ZZ) == [2,3]
assert gf_pow_mod([1,0,0,1,8], 5, [2,0,7], 11, ZZ) == [7,8]
assert gf_pow_mod([1,0,0,1,8], 8, [2,0,7], 11, ZZ) == [1,5]
assert gf_pow_mod([1,0,0,1,8], 45, [2,0,7], 11, ZZ) == [5,4]
def test_gf_euclidean():
# GCD and extended GCD (gf_gcdex returns (s, t, h) with s*f + t*g = h).
assert gf_gcd([], [], 11, ZZ) == []
assert gf_gcd([2], [], 11, ZZ) == [1]
assert gf_gcd([], [2], 11, ZZ) == [1]
assert gf_gcd([2], [2], 11, ZZ) == [1]
assert gf_gcd([], [1,0], 11, ZZ) == [1,0]
assert gf_gcd([1,0], [], 11, ZZ) == [1,0]
assert gf_gcd([3,0], [3,0], 11, ZZ) == [1,0]
assert gf_gcd([1,8,7], [1,7,1,7], 11, ZZ) == [1,7]
assert gf_gcdex([], [], 11, ZZ) == ([1], [], [])
assert gf_gcdex([2], [], 11, ZZ) == ([6], [], [1])
assert gf_gcdex([], [2], 11, ZZ) == ([], [6], [1])
assert gf_gcdex([2], [2], 11, ZZ) == ([], [6], [1])
assert gf_gcdex([], [3,0], 11, ZZ) == ([], [4], [1,0])
assert gf_gcdex([3,0], [], 11, ZZ) == ([4], [], [1,0])
assert gf_gcdex([3,0], [3,0], 11, ZZ) == ([], [4], [1,0])
assert gf_gcdex([1,8,7], [1,7,1,7], 11, ZZ) == ([5,6], [6], [1,7])
def test_gf_diff():
# Formal derivative; note d/dx x**11 == 0 in GF(11).
assert gf_diff([], 11, ZZ) == []
assert gf_diff([7], 11, ZZ) == []
assert gf_diff([7,3], 11, ZZ) == [7]
assert gf_diff([7,3,1], 11, ZZ) == [3,3]
assert gf_diff([1,0,0,0,0,0,0,0,0,0,0,1], 11, ZZ) == []
def test_gf_eval():
# Evaluation at a point (and at several points via gf_multi_eval).
assert gf_eval([], 4, 11, ZZ) == 0
assert gf_eval([], 27, 11, ZZ) == 0
assert gf_eval([7], 4, 11, ZZ) == 7
assert gf_eval([7], 27, 11, ZZ) == 7
assert gf_eval([1,0,3,2,4,3,1,2,0], 0, 11, ZZ) == 0
assert gf_eval([1,0,3,2,4,3,1,2,0], 4, 11, ZZ) == 9
assert gf_eval([1,0,3,2,4,3,1,2,0], 27, 11, ZZ) == 5
assert gf_eval([4,0,0,4,6,0,1,3,5], 0, 11, ZZ) == 5
assert gf_eval([4,0,0,4,6,0,1,3,5], 4, 11, ZZ) == 3
assert gf_eval([4,0,0,4,6,0,1,3,5], 27, 11, ZZ) == 9
assert gf_multi_eval([3,2,1], [0,1,2,3], 11, ZZ) == [1,6,6,1]
def test_gf_compose():
# Functional composition g(h), plain and modulo f.
assert gf_compose([], [1,0], 11, ZZ) == []
assert gf_compose_mod([], [1,0], [1,0], 11, ZZ) == []
assert gf_compose([1], [], 11, ZZ) == [1]
assert gf_compose([1,0], [], 11, ZZ) == []
assert gf_compose([1,0], [1,0], 11, ZZ) == [1,0]
f, g, h = [1, 1, 4, 9, 1], [1,1,1], [1,0,0,2]
assert gf_compose(g, h, 11, ZZ) == [1,0,0,5,0,0,7]
assert gf_compose_mod(g, h, f, 11, ZZ) == [3,9,6,10]
def test_gf_trace_map():
# Trace map iteration used by Shoup's factorization algorithm;
# b = c**p mod f is the Frobenius image of c.
f, a, c = [1, 1, 4, 9, 1], [1,1,1], [1,0]
b = gf_pow_mod(c, 11, f, 11, ZZ)
assert gf_trace_map(a, b, c, 0, f, 11, ZZ) == \
([1, 1, 1], [1, 1, 1])
assert gf_trace_map(a, b, c, 1, f, 11, ZZ) == \
([5, 2, 10, 3], [5, 3, 0, 4])
assert gf_trace_map(a, b, c, 2, f, 11, ZZ) == \
([5, 9, 5, 3], [10, 1, 5, 7])
assert gf_trace_map(a, b, c, 3, f, 11, ZZ) == \
([1, 10, 6, 0], [7])
assert gf_trace_map(a, b, c, 4, f, 11, ZZ) == \
([1, 1, 1], [1, 1, 8])
assert gf_trace_map(a, b, c, 5, f, 11, ZZ) == \
([5, 2, 10, 3], [5, 3, 0, 0])
assert gf_trace_map(a, b, c, 11, f, 11, ZZ) == \
([1, 10, 6, 0], [10])
def test_gf_irreducible():
# Randomly generated irreducible polynomials verify as irreducible.
assert gf_irreducible_p(gf_irreducible(1, 11, ZZ), 11, ZZ) == True
assert gf_irreducible_p(gf_irreducible(2, 11, ZZ), 11, ZZ) == True
assert gf_irreducible_p(gf_irreducible(3, 11, ZZ), 11, ZZ) == True
assert gf_irreducible_p(gf_irreducible(4, 11, ZZ), 11, ZZ) == True
assert gf_irreducible_p(gf_irreducible(5, 11, ZZ), 11, ZZ) == True
assert gf_irreducible_p(gf_irreducible(6, 11, ZZ), 11, ZZ) == True
assert gf_irreducible_p(gf_irreducible(7, 11, ZZ), 11, ZZ) == True
def test_gf_irreducible_p():
# Ben-Or and Rabin irreducibility tests agree; unknown method raises.
assert gf_irred_p_ben_or([7], 11, ZZ) == True
assert gf_irred_p_ben_or([7,3], 11, ZZ) == True
assert gf_irred_p_ben_or([7,3,1], 11, ZZ) == False
assert gf_irred_p_rabin([7], 11, ZZ) == True
assert gf_irred_p_rabin([7,3], 11, ZZ) == True
assert gf_irred_p_rabin([7,3,1], 11, ZZ) == False
assert gf_irreducible_p([7], 11, ZZ, method='ben-or') == True
assert gf_irreducible_p([7,3], 11, ZZ, method='ben-or') == True
assert gf_irreducible_p([7,3,1], 11, ZZ, method='ben-or') == False
assert gf_irreducible_p([7], 11, ZZ, method='rabin') == True
assert gf_irreducible_p([7,3], 11, ZZ, method='rabin') == True
assert gf_irreducible_p([7,3,1], 11, ZZ, method='rabin') == False
raises(KeyError, "gf_irreducible_p([7], 11, ZZ, method='other')")
f = [1, 9, 9, 13, 16, 15, 6, 7, 7, 7, 10]
g = [1, 7, 16, 7, 15, 13, 13, 11, 16, 10, 9]
h = gf_mul(f, g, 17, ZZ)
assert gf_irred_p_ben_or(f, 17, ZZ) == True
assert gf_irred_p_ben_or(g, 17, ZZ) == True
assert gf_irred_p_ben_or(h, 17, ZZ) == False
assert gf_irred_p_rabin(f, 17, ZZ) == True
assert gf_irred_p_rabin(g, 17, ZZ) == True
assert gf_irred_p_rabin(h, 17, ZZ) == False
def test_gf_squarefree():
# Square-free decomposition; x**11 + 1 == (x + 1)**11 in GF(11)
# exercises the Frobenius (p-th power) branch.
assert gf_sqf_list([], 11, ZZ) == (0, [])
assert gf_sqf_list([1], 11, ZZ) == (1, [])
assert gf_sqf_list([1,1], 11, ZZ) == (1, [([1, 1], 1)])
assert gf_sqf_p([], 11, ZZ) == True
assert gf_sqf_p([1], 11, ZZ) == True
assert gf_sqf_p([1,1], 11, ZZ) == True
f = gf_from_dict({11: 1, 0: 1}, 11, ZZ)
assert gf_sqf_p(f, 11, ZZ) == False
assert gf_sqf_list(f, 11, ZZ) == \
(1, [([1, 1], 11)])
f = [1, 5, 8, 4]
assert gf_sqf_p(f, 11, ZZ) == False
assert gf_sqf_list(f, 11, ZZ) == \
(1, [([1, 1], 1),
([1, 2], 2)])
assert gf_sqf_part(f, 11, ZZ) == [1, 3, 2]
f = [1,0,0,2,0,0,2,0,0,1,0]
assert gf_sqf_list(f, 3, ZZ) == \
(1, [([1, 0], 1),
([1, 1], 3),
([1, 2], 6)])
def test_gf_berlekamp():
# Berlekamp factorization: Q (Frobenius) matrix, null-space basis,
# and the resulting irreducible factors.
f = gf_from_int_poly([1,-3,1,-3,-1,-3,1], 11)
Q = [[1, 0, 0, 0, 0, 0],
[3, 5, 8, 8, 6, 5],
[3, 6, 6, 1,10, 0],
[9, 4,10, 3, 7, 9],
[7, 8,10, 0, 0, 8],
[8,10, 7, 8,10, 8]]
V = [[1, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0],
[0, 0, 7, 9, 0, 1]]
assert gf_Qmatrix(f, 11, ZZ) == Q
assert gf_Qbasis(Q, 11, ZZ) == V
assert gf_berlekamp(f, 11, ZZ) == \
[[1, 1], [1, 5, 3], [1, 2, 3, 4]]
f = [1,0,1,0,10,10,8,2,8]
Q = [[1, 0, 0, 0, 0, 0, 0, 0],
[2, 1, 7,11,10,12, 5,11],
[3, 6, 4, 3, 0, 4, 7, 2],
[4, 3, 6, 5, 1, 6, 2, 3],
[2,11, 8, 8, 3, 1, 3,11],
[6,11, 8, 6, 2, 7,10, 9],
[5,11, 7,10, 0,11, 7,12],
[3, 3,12, 5, 0,11, 9,12]]
V = [[1, 0, 0, 0, 0, 0, 0, 0],
[0, 5, 5, 0, 9, 5, 1, 0],
[0, 9,11, 9,10,12, 0, 1]]
assert gf_Qmatrix(f, 13, ZZ) == Q
assert gf_Qbasis(Q, 13, ZZ) == V
assert gf_berlekamp(f, 13, ZZ) == \
[[1, 3], [1, 8, 4, 12], [1, 2, 3, 4, 6]]
def test_gf_ddf():
# Distinct-degree factorization: Zassenhaus and Shoup agree on
# factors grouped by degree, including a large-prime case.
f = gf_from_dict({15: 1, 0: -1}, 11, ZZ)
g = [([1, 0, 0, 0, 0, 10], 1),
([1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1], 2)]
assert gf_ddf_zassenhaus(f, 11, ZZ) == g
assert gf_ddf_shoup(f, 11, ZZ) == g
f = gf_from_dict({63: 1, 0: 1}, 2, ZZ)
g = [([1, 1], 1),
([1, 1, 1], 2),
([1, 1, 1, 1, 1, 1, 1], 3),
([1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1], 6)]
assert gf_ddf_zassenhaus(f, 2, ZZ) == g
assert gf_ddf_shoup(f, 2, ZZ) == g
f = gf_from_dict({6: 1, 5: -1, 4: 1, 3: 1, 1: -1}, 3, ZZ)
g = [([1, 1, 0], 1),
([1, 1, 0, 1, 2], 2)]
assert gf_ddf_zassenhaus(f, 3, ZZ) == g
assert gf_ddf_shoup(f, 3, ZZ) == g
f = [1, 2, 5, 26, 677, 436, 791, 325, 456, 24, 577]
g = [([1, 701], 1),
([1, 110, 559, 532, 694, 151, 110, 70, 735, 122], 9)]
assert gf_ddf_zassenhaus(f, 809, ZZ) == g
assert gf_ddf_shoup(f, 809, ZZ) == g
p = ZZ(nextprime(int((2**15 * pi).evalf())))
f = gf_from_dict({15: 1, 1: 1, 0: 1}, p, ZZ)
g = [([1, 22730, 68144], 2),
([1, 64876, 83977, 10787, 12561, 68608, 52650, 88001, 84356], 4),
([1, 15347, 95022, 84569, 94508, 92335], 5)]
assert gf_ddf_zassenhaus(f, p, ZZ) == g
assert gf_ddf_shoup(f, p, ZZ) == g
def test_gf_edf():
    """Equal-degree factorization over GF(3): both EDF implementations
    must split the fixture quartic into the same quadratic factors."""
    poly = [1, 1, 0, 1, 2]
    expected = [[1, 0, 1], [1, 1, 2]]

    assert gf_edf_zassenhaus(poly, 2, 3, ZZ) == expected
    assert gf_edf_shoup(poly, 2, 3, ZZ) == expected
def test_gf_factor():
    """Full factorization over GF(p): all methods (berlekamp, zassenhaus,
    shoup) must agree, for both ``gf_factor`` (with multiplicities) and
    ``gf_factor_sqf`` (square-free input)."""
    # Degenerate inputs: zero, constant and linear polynomials.
    assert gf_factor([], 11, ZZ) == (0, [])
    assert gf_factor([1], 11, ZZ) == (1, [])
    assert gf_factor([1,1], 11, ZZ) == (1, [([1, 1], 1)])
    assert gf_factor_sqf([], 11, ZZ) == (0, [])
    assert gf_factor_sqf([1], 11, ZZ) == (1, [])
    assert gf_factor_sqf([1,1], 11, ZZ) == (1, [[1, 1]])
    assert gf_factor_sqf([], 11, ZZ, method='berlekamp') == (0, [])
    assert gf_factor_sqf([1], 11, ZZ, method='berlekamp') == (1, [])
    assert gf_factor_sqf([1,1], 11, ZZ, method='berlekamp') == (1, [[1, 1]])
    assert gf_factor_sqf([], 11, ZZ, method='zassenhaus') == (0, [])
    assert gf_factor_sqf([1], 11, ZZ, method='zassenhaus') == (1, [])
    assert gf_factor_sqf([1,1], 11, ZZ, method='zassenhaus') == (1, [[1, 1]])
    assert gf_factor_sqf([], 11, ZZ, method='shoup') == (0, [])
    assert gf_factor_sqf([1], 11, ZZ, method='shoup') == (1, [])
    assert gf_factor_sqf([1,1], 11, ZZ, method='shoup') == (1, [[1, 1]])
    f, p = [1,0,0,1,0], 2
    g = (1, [([1, 0], 1),
             ([1, 1], 1),
             ([1, 1, 1], 1)])
    assert gf_factor(f, p, ZZ, method='berlekamp') == g
    assert gf_factor(f, p, ZZ, method='zassenhaus') == g
    assert gf_factor(f, p, ZZ, method='shoup') == g
    g = (1, [[1, 0],
             [1, 1],
             [1, 1, 1]])
    assert gf_factor_sqf(f, p, ZZ, method='berlekamp') == g
    assert gf_factor_sqf(f, p, ZZ, method='zassenhaus') == g
    assert gf_factor_sqf(f, p, ZZ, method='shoup') == g
    f, p = gf_from_int_poly([1,-3,1,-3,-1,-3,1], 11), 11
    g = (1, [([1, 1], 1),
             ([1, 5, 3], 1),
             ([1, 2, 3, 4], 1)])
    assert gf_factor(f, p, ZZ, method='berlekamp') == g
    assert gf_factor(f, p, ZZ, method='zassenhaus') == g
    assert gf_factor(f, p, ZZ, method='shoup') == g
    f, p = [1, 5, 8, 4], 11
    g = (1, [([1, 1], 1), ([1, 2], 2)])
    assert gf_factor(f, p, ZZ, method='berlekamp') == g
    assert gf_factor(f, p, ZZ, method='zassenhaus') == g
    assert gf_factor(f, p, ZZ, method='shoup') == g
    f, p = [1, 1, 10, 1, 0, 10, 10, 10, 0, 0], 11
    g = (1, [([1, 0], 2), ([1, 9, 5], 1), ([1, 3, 0, 8, 5, 2], 1)])
    assert gf_factor(f, p, ZZ, method='berlekamp') == g
    assert gf_factor(f, p, ZZ, method='zassenhaus') == g
    assert gf_factor(f, p, ZZ, method='shoup') == g
    f, p = gf_from_dict({32: 1, 0: 1}, 11, ZZ), 11
    g = (1, [([1, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 10], 1),
             ([1, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 10], 1)])
    assert gf_factor(f, p, ZZ, method='berlekamp') == g
    assert gf_factor(f, p, ZZ, method='zassenhaus') == g
    assert gf_factor(f, p, ZZ, method='shoup') == g
    # Non-monic inputs: the leading coefficient is returned separately.
    f, p = gf_from_dict({32: 8, 0: 5}, 11, ZZ), 11
    g = (8, [([1, 3], 1),
             ([1, 8], 1),
             ([1, 0, 9], 1),
             ([1, 2, 2], 1),
             ([1, 9, 2], 1),
             ([1, 0, 5, 0, 7], 1),
             ([1, 0, 6, 0, 7], 1),
             ([1, 0, 0, 0, 1, 0, 0, 0, 6], 1),
             ([1, 0, 0, 0, 10, 0, 0, 0, 6], 1)])
    assert gf_factor(f, p, ZZ, method='berlekamp') == g
    assert gf_factor(f, p, ZZ, method='zassenhaus') == g
    assert gf_factor(f, p, ZZ, method='shoup') == g
    f, p = gf_from_dict({63: 8, 0: 5}, 11, ZZ), 11
    g = (8, [([1, 7], 1),
             ([1, 4, 5], 1),
             ([1, 6, 8, 2], 1),
             ([1, 9, 9, 2], 1),
             ([1, 0, 0, 9, 0, 0, 4], 1),
             ([1, 2, 0, 8, 4, 6, 4], 1),
             ([1, 2, 3, 8, 0, 6, 4], 1),
             ([1, 2, 6, 0, 8, 4, 4], 1),
             ([1, 3, 3, 1, 6, 8, 4], 1),
             ([1, 5, 6, 0, 8, 6, 4], 1),
             ([1, 6, 2, 7, 9, 8, 4], 1),
             ([1, 10, 4, 7, 10, 7, 4], 1),
             ([1, 10, 10, 1, 4, 9, 4], 1)])
    assert gf_factor(f, p, ZZ, method='berlekamp') == g
    assert gf_factor(f, p, ZZ, method='zassenhaus') == g
    assert gf_factor(f, p, ZZ, method='shoup') == g
    # Gathen polynomials: x**n + x + 1 (mod p > 2**n * pi)
    p = ZZ(nextprime(int((2**15 * pi).evalf())))
    f = gf_from_dict({15: 1, 1: 1, 0: 1}, p, ZZ)
    assert gf_sqf_p(f, p, ZZ) == True
    g = (1, [([1, 22730, 68144], 1),
             ([1, 81553, 77449, 86810, 4724], 1),
             ([1, 86276, 56779, 14859, 31575], 1),
             ([1, 15347, 95022, 84569, 94508, 92335], 1)])
    assert gf_factor(f, p, ZZ, method='zassenhaus') == g
    assert gf_factor(f, p, ZZ, method='shoup') == g
    g = (1, [[1, 22730, 68144],
             [1, 81553, 77449, 86810, 4724],
             [1, 86276, 56779, 14859, 31575],
             [1, 15347, 95022, 84569, 94508, 92335]])
    assert gf_factor_sqf(f, p, ZZ, method='zassenhaus') == g
    assert gf_factor_sqf(f, p, ZZ, method='shoup') == g
    # Shoup polynomials: f = a_0 x**n + a_1 x**(n-1) + ... + a_n
    # (mod p > 2**(n-2) * pi), where a_n = a_{n-1}**2 + 1, a_0 = 1
    p = ZZ(nextprime(int((2**4 * pi).evalf())))
    f = [1, 2, 5, 26, 41, 39, 38]
    assert gf_sqf_p(f, p, ZZ) == True
    g = (1, [([1, 44, 26], 1),
             ([1, 11, 25, 18, 30], 1)])
    assert gf_factor(f, p, ZZ, method='zassenhaus') == g
    assert gf_factor(f, p, ZZ, method='shoup') == g
    g = (1, [[1, 44, 26],
             [1, 11, 25, 18, 30]])
    assert gf_factor_sqf(f, p, ZZ, method='zassenhaus') == g
    assert gf_factor_sqf(f, p, ZZ, method='shoup') == g
| bsd-3-clause | d27847e79cc091aeff274a10258a24f9 | 32.133236 | 79 | 0.456606 | 2.198795 | false | false | false | false |
mattpap/sympy-polys | sympy/polys/groebnertools.py | 3 | 16634 | """Sparse distributed multivariate polynomials and Groebner bases. """
from sympy.polys.monomialtools import (
monomial_mul,
monomial_div,
monomial_lcm,
monomial_lex_cmp as O_lex,
monomial_grlex_cmp as O_grlex,
monomial_grevlex_cmp as O_grevlex,
)
from sympy.polys.polyerrors import (
ExactQuotientFailed,
)
from sympy.utilities import any, all
from operator import itemgetter
def sdp_LC(f, K):
    """Returns the leading coefficient of `f`, or `K.zero` for the zero polynomial. """
    if f:
        return f[0][1]
    return K.zero
def sdp_LM(f, u):
    """Returns the leading monomial of `f`; the zero monomial in `u+1` variables if `f` is zero. """
    if f:
        return f[0][0]
    return (0,) * (u + 1)
def sdp_LT(f, u, K):
    """Returns the leading term of `f` as a ``(monom, coeff)`` pair. """
    monom = sdp_LM(f, u)
    coeff = sdp_LC(f, K)
    return monom, coeff
def sdp_del_LT(f):
    """Removes the leading term from `f`. """
    rest = f[1:]
    return rest
def sdp_coeffs(f):
    """Returns the list of coefficients of `f`. """
    return [ term[1] for term in f ]
def sdp_monoms(f):
    """Returns the list of monomials in `f`. """
    return [ term[0] for term in f ]
def sdp_sort(f, O):
    """Sort terms in `f` using the given monomial order `O`. """
    # Python 2 ``sorted(iterable, cmp, key, reverse)`` signature: ``O`` is
    # a cmp-style comparison applied to the monomials extracted by ``key``,
    # so terms end up ordered largest monomial first.
    return sorted(f, O, key=itemgetter(0), reverse=True)
def sdp_strip(f):
    """Remove terms with zero coefficients from `f` in `K[X]`. """
    return [ term for term in f if term[1] ]
def sdp_normal(f, K):
    """Normalize a distributed polynomial: drop zero terms and convert
    all coefficients into the ground domain `K`. """
    return [ (m, K.convert(c)) for m, c in f if c ]
def sdp_from_dict(f, O):
    """Make a distributed polynomial from a dictionary of terms. """
    terms = f.items()
    return sdp_sort(terms, O)
def sdp_to_dict(f):
    """Make a dictionary from a distributed polynomial. """
    return dict(term for term in f)
def sdp_indep_p(f, j, u):
    """Returns `True` if a polynomial is independent of `x_j`.

    `f` is a distributed polynomial (a list of ``(monom, coeff)`` terms)
    in ``u+1`` variables; ``j`` selects a variable and must satisfy
    ``0 <= j <= u``, otherwise `IndexError` is raised.
    """
    if j < 0 or j > u:
        raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
    else:
        # Fixed NameError: the original iterated ``sdp_monoms(h)`` where
        # ``h`` is undefined; iterate the terms of ``f`` instead.
        return all(not monom[j] for monom, _ in f)
def sdp_one_p(f, u, K):
    """Returns True if `f` is a multivariate one in `K[X]`. """
    one = sdp_one(u, K)
    return f == one
def sdp_one(u, K):
    """Returns a multivariate one in `K[X]` (a single constant term). """
    zero_monom = (0,) * (u + 1)
    return ((zero_monom, K.one),)
def sdp_term_p(f):
    """Returns True if `f` has a single term or is zero. """
    return not f or len(f) == 1
def sdp_abs(f, u, O, K):
    """Make all coefficients positive in `K[X]`. """
    return [ (m, K.abs(c)) for m, c in f ]
def sdp_neg(f, u, O, K):
    """Negate a polynomial in `K[X]`. """
    return [ (m, -c) for m, c in f ]
def sdp_add_term(f, term, u, O, K):
    """Add a single term to `f` using the bisection method.

    `f` is a distributed polynomial -- a list of ``(monom, coeff)``
    pairs kept in descending order with respect to the cmp-style
    monomial order `O`.  ``term`` is one ``(monom, coeff)`` pair.
    Returns a new list; `f` is never mutated.  `u` and `K` are accepted
    for interface symmetry with the other ``sdp_*`` functions.
    """
    M, c = term  # unpack in the body (tuple parameters are Py2-only, PEP 3113)

    if not c:
        return f
    if not f:
        return [(M, c)]

    monoms = [monom for monom, _ in f]

    # Fast paths: new leading or new trailing term.
    if O(M, monoms[0]) > 0:
        return [(M, c)] + f
    if O(M, monoms[-1]) < 0:
        return f + [(M, c)]

    # Binary search for an equal monomial or for the insertion point.
    lo, hi = 0, len(monoms) - 1

    while lo <= hi:
        i = (lo + hi) // 2
        j = O(M, monoms[i])

        if not j:
            # Equal monomial found: merge coefficients, dropping the
            # term entirely if they cancel.
            coeff = f[i][1] + c

            if not coeff:
                return f[:i] + f[i+1:]
            else:
                return f[:i] + [(M, coeff)] + f[i+1:]
        else:
            if j > 0:
                hi = i - 1
            else:
                lo = i + 1

    # No equal monomial: insert at the search's final position.  The
    # original fallthrough returned ``f[:i] + [(M, c)] + f[i+1:]``,
    # which overwrites the term at index ``i`` instead of inserting;
    # ``lo`` is the correct insertion point (cf. bisect semantics).
    return f[:lo] + [(M, c)] + f[lo:]
def sdp_sub_term(f, term, u, O, K):
    """Subtract a single term from `f` using the bisection method.

    Mirror image of :func:`sdp_add_term`: ``term`` is a ``(monom,
    coeff)`` pair whose coefficient is subtracted.  Returns a new list;
    `f` is never mutated.
    """
    M, c = term  # unpack in the body (tuple parameters are Py2-only, PEP 3113)

    if not c:
        return f
    if not f:
        return [(M, -c)]

    monoms = [monom for monom, _ in f]

    # Fast paths: new leading or new trailing term.
    if O(M, monoms[0]) > 0:
        return [(M, -c)] + f
    if O(M, monoms[-1]) < 0:
        return f + [(M, -c)]

    # Binary search for an equal monomial or for the insertion point.
    lo, hi = 0, len(monoms) - 1

    while lo <= hi:
        i = (lo + hi) // 2
        j = O(M, monoms[i])

        if not j:
            # Equal monomial found: merge coefficients, dropping the
            # term entirely if they cancel.
            coeff = f[i][1] - c

            if not coeff:
                return f[:i] + f[i+1:]
            else:
                return f[:i] + [(M, coeff)] + f[i+1:]
        else:
            if j > 0:
                hi = i - 1
            else:
                lo = i + 1

    # No equal monomial: insert at the search's final position.  The
    # original fallthrough returned ``f[:i] + [(M, -c)] + f[i+1:]``,
    # which overwrites the term at index ``i`` instead of inserting;
    # ``lo`` is the correct insertion point (cf. bisect semantics).
    return f[:lo] + [(M, -c)] + f[lo:]
def sdp_mul_term(f, term, u, O, K):
    """Multiply a distributed polynomial `f` by a single term.

    ``term`` is a ``(monom, coeff)`` pair; the zero polynomial is
    returned if either operand is zero.
    """
    M, c = term  # unpack in the body (tuple parameters are Py2-only, PEP 3113)

    if not f or not c:
        return []
    else:
        if K.is_one(c):
            # Skip the coefficient multiplications when multiplying by one.
            return [ (monomial_mul(f_M, M), f_c) for f_M, f_c in f ]
        else:
            return [ (monomial_mul(f_M, M), f_c*c) for f_M, f_c in f ]
def sdp_add(f, g, u, O, K):
    """Add distributed polynomials in `K[X]`.

    Terms are merged through a dictionary keyed by monomial; terms whose
    coefficients cancel are dropped, and the result is re-sorted with
    respect to the monomial order `O`.
    """
    h = dict(f)

    for monom, c in g:
        if monom in h:  # dict.has_key() was removed in Python 3
            coeff = h[monom] + c

            if not coeff:
                del h[monom]
            else:
                h[monom] = coeff
        else:
            h[monom] = c

    return sdp_from_dict(h, O)
def sdp_sub(f, g, u, O, K):
    """Subtract distributed polynomials in `K[X]`.

    Mirror of :func:`sdp_add` with the coefficients of `g` negated.
    """
    h = dict(f)

    for monom, c in g:
        if monom in h:  # dict.has_key() was removed in Python 3
            coeff = h[monom] - c

            if not coeff:
                del h[monom]
            else:
                h[monom] = coeff
        else:
            h[monom] = -c

    return sdp_from_dict(h, O)
def sdp_mul(f, g, u, O, K):
    """Multiply distributed polynomials in `K[X]`.

    Single-term operands are dispatched to the cheaper
    :func:`sdp_mul_term`; otherwise the full term-by-term product is
    accumulated in a dictionary keyed by monomial.
    """
    if sdp_term_p(f):
        if not f:
            return f
        else:
            return sdp_mul_term(g, f[0], u, O, K)

    if sdp_term_p(g):
        if not g:
            return g
        else:
            return sdp_mul_term(f, g[0], u, O, K)

    h = {}

    for fm, fc in f:
        for gm, gc in g:
            monom = monomial_mul(fm, gm)
            coeff = fc*gc

            if monom in h:  # dict.has_key() was removed in Python 3
                coeff += h[monom]

                if not coeff:
                    # Coefficients cancelled: remove the entry and skip
                    # the assignment below.
                    del h[monom]
                    continue

            h[monom] = coeff

    return sdp_from_dict(h, O)
def sdp_sqr(f, u, O, K):
    """Square a distributed polynomial in `K[X]`.

    Plain term-by-term product of `f` with itself, accumulated in a
    dictionary keyed by monomial.
    """
    h = {}

    for fm, fc in f:
        for Fm, Fc in f:
            monom = monomial_mul(fm, Fm)
            coeff = fc*Fc

            if monom in h:  # dict.has_key() was removed in Python 3
                coeff += h[monom]

                if not coeff:
                    # Coefficients cancelled: remove the entry and skip
                    # the assignment below.
                    del h[monom]
                    continue

            h[monom] = coeff

    return sdp_from_dict(h, O)
def sdp_pow(f, n, u, O, K):
    """Raise `f` to the n-th power in `K[X]` by binary exponentiation. """
    if not n:
        return sdp_one(u, K)
    if n < 0:
        raise ValueError("can't raise a polynomial to negative power")
    if n == 1 or not f or sdp_one_p(f, u, K):
        return f

    result = sdp_one(u, K)

    # Square-and-multiply: consume the exponent one bit at a time.
    while n:
        if n & 1:
            result = sdp_mul(result, f, u, O, K)
        n >>= 1
        if n:
            f = sdp_sqr(f, u, O, K)

    return result
def sdp_monic(f, K):
    """Divide all coefficients by `LC(f)` in `K[X]`. """
    if not f:
        return f

    lc = sdp_LC(f, K)

    if K.is_one(lc):
        # Already monic.
        return f

    return [ (m, K.quo(c, lc)) for m, c in f ]
def sdp_content(f, K):
    """Returns GCD of coefficients in `K[X]`. """
    if K.has_Field:
        # Over a field every nonzero coefficient is a unit.
        return K.one

    cont = K.zero

    for _, coeff in f:
        cont = K.gcd(cont, coeff)

        if K.is_one(cont):
            # The gcd cannot get any smaller.
            break

    return cont
def sdp_primitive(f, K):
    """Returns content and a primitive polynomial in `K[X]`. """
    if K.has_Field:
        # Over a field the content is trivially one.
        return K.one, f

    cont = sdp_content(f, K)

    if K.is_one(cont):
        return cont, f

    return cont, [ (m, K.exquo(c, cont)) for m, c in f ]
def _term_rr_div(a, b, K):
    """Division of two terms over a ring; None when not exactly divisible. """
    a_lm, a_lc = a
    b_lm, b_lc = b

    monom = monomial_div(a_lm, b_lm)

    # Both the monomial division and the coefficient division must be
    # exact over a ring.
    if monom is None or a_lc % b_lc:
        return None

    return monom, K.exquo(a_lc, b_lc)
def _term_ff_div(a, b, K):
    """Division of two terms over a field; None when the monomials don't divide. """
    a_lm, a_lc = a
    b_lm, b_lc = b

    monom = monomial_div(a_lm, b_lm)

    # Over a field only the monomial part can fail to divide.
    if monom is None:
        return None

    return monom, K.exquo(a_lc, b_lc)
def sdp_div(f, G, u, O, K):
    """Generalized polynomial division with remainder in `K[X]`.

    Given polynomial `f` and a set of polynomials `g = (g_1, ..., g_n)`
    compute a set of quotients `q = (q_1, ..., q_n)` and remainder `r`
    such that `f = q_1*g_1 + ... + q_n*g_n + r`, where `r = 0` or `r`
    is a completely reduced polynomial with respect to `g`.

    References
    ==========

    .. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties and
        Algorithms, Springer, Second Edition, 1997, pp. 62

    .. [Ajwa95] I.A. Ajwa, Z. Liu, P.S. Wang, Groebner Bases Algorithm,
        http://citeseer.ist.psu.edu/ajwa95grbner.html, 1995

    """
    # One (initially empty) quotient per divisor.  Iterate ``G``
    # directly instead of ``xrange(len(G))`` -- xrange was removed in
    # Python 3 and the indices were never used.
    Q, r = [ [] for _ in G ], []

    if K.has_Field:
        term_div = _term_ff_div
    else:
        term_div = _term_rr_div

    while f:
        # Try to divide the leading term of ``f`` by some divisor's
        # leading term; on success reduce ``f``, otherwise move the
        # leading term of ``f`` into the remainder.
        for i, g in enumerate(G):
            tq = term_div(sdp_LT(f, u, K), sdp_LT(g, u, K), K)

            if tq is not None:
                Q[i] = sdp_add_term(Q[i], tq, u, O, K)
                f = sdp_sub(f, sdp_mul_term(g, tq, u, O, K), u, O, K)

                break
        else:
            r = sdp_add_term(r, sdp_LT(f, u, K), u, O, K)
            f = sdp_del_LT(f)

    return Q, r
def sdp_rem(f, g, u, O, K):
    """Returns polynomial remainder in `K[X]`. """
    _, r = sdp_div(f, g, u, O, K)
    return r
def sdp_quo(f, g, u, O, K):
    """Returns polynomial quotient in `K[X]`.

    Raises `ExactQuotientFailed` when the division leaves a remainder.
    """
    # NOTE(review): ``sdp_div`` iterates its second argument as a *list*
    # of polynomials, yet ``sdp_gcd`` passes a single polynomial here;
    # the returned ``q`` is then the list of quotients -- verify the
    # intended calling convention.
    q, r = sdp_div(f, g, u, O, K)
    if not r:
        return q
    else:
        raise ExactQuotientFailed('%s does not divide %s' % (g, f))
def sdp_exquo(f, g, u, O, K):
    """Returns exact polynomial quotient in `K[x]`. """
    # NOTE(review): returns the full list of quotients from ``sdp_div``
    # and silently discards any remainder; confirm callers expect that
    # (cf. ``sdp_quo``, which raises on a nonzero remainder).
    return sdp_div(f, g, u, O, K)[0]
def sdp_lcm(f, g, u, O, K):
    """Computes LCM of two polynomials in `K[X]`.

    The LCM is computed as the unique generator of the intersection
    of the two ideals generated by `f` and `g`. The approach is to
    compute a Groebner basis with respect to lexicographic ordering
    of `t*f` and `(1 - t)*g`, where `t` is an unrelated variable and
    then filtering out the solution that doesn't contain `t`.

    References
    ==========

    .. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties and
        Algorithms, Springer, Second Edition, 1997, pp. 187

    """
    if not f or not g:
        return []
    # Monomial * monomial: componentwise lcm, no Groebner basis needed.
    if sdp_term_p(f) and sdp_term_p(g):
        monom = monomial_lcm(sdp_LM(f, u), sdp_LM(g, u))
        fc, gc = sdp_LC(f, K), sdp_LC(g, K)
        if K.has_Field:
            coeff = K.one
        else:
            coeff = K.lcm(fc, gc)
        return [(monom, coeff)]
    # NOTE(review): these branches look inverted -- over a field
    # sdp_primitive() is a no-op (sdp_content() returns K.one), while
    # over a ring the contents are left in place; confirm the intent.
    if not K.has_Field:
        lcm = K.one
    else:
        fc, f = sdp_primitive(f, K)
        gc, g = sdp_primitive(g, K)
        lcm = K.lcm(fc, gc)
    # Prepend an exponent for the auxiliary variable t: build t*f and
    # (1 - t)*g, then eliminate t via a lexicographic Groebner basis.
    f_terms = tuple( ((1,) + m, c) for m, c in f )
    g_terms = tuple( ((0,) + m, c) for m, c in g ) \
            + tuple( ((1,) + m, -c) for m, c in g )
    F = sdp_sort(f_terms, O_lex)
    G = sdp_sort(g_terms, O_lex)
    basis = sdp_groebner([F, G], u, O_lex, K)
    # Keep only basis elements free of t; the first one generates the
    # intersection ideal, i.e. the LCM (up to content).
    H = [ h for h in basis if sdp_indep_p(h, 0, u) ]
    if K.is_one(lcm):
        h = [ (m[1:], c) for m, c in H[0] ]
    else:
        h = [ (m[1:], c*lcm) for m, c in H[0] ]
    return sdp_sort(h, O)
def sdp_gcd(f, g, u, O, K):
    """Compute GCD of two polynomials in `K[X]` via ``gcd = f*g / lcm``. """
    # Over a ring, work with the primitive parts and re-attach the gcd
    # of the contents at the end.
    if not K.has_Field:
        fc, f = sdp_primitive(f, K)
        gc, g = sdp_primitive(g, K)
        gcd = K.gcd(fc, gc)
    # NOTE(review): sdp_quo() forwards its second argument to sdp_div(),
    # which iterates it as a *list* of polynomials, yet a single
    # polynomial is passed here -- verify the calling convention.
    h = sdp_quo(sdp_mul(f, g, u, O, K),
                sdp_lcm(f, g, u, O, K), u, O, K)
    if not K.has_Field:
        if K.is_one(gcd):
            return h
        else:
            return [ (m, c*gcd) for m, c in h ]
    else:
        # Over a field the GCD is normalized to be monic.
        return sdp_monic(h, K)
def sdp_groebner(F, u, O, K):
    """Computes Groebner basis for a set of polynomials in `K[X]`.

    Given a set of multivariate polynomials `F`, finds another
    set `G`, such that Ideal `F = Ideal G` and `G` is a reduced
    Groebner basis.

    The resulting basis is unique and has monic generators if the
    ground domain is a field. Otherwise the result is non-unique
    but Groebner bases over e.g. integers can be computed (if the
    input polynomials are monic).

    Groebner bases can be used to choose specific generators for a
    polynomial ideal. Because these bases are unique you can check
    for ideal equality by comparing the Groebner bases. To see if
    one polynomial lies in an ideal, divide by the elements in the
    base and see if the remainder vanishes.

    They can also be used to solve systems of polynomial equations
    as, by choosing lexicographic ordering, you can eliminate one
    variable at a time, provided that the ideal is zero-dimensional
    (finite number of solutions).

    References
    ==========

    .. [Bose03] N.K. Bose, B. Buchberger, J.P. Guiver, Multidimensional
        Systems Theory and Applications, Springer, 2003, pp. 98+

    .. [Giovini91] A. Giovini, T. Mora, "One sugar cube, please" or
        Selection strategies in Buchberger algorithm, ISSAC '91, ACM

    .. [Ajwa95] I.A. Ajwa, Z. Liu, P.S. Wang, Groebner Bases Algorithm,
        http://citeseer.ist.psu.edu/ajwa95grbner.html, 1995

    .. [Cox97] D. Cox, J. Little, D. O'Shea, Ideals, Varieties and
        Algorithms, Springer, Second Edition, 1997, pp. 62

    """
    # Drop zero polynomials; an empty input generates the zero ideal.
    F = [ f for f in F if f ]
    if not F:
        return [[]]
    # R: indices still to be processed, P: fresh basis candidates,
    # G: current basis (all as indices into F), B: critical pairs
    # mapped to the lcm of their leading monomials, I: poly -> index.
    R, P, G, B, I = set(), set(), set(), {}, {}
    for i, f in enumerate(F):
        I[tuple(f)] = i
        R.add(i)
    def normal(g, J):
        # Reduce ``g`` modulo the polynomials indexed by ``J``; intern a
        # nonzero normal form in F/I and return (index, leading monomial),
        # or None if ``g`` reduces to zero.
        h = sdp_rem(g, [ F[j] for j in J ], u, O, K)
        if not h:
            return None
        else:
            H = tuple(h)
            if not H in I:
                I[H] = len(F)
                F.append(h)
            return I[H], sdp_LM(h, u)
    def generate(R, P, G, B):
        # Normalize the pending polynomials in R, pruning basis elements
        # whose leading monomials become reducible, then refresh the set
        # of critical pairs B and inter-reduce the basis.
        while R:
            h = normal(F[R.pop()], G | P)
            if h is not None:
                k, LM = h
                G0 = set(g for g in G if monomial_div(sdp_LM(F[g], u), LM))
                P0 = set(p for p in P if monomial_div(sdp_LM(F[p], u), LM))
                G, P, R = G - G0, P - P0 | set([k]), R | G0 | P0
                # Iterate over a snapshot so pairs can be deleted safely.
                for i, j in set(B):
                    if i in G0 or j in G0:
                        del B[(i, j)]
        G |= P
        for i in G:
            for j in P:
                if i == j:
                    continue
                if i < j:
                    k = (i, j)
                else:
                    k = (j, i)
                if k not in B:
                    B[k] = monomial_lcm(sdp_LM(F[i], u), sdp_LM(F[j], u))
        # Inter-reduce the current basis.
        G = set([ normal(F[g], G - set([g]))[0] for g in G ])
        return R, P, G, B
    R, P, G, B = generate(R, P, G, B)
    while B:
        # Normal selection strategy: pick the pair with the smallest lcm.
        # (Python 2 dict API: items() returns a list, iteritems() exists.)
        k, M = B.items()[0]
        for l, N in B.iteritems():
            if O(M, N) == 1:
                k, M = l, N
        del B[k]
        i, j = k[0], k[1]
        p, q = F[i], F[j]
        p_LM, q_LM = sdp_LM(p, u), sdp_LM(q, u)
        # Buchberger's first (product) criterion: coprime leading
        # monomials give an S-polynomial that reduces to zero.
        if M == monomial_mul(p_LM, q_LM):
            continue
        # Buchberger's second (chain) criterion.
        criterion = False
        for g in G:
            if g == i or g == j:
                continue
            if (min(i, g), max(i, g)) not in B:
                continue
            if (min(j, g), max(j, g)) not in B:
                continue
            if not monomial_div(M, sdp_LM(F[g], u)):
                continue
            criterion = True
            break
        if criterion:
            continue
        # Form the S-polynomial of p and q and reduce it modulo G.
        p = sdp_mul_term(p, (monomial_div(M, p_LM), K.quo(K.one, sdp_LC(p, K))), u, O, K)
        q = sdp_mul_term(q, (monomial_div(M, q_LM), K.quo(K.one, sdp_LC(q, K))), u, O, K)
        h = normal(sdp_sub(p, q, u, O, K), G)
        if h is not None:
            k, LM = h
            G0 = set(g for g in G if monomial_div(sdp_LM(F[g], u), LM))
            R, P, G = G0, set([k]), G - G0
            for i, j in set(B):
                if i in G0 or j in G0:
                    del B[(i, j)]
            R, P, G, B = generate(R, P, G, B)
    # Normalize the final basis: monic over a field, primitive with a
    # positive leading coefficient otherwise.
    if K.has_Field:
        basis = [ sdp_monic(F[g], K) for g in G ]
    else:
        basis = []
        for g in G:
            _, g = sdp_primitive(F[g], K)
            if K.is_negative(sdp_LC(g, K)):
                basis.append(sdp_neg(g, u, O, K))
            else:
                basis.append(g)
    # Python 2 cmp-style sorted(iterable, cmp, key, reverse).
    return list(sorted(basis, O, lambda p: sdp_LM(p, u), True))
| bsd-3-clause | ccc2c22c802603cb64c72f14bc13ece5 | 24.473201 | 89 | 0.478899 | 2.934204 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/image/codecs/png.py | 39 | 3777 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Encoder and decoder for PNG files, using PyPNG (pypng.py).
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import array
from pyglet.gl import *
from pyglet.image import *
from pyglet.image.codecs import *
import pyglet.image.codecs.pypng
class PNGImageDecoder(ImageDecoder):
    """Decode PNG images with the bundled pure-Python PyPNG reader. """

    def get_file_extensions(self):
        # File extensions this decoder handles.
        return ['.png']

    def decode(self, file, filename):
        # Parse the stream with PyPNG; wrap any failure in pyglet's
        # codec exception so callers can try other decoders.
        # (Python 2 ``except ..., e`` syntax.)
        try:
            reader = pyglet.image.codecs.pypng.Reader(file=file)
            width, height, pixels, metadata = reader.read()
        except Exception, e:
            raise ImageDecodeException(
                'PyPNG cannot read %r: %s' % (filename or file, e))
        # Map PyPNG's greyscale/alpha flags onto a pyglet pixel-format
        # string.
        if metadata['greyscale']:
            if metadata['has_alpha']:
                format = 'LA'
            else:
                format = 'L'
        else:
            if metadata['has_alpha']:
                format = 'RGBA'
            else:
                format = 'RGB'
        pitch = len(format) * width
        # Negative pitch: PyPNG yields rows top-to-bottom -- presumably
        # the opposite of pyglet's default row order; confirm against
        # the ImageData documentation.
        return ImageData(width, height, format, pixels.tostring(), -pitch)
class PNGImageEncoder(ImageEncoder):
    """Encode images to PNG with the bundled pure-Python PyPNG writer. """

    # Canonical pixel format for every (greyscale, has_alpha) pair.
    _FORMATS = {
        (True, True): 'LA',
        (True, False): 'L',
        (False, True): 'RGBA',
        (False, False): 'RGB',
    }

    def get_file_extensions(self):
        # File extensions this encoder handles.
        return ['.png']

    def encode(self, image, file, filename):
        image = image.get_image_data()

        has_alpha = 'A' in image.format
        greyscale = len(image.format) < 3

        # Force the image into a format PyPNG understands, with rows
        # ordered top-to-bottom (negative pitch).
        image.format = self._FORMATS[(greyscale, has_alpha)]
        image.pitch = -(image.width * len(image.format))

        writer = pyglet.image.codecs.pypng.Writer(
            image.width, image.height,
            bytes_per_sample=1,
            greyscale=greyscale,
            has_alpha=has_alpha)

        pixel_bytes = array.array('B')
        pixel_bytes.fromstring(image.data)
        writer.write_array(file, pixel_bytes)
def get_decoders():
    """Return instances of the PNG decoders defined in this module. """
    decoders = [PNGImageDecoder()]
    return decoders
def get_encoders():
    """Return instances of the PNG encoders defined in this module. """
    encoders = [PNGImageEncoder()]
    return encoders
| bsd-3-clause | 8fbc23df2b9bbb98c921d9825c9d5fb5 | 33.336364 | 78 | 0.618216 | 4.376593 | false | false | false | false |
mattpap/sympy-polys | sympy/polys/tests/test_polyroots.py | 2 | 9365 |
from sympy import S, symbols, Symbol, Integer, Rational, \
sqrt, I, raises, powsimp, Lambda
from sympy.polys import Poly
from sympy.polys.polyroots import root_factors, roots_linear, \
roots_quadratic, roots_cubic, roots_quartic, roots_binomial, \
roots_rational, roots, number_of_real_roots, RootOf, RootsOf, RootSum
a, b, c, d, t, x, y, z = symbols('a,b,c,d,t,x,y,z')
def test_number_of_real_roots():
    """Counting real roots, optionally restricted to an interval given by
    ``inf``/``sup``; non-numeric bounds must raise ValueError."""
    assert number_of_real_roots(0, x) == 0
    assert number_of_real_roots(7, x) == 0
    f = Poly(x - 1, x)
    assert number_of_real_roots(f) == 1
    assert number_of_real_roots(f, sup=0) == 0
    assert number_of_real_roots(f, inf=1) == 0
    assert number_of_real_roots(f, sup=0, inf=1) == 1
    assert number_of_real_roots(f, sup=1, inf=0) == 1
    f = x**2 - 4
    assert number_of_real_roots(f, x) == 2
    assert number_of_real_roots(f, x, sup=0) == 1
    assert number_of_real_roots(f, x, inf=-1, sup=1) == 0
    raises(ValueError, "number_of_real_roots(f, x, inf=t)")
    raises(ValueError, "number_of_real_roots(f, x, sup=t)")
def test_roots_linear():
    """Root of a linear polynomial: 2*x + 1 has the single root -1/2."""
    assert roots_linear(Poly(2*x+1, x)) == [-Rational(1, 2)]
def test_roots_quadratic():
    """Quadratic formula: repeated, rational and complex root cases."""
    assert roots_quadratic(Poly(2*x**2, x)) == [0, 0]
    assert roots_quadratic(Poly(2*x**2+3*x, x)) == [0, -Rational(3, 2)]
    assert roots_quadratic(Poly(2*x**2+3, x)) == [I*sqrt(6)/2, -I*sqrt(6)/2]
    assert roots_quadratic(Poly(2*x**2+4*x+3, x)) == \
        [-1 + I*sqrt(2)/2, -1 - I*sqrt(2)/2]
def test_roots_cubic():
    """Cardano's formula: repeated and complex root cases."""
    assert roots_cubic(Poly(2*x**3, x)) == [0, 0, 0]
    assert roots_cubic(Poly(x**3-3*x**2+3*x-1, x)) == [1, 1, 1]
    assert roots_cubic(Poly(x**3+1, x)) == \
        [-1, S.Half - I*sqrt(3)/2, S.Half + I*sqrt(3)/2]
def test_roots_quartic():
    """Quartic root formulas; root ordering is not specified, so the
    membership/sorted comparisons accept any permutation."""
    assert roots_quartic(Poly(x**4, x)) == [0, 0, 0, 0]
    assert roots_quartic(Poly(x**4 + x**3, x)) in [
        [-1,0,0,0],
        [0,-1,0,0],
        [0,0,-1,0],
        [0,0,0,-1]
    ]
    assert roots_quartic(Poly(x**4 - x**3, x)) in [
        [1,0,0,0],
        [0,1,0,0],
        [0,0,1,0],
        [0,0,0,1]
    ]
    lhs = roots_quartic(Poly(x**4 + x, x))
    rhs = [S.Half + I*sqrt(3)/2, S.Half - I*sqrt(3)/2, S.Zero, -S.One]
    assert sorted(lhs, key=hash) == sorted(rhs, key=hash)
def test_roots_binomial():
    """Roots of binomials a*x**n + b; the result must agree with the
    quadratic formula on a symbolic quadratic binomial (up to powsimp)."""
    assert roots_binomial(Poly(5*x, x)) == [0]
    assert roots_binomial(Poly(5*x**4, x)) == [0, 0, 0, 0]
    assert roots_binomial(Poly(5*x+2, x)) == [-Rational(2, 5)]
    A = 10**Rational(3, 4)/10
    assert roots_binomial(Poly(5*x**4+2, x)) == \
        [A+I*A, -A+I*A, -A-I*A, A-I*A]
    a1 = Symbol('a1', nonnegative=True)
    b1 = Symbol('b1', nonnegative=True)
    r0 = roots_quadratic(Poly(a1*x**2 + b1, x))
    r1 = roots_binomial(Poly(a1*x**2 + b1, x))
    assert powsimp(r0[0]) == powsimp(r1[0])
    assert powsimp(r0[1]) == powsimp(r1[1])
def test_roots_rational():
    """Rational root candidates only; symbolic coefficients yield none."""
    assert roots_rational(Poly(x**2-1, x)) == [S.One, -S.One]
    assert roots_rational(Poly(x**2-x, x)) == [S.Zero, S.One]
    assert roots_rational(Poly(x**2-x/2, x)) == [S.Zero]
    assert roots_rational(Poly(2*x**2-x, x)) == [S.Zero]
    assert roots_rational(Poly(t*x**2-x, x)) == []
def test_roots():
    """The top-level roots() driver: multiplicities, the ``cubics``,
    ``filter``, ``predicate`` and ``multiple`` options, and symbolic
    coefficients."""
    assert roots(1, x) == {}
    assert roots(x, x) == {S.Zero: 1}
    assert roots(x**9, x) == {S.Zero: 9}
    assert roots(((x-2)*(x+3)*(x-4)).expand(), x) == {-S(3): 1, S(2): 1, S(4): 1}
    assert roots(2*x+1, x) == {-S.Half: 1}
    assert roots((2*x+1)**2, x) == {-S.Half: 2}
    assert roots((2*x+1)**5, x) == {-S.Half: 5}
    assert roots((2*x+1)**10, x) == {-S.Half: 10}
    assert roots(x**4 - 1, x) == {I: 1, S.One: 1, -S.One: 1, -I: 1}
    assert roots((x**4 - 1)**2, x) == {I: 2, S.One: 2, -S.One: 2, -I: 2}
    assert roots(((2*x-3)**2).expand(), x) == { Rational(3,2): 2}
    assert roots(((2*x+3)**2).expand(), x) == {-Rational(3,2): 2}
    assert roots(((2*x-3)**3).expand(), x) == { Rational(3,2): 3}
    assert roots(((2*x+3)**3).expand(), x) == {-Rational(3,2): 3}
    assert roots(((2*x-3)**5).expand(), x) == { Rational(3,2): 5}
    assert roots(((2*x+3)**5).expand(), x) == {-Rational(3,2): 5}
    assert roots(((a*x-b)**5).expand(), x) == { b/a: 5}
    assert roots(((a*x+b)**5).expand(), x) == {-b/a: 5}
    assert roots(x**4-2*x**2+1, x) == {S.One: 2, -S.One: 2}
    assert roots(x**6-4*x**4+4*x**3-x**2, x) == \
        {S.One: 2, -1 - sqrt(2): 1, S.Zero: 2, -1 + sqrt(2): 1}
    assert roots(x**8-1, x) == {
         2**S.Half/2 + I*2**S.Half/2: 1,
         2**S.Half/2 - I*2**S.Half/2: 1,
        -2**S.Half/2 + I*2**S.Half/2: 1,
        -2**S.Half/2 - I*2**S.Half/2: 1,
        S.One: 1, -S.One: 1, I: 1, -I: 1
    }
    assert roots(-2016*x**2 - 5616*x**3 - 2056*x**4 + 3324*x**5 + 2176*x**6 \
        - 224*x**7 - 384*x**8 - 64*x**9, x) == {S(0): 2, -S(2): 2, S(2): 1, -S(7)/2: 1,\
        -S(3)/2: 1, -S(1)/2: 1, S(3)/2: 1}
    assert roots((a+b+c)*x + a+b+c+d, x) == \
        { (-a-b-c-d) / (a+b+c) : 1 }
    assert roots(x**3+x**2-x+1, x, cubics=False) == {}
    assert roots(((x-2)*(x+3)*(x-4)).expand(), x, cubics=False) == {-S(3): 1, S(2): 1, S(4): 1}
    assert roots(((x-2)*(x+3)*(x-4)*(x-5)).expand(), x, cubics=False) == \
            {-S(3): 1, S(2): 1, S(4): 1, S(5): 1}
    assert roots(x**3 + 2*x**2 + 4*x + 8, x) == {-S(2): 1, -2*I: 1, 2*I: 1}
    assert roots(x**3 + 2*x**2 + 4*x + 8, x, cubics=True) == \
            {-2*I: 1, 2*I: 1, -S(2): 1}
    assert roots((x**2 - x)*(x**3 + 2*x**2 + 4*x + 8), x ) == \
            {S(1): 1, S(0): 1, -S(2): 1, -2*I: 1, 2*I: 1}
    # Cardano-style radicals for a cubic with no rational roots.
    r1_2, r1_3, r1_9, r4_9, r19_27 = [ Rational(*r) \
        for r in ((1,2), (1,3), (1,9), (4,9), (19,27)) ]
    U = r1_2 + r1_2*I*3**r1_2
    V = r1_2 - r1_2*I*3**r1_2
    W = (r19_27 + r1_9*33**r1_2)**r1_3
    assert roots(x**3+x**2-x+1, x, cubics=True) == {
        -r1_3 + U*W + r4_9*(U*W)**(-1): 1,
        -r1_3 + V*W + r4_9*(V*W)**(-1): 1,
        -r1_3 - W - r4_9*( W)**(-1): 1,
    }
    f = (x**2+2*x+3).subs(x, 2*x**2 + 3*x).subs(x, 5*x-4)
    r1_2, r13_20, r1_100 = [ Rational(*r) \
        for r in ((1,2), (13,20), (1,100)) ]
    assert roots(f, x) == {
        r13_20 + r1_100*(25 - 200*I*2**r1_2)**r1_2: 1,
        r13_20 - r1_100*(25 - 200*I*2**r1_2)**r1_2: 1,
        r13_20 + r1_100*(25 + 200*I*2**r1_2)**r1_2: 1,
        r13_20 - r1_100*(25 + 200*I*2**r1_2)**r1_2: 1,
    }
    f = x**4 + x**3 + x**2 + x + 1
    r1_2, r1_4, r5_2 = [ Rational(*r) for r in ((1,2), (1,4), (5,2)) ]
    assert roots(f, x) == {
        -r1_4 + r1_4*5**r1_2 + r1_2*(-r5_2 - r1_2*5**r1_2)**r1_2: 1,
        -r1_4 + r1_4*5**r1_2 - r1_2*(-r5_2 - r1_2*5**r1_2)**r1_2: 1,
        -r1_4 - r1_4*5**r1_2 + r1_2*(-r5_2 + r1_2*5**r1_2)**r1_2: 1,
        -r1_4 - r1_4*5**r1_2 - r1_2*(-r5_2 + r1_2*5**r1_2)**r1_2: 1,
    }
    f = z**3 + (-2 - y)*z**2 + (1 + 2*y - 2*x**2)*z - y + 2*x**2
    assert roots(f, z) == {
        S.One: 1,
        S.Half + S.Half*y + S.Half*(1 - 2*y + y**2 + 8*x**2)**S.Half: 1,
        S.Half + S.Half*y - S.Half*(1 - 2*y + y**2 + 8*x**2)**S.Half: 1,
    }
    assert roots(a*b*c*x**3 + 2*x**2 + 4*x + 8, x, cubics=False) == {}
    assert roots(a*b*c*x**3 + 2*x**2 + 4*x + 8, x, cubics=True) != {}
    assert roots(x**4-1, x, filter='Z') == {S.One: 1, -S.One: 1}
    assert roots(x**4-1, x, filter='I') == {I: 1, -I: 1}
    assert roots((x-1)*(x+1), x) == {S.One: 1, -S.One: 1}
    assert roots((x-1)*(x+1), x, predicate=lambda r: r.is_positive) == {S.One: 1}
    assert roots(x**4-1, x, filter='Z', multiple=True) == [S.One, -S.One]
    assert roots(x**4-1, x, filter='I', multiple=True) in ([I, -I], [-I, I])
    assert roots(x**3, x, multiple=True) == [S.Zero, S.Zero, S.Zero]
    assert roots(1234, x, multiple=True) == []
def test_roots2():
    """Just test that calculating these roots does not hang
    (final result is not checked)
    """
    a, b, c, d, x = symbols("a b c d x")
    f1 = x**2*c + (a/b) + x*c*d - a
    f2 = x**2*(a + b*(c-d)*a) + x*a*b*c/(b*d-d) + (a*d-c/d)
    # NOTE: ``.values() == [1, 1]`` relies on the Python 2 behaviour of
    # dict.values() returning a list.
    assert roots(f1, x).values() == [1, 1]
    assert roots(f2, x).values() == [1, 1]
def test_root_factors():
    """Factorization of a polynomial into linear (and filtered) root
    factors, including repeated factors."""
    assert root_factors(Poly(1, x)) == [Poly(1, x)]
    assert root_factors(Poly(x, x)) == [Poly(x, x)]
    assert root_factors(Poly(x**2-1, x)) == [Poly(x-1, x), Poly(x+1, x)]
    factors = root_factors(Poly((x**4 - 1)**2, x))
    assert len(factors) == 8
    assert set(factors) == set([Poly(x-I, x), Poly(x-1, x), Poly(x+1, x), Poly(x+I, x)])
    assert root_factors(Poly(x**4-1, x), filter='Z') == \
        [Poly(x-1, x), Poly(x+1, x), Poly(x**2+1, x)]
def test_RootsOf():
    """RootsOf/RootOf/RootSum: explicit roots for factorable input,
    symbolic RootOf objects otherwise, and RootSum evaluation control."""
    f = Poly((x-4)**4, x)
    roots = RootsOf(f)
    assert roots.count == 4
    assert list(roots.roots()) == [ Integer(4),
        Integer(4), Integer(4), Integer(4) ]
    assert RootSum(lambda r: r**2, f) == 64
    roots = RootsOf(x**5+x+1, x)
    assert roots.count == 5
    f = Poly(x**5+x+1, x)
    assert list(roots.roots()) == [ RootOf(f, 0), RootOf(f, 1),
        RootOf(f, 2), RootOf(f, 3), RootOf(f, 4) ]
    assert RootSum(lambda r: r**2, f).doit() == RootOf(f, 0)**2 + \
        RootOf(f, 1)**2 + RootOf(f, 2)**2 + RootOf(f, 3)**2 + RootOf(f, 4)**2
    assert RootSum(Lambda(x, x), Poly(0, x), evaluate=True) == S.Zero
    assert RootSum(Lambda(x, x), Poly(0, x), evaluate=False) != S.Zero
    assert RootSum(Lambda(x, x), Poly(x-1, x), evaluate=False).doit() == S.One
| bsd-3-clause | 859162e31ec41a6f17eec04e6d10295b | 33.94403 | 95 | 0.488841 | 2.21029 | false | false | false | false |
mattpap/sympy-polys | sympy/logic/algorithms/dpll.py | 1 | 9083 | """Implementation of DPLL algorithm
Further improvements: eliminate calls to pl_true, implement branching rules,
efficient unit propagation.
References:
- http://en.wikipedia.org/wiki/DPLL_algorithm
- http://bioinformatics.louisville.edu/ouyang/MingOuyangThesis.pdf
"""
from sympy.core import Symbol
from sympy.logic.boolalg import Or, Not, conjuncts, disjuncts, to_cnf, \
to_int_repr
from sympy.logic.inference import pl_true, literal_symbol
def dpll_satisfiable(expr):
    """
    Check satisfiability of a propositional sentence.
    It returns a model rather than True when it succeeds

    >>> from sympy import symbols
    >>> from sympy.abc import A, B
    >>> from sympy.logic.algorithms.dpll import dpll_satisfiable
    >>> dpll_satisfiable(A & ~B)
    {A: True, B: False}
    >>> dpll_satisfiable(A & ~A)
    False
    """
    # Translate the CNF clauses into the integer representation used by
    # the low-level solver: integer i+1 stands for all_symbols[i].
    all_symbols = list(expr.atoms(Symbol))
    symbol_ids = set(range(1, len(all_symbols) + 1))
    cnf_clauses = conjuncts(to_cnf(expr))
    int_clauses = to_int_repr(cnf_clauses, all_symbols)

    model = dpll_int_repr(int_clauses, symbol_ids, {})
    if not model:
        return model

    # Map the integer assignment back onto Symbol instances.
    result = {}
    for key in model:
        result[all_symbols[key - 1]] = model[key]
    return result
def dpll(clauses, symbols, model):
    """
    Compute satisfiability in a partial model.
    Clauses is an array of conjuncts.

    >>> from sympy.abc import A, B, D
    >>> from sympy.logic.algorithms.dpll import dpll
    >>> dpll([A, B, D], [A, B], {D: False})
    False
    """
    # compute DP kernel
    # First exhaust unit clauses, then pure literals; both loops extend
    # the model, shrink ``symbols`` and simplify ``clauses`` in place.
    P, value = find_unit_clause(clauses, model)
    while P:
        model.update({P: value})
        symbols.remove(P)
        if not value: P = ~P
        clauses = unit_propagate(clauses, P)
        P, value = find_unit_clause(clauses, model)
    P, value = find_pure_symbol(symbols, clauses)
    while P:
        model.update({P: value})
        symbols.remove(P)
        if not value: P = ~P
        clauses = unit_propagate(clauses, P)
        P, value = find_pure_symbol(symbols, clauses)
    # end DP kernel
    # Evaluate the remaining clauses: any false clause refutes the
    # model, undetermined clauses are kept for the branching step.
    unknown_clauses = []
    for c in clauses:
        val = pl_true(c, model)
        if val == False:
            return False
        if val != True:
            unknown_clauses.append(c)
    if not unknown_clauses:
        return model
    # NOTE: unreachable in practice -- empty ``clauses`` implies empty
    # ``unknown_clauses``, which is handled above.
    if not clauses: return model
    # Branch: try P=True on the shared model, P=False on copies.
    P = symbols.pop()
    model_copy = model.copy()
    model.update({P: True})
    model_copy.update({P: False})
    symbols_copy = symbols[:]
    return (dpll(unit_propagate(unknown_clauses, P), symbols, model) or
            dpll(unit_propagate(unknown_clauses, Not(P)), symbols_copy, model_copy))
def dpll_int_repr(clauses, symbols, model):
    """
    Compute satisfiability in a partial model.
    Arguments are expected to be in integer representation
    >>> from sympy.logic.algorithms.dpll import dpll_int_repr
    >>> dpll_int_repr([set([1]), set([2]), set([3])], set([1, 2]), {3: False})
    False
    """
    # compute DP kernel
    # Unit propagation: assign variables forced by single-literal clauses.
    # Negative literals are represented as negative ints.
    P, value = find_unit_clause_int_repr(clauses, model)
    while P:
        model.update({P: value})
        symbols.remove(P)
        if not value:
            P = -P
        clauses = unit_propagate_int_repr(clauses, P)
        P, value = find_unit_clause_int_repr(clauses, model)
    # Pure-literal elimination: assign symbols occurring with one polarity.
    P, value = find_pure_symbol_int_repr(symbols, clauses)
    while P:
        model.update({P: value})
        symbols.remove(P)
        if not value:
            P = -P
        clauses = unit_propagate_int_repr(clauses, P)
        P, value = find_pure_symbol_int_repr(symbols, clauses)
    # end DP kernel
    unknown_clauses = []
    for c in clauses:
        val = pl_true_int_repr(c, model)
        if val is False:
            # Some clause is falsified: this branch fails.
            return False
        if val is not True:
            unknown_clauses.append(c)
    if not unknown_clauses:
        return model
    # Split: try the next symbol both ways (backtracking search).
    P = symbols.pop()
    model_copy = model.copy()
    model.update({P: True})
    model_copy.update({P: False})
    symbols_copy = symbols.copy()
    return (dpll_int_repr(unit_propagate_int_repr(unknown_clauses, P), symbols, model) or
            dpll_int_repr(unit_propagate_int_repr(unknown_clauses, -P), symbols_copy, model_copy))
### helper methods for DPLL
def pl_true_int_repr(clause, model=None):
    """
    Lightweight version of pl_true.
    Argument clause represents the set of args of an Or clause. This is used
    inside dpll_int_repr, it is not meant to be used directly.

    Returns True if some literal is satisfied by ``model``, False if all
    literals are falsified, and None if the value is still undetermined.

    >>> from sympy.logic.algorithms.dpll import pl_true_int_repr
    >>> pl_true_int_repr(set([1, 2]), {1: False})
    >>> pl_true_int_repr(set([1, 2]), {1: False, 2: False})
    False
    """
    # Avoid a mutable default argument; behaviour is unchanged since the
    # dict was only ever read.
    if model is None:
        model = {}
    result = False
    for lit in clause:
        if lit < 0:
            # Negative literal: its value is the negation of the symbol's.
            p = model.get(-lit)
            if p is not None:
                p = not p
        else:
            p = model.get(lit)
        if p is True:
            return True
        elif p is None:
            # An unassigned literal (and none true yet) leaves the clause
            # undetermined.
            result = None
    return result
def unit_propagate(clauses, symbol):
    """
    Returns an equivalent set of clauses
    If a set of clauses contains the unit clause l, the other clauses are
    simplified by the application of the two following rules:
    1. every clause containing l is removed
    2. in every clause that contains ~l this literal is deleted
    Arguments are expected to be in CNF.
    >>> from sympy import symbols
    >>> from sympy.abc import A, B, D
    >>> from sympy.logic.algorithms.dpll import unit_propagate
    >>> unit_propagate([A | B, D | ~B, B], B)
    [D, B]
    """
    output = []
    for c in clauses:
        if c.func != Or:
            # Single-literal clause: cannot contain both polarities, keep it.
            output.append(c)
            continue
        for arg in c.args:
            if arg == ~symbol:
                # Rule 2: drop the negated literal from this clause.
                output.append(Or(*[x for x in c.args if x != ~symbol]))
                break
            if arg == symbol:
                # Rule 1: clause is satisfied by the unit literal; drop it.
                break
        else:
            # Neither polarity appeared: keep the clause unchanged.
            output.append(c)
    return output
def unit_propagate_int_repr(clauses, s):
    """Propagate the unit literal ``s`` through integer-encoded clauses.

    Clauses containing ``s`` are satisfied and dropped; the complementary
    literal ``-s`` is removed from every remaining clause.
    """
    complement = set([-s])
    simplified = []
    for clause in clauses:
        if s in clause:
            continue
        simplified.append(clause - complement)
    return simplified
def find_pure_symbol(symbols, unknown_clauses):
    """
    Find a symbol and its value if it appears only as a positive literal
    (or only as a negative) in clauses.
    >>> from sympy import symbols
    >>> from sympy.abc import A, B, D
    >>> from sympy.logic.algorithms.dpll import find_pure_symbol
    >>> find_pure_symbol([A, B, D], [A|~B,~B|~D,D|A])
    (A, True)
    """
    for sym in symbols:
        seen_pos = False
        seen_neg = False
        for clause in unknown_clauses:
            literals = disjuncts(clause)
            if not seen_pos and sym in literals:
                seen_pos = True
            if not seen_neg and Not(sym) in literals:
                seen_neg = True
        # Pure iff exactly one polarity was observed.
        if seen_pos != seen_neg:
            return sym, seen_pos
    return None, None
def find_pure_symbol_int_repr(symbols, unknown_clauses):
    """Integer-representation counterpart of ``find_pure_symbol``.

    Returns ``(symbol, value)`` for a variable occurring with only one
    polarity across ``unknown_clauses`` (positively-pure symbols are
    preferred), or ``(None, None)`` when no pure symbol exists.
    """
    literals = set()
    for clause in unknown_clauses:
        literals.update(clause)
    # First look for a symbol that only ever appears positively...
    for sym in symbols:
        if sym in literals and -sym not in literals:
            return sym, True
    # ...then for one that only ever appears negatively.
    for sym in symbols:
        if -sym in literals and sym not in literals:
            return sym, False
    return None, None
def find_unit_clause(clauses, model):
    """
    A unit clause has only 1 variable that is not bound in the model.
    >>> from sympy import symbols
    >>> from sympy.abc import A, B, D
    >>> from sympy.logic.algorithms.dpll import find_unit_clause
    >>> find_unit_clause([A | B | D, B | ~D, A | ~B], {A:True})
    (B, False)
    """
    for clause in clauses:
        num_not_in_model = 0
        for literal in disjuncts(clause):
            sym = literal_symbol(literal)
            if sym not in model:
                num_not_in_model += 1
                # Track the latest unbound literal; only returned when it
                # turns out to be the sole unbound one in this clause.
                P, value = sym, not (literal.func is Not)
        if num_not_in_model == 1:
            return P, value
    return None, None
def find_unit_clause_int_repr(clauses, model):
    """Integer-representation counterpart of ``find_unit_clause``.

    Returns ``(symbol, value)`` for the first clause with exactly one
    literal whose variable is unassigned in ``model``; ``(None, None)``
    when no such clause exists.
    """
    # A variable assigned in the model binds both of its literals.
    assigned = set()
    for sym in model:
        assigned.add(sym)
        assigned.add(-sym)
    for clause in clauses:
        free = [lit for lit in clause if lit not in assigned]
        if len(free) == 1:
            lit = free[0]
            return (lit, True) if lit > 0 else (-lit, False)
    return None, None
| bsd-3-clause | 763157d0833354a0ff94087484f92c51 | 30.429066 | 98 | 0.594848 | 3.577393 | false | false | false | false |
mattpap/sympy-polys | sympy/ntheory/partitions_.py | 10 | 2851 | from sympy.mpmath.libmp import (fzero,
from_man_exp, from_int, from_rational,
fone, fhalf, bitcount, to_int, to_str, mpf_mul, mpf_div, mpf_sub,
mpf_add, mpf_sqrt, mpf_pi, mpf_cosh_sinh, pi_fixed, mpf_cos)
from sympy.core.numbers import igcd
import math
def A(n, j, prec):
    """Compute the inner sum in the HRR formula.

    Works at ``prec`` bits of fixed-point precision; the outer sum over h
    ranges over residues coprime to j (a Dedekind-sum style term).
    """
    if j == 1:
        return fone
    s = fzero
    pi = pi_fixed(prec)
    for h in xrange(1, j):
        if igcd(h,j) != 1:
            continue
        # & with mask to compute fractional part of fixed-point number
        one = 1 << prec
        onemask = one - 1
        half = one >> 1
        g = 0
        if j >= 3:
            for k in xrange(1, j):
                t = h*k*one//j
                if t > 0: frac = t & onemask
                else: frac = -((-t) & onemask)
                g += k*(frac - half)
        # Fold in the -2*h*n term and scale by pi/j, still in fixed point.
        g = ((g - 2*h*n*one)*pi//j) >> prec
        s = mpf_add(s, mpf_cos(from_man_exp(g, -prec), prec), prec)
    return s
def D(n, j, prec, sq23pi, sqrt8):
    """
    Compute the sinh term in the outer sum of the HRR formula.
    The constants sqrt(2/3*pi) and sqrt(8) must be precomputed.
    """
    j = from_int(j)
    pi = mpf_pi(prec)
    # a = sqrt(2/3*pi) / j, b = n - 1/24, c = sqrt(b)
    a = mpf_div(sq23pi, j, prec)
    b = mpf_sub(from_int(n), from_rational(1,24,prec), prec)
    c = mpf_sqrt(b, prec)
    ch, sh = mpf_cosh_sinh(mpf_mul(a,c), prec)
    # D = sqrt(j) / (sqrt(8)*b*pi), E = a*cosh(a*c) - sinh(a*c)/c
    D = mpf_div(mpf_sqrt(j,prec), mpf_mul(mpf_mul(sqrt8,b),pi), prec)
    E = mpf_sub(mpf_mul(a,ch), mpf_div(sh,c,prec), prec)
    return mpf_mul(D, E)
def npartitions(n, verbose=False):
    """
    Calculate the partition function P(n), i.e. the number of ways that
    n can be written as a sum of positive integers.
    P(n) is computed using the Hardy-Ramanujan-Rademacher formula,
    described e.g. at http://mathworld.wolfram.com/PartitionFunctionP.html
    The correctness of this implementation has been tested for 10**n
    up to n = 8.
    """
    n = int(n)
    if n < 0: return 0
    # Small values are hard-coded to avoid precision setup overhead.
    if n <= 5: return [1, 1, 2, 3, 5, 7][n]
    # Estimate number of bits in p(n). This formula could be tidied
    pbits = int((math.pi*(2*n/3.)**0.5-math.log(4*n))/math.log(10)+1)*\
        math.log(10,2)
    # Working precision: estimated size plus a safety margin.
    prec = p = int(pbits*1.1 + 100)
    s = fzero
    M = max(6, int(0.24*n**0.5+4))
    sq23pi = mpf_mul(mpf_sqrt(from_rational(2,3,p), p), mpf_pi(p), p)
    sqrt8 = mpf_sqrt(from_int(8), p)
    for q in xrange(1, M):
        a = A(n,q,p)
        d = D(n,q,p, sq23pi, sqrt8)
        s = mpf_add(s, mpf_mul(a, d), prec)
        if verbose:
            print "step", q, "of", M, to_str(a, 10), to_str(d, 10)
        # On average, the terms decrease rapidly in magnitude. Dynamically
        # reducing the precision greatly improves performance.
        p = bitcount(abs(to_int(d))) + 50
    # Round the accumulated sum to the nearest integer.
    np = to_int(mpf_add(s, fhalf, prec))
    return int(np)
# Public API of this module.
__all__ = ['npartitions']
| bsd-3-clause | 605c3c334e821d2f4e8de612c1f8572d | 34.197531 | 74 | 0.558401 | 2.814413 | false | false | false | false |
mattpap/sympy-polys | sympy/mpmath/rational.py | 1 | 5145 | import operator
from libmp import int_types, mpf_hash, bitcount
new = object.__new__
def create_reduced(p, q, _cache={}):
    """Return the mpq representing p/q in lowest terms.

    The mutable default ``_cache`` is intentional: it memoizes commonly
    used small fractions (denominator <= 4, |numerator| < 100) across
    calls so repeated constructions return the same object.
    """
    key = p, q
    if key in _cache:
        return _cache[key]
    # Reduce by the gcd (iterative Euclidean algorithm).
    x, y = p, q
    while y:
        x, y = y, x % y
    if x != 1:
        p //= x
        q //= x
    # Bypass __new__/__init__ for speed; _mpq_ is the only slot.
    v = new(mpq)
    v._mpq_ = p, q
    # Speedup integers, half-integers and other small fractions
    if q <= 4 and abs(key[0]) < 100:
        _cache[key] = v
    return v
class mpq(object):
    """
    Exact rational type, currently only intended for internal use.

    Instances store a reduced ``(numerator, denominator)`` pair in the
    ``_mpq_`` slot.  Arithmetic with Python ints and other mpq values is
    supported; operations with unsupported types return the
    ``NotImplemented`` singleton so Python can try the reflected method.
    """
    __slots__ = ["_mpq_"]
    def __new__(cls, p, q=1):
        if type(p) is tuple:
            p, q = p
        elif hasattr(p, '_mpq_'):
            p, q = p._mpq_
        return create_reduced(p, q)
    def __repr__(s):
        return "mpq(%s,%s)" % s._mpq_
    def __str__(s):
        return "(%s/%s)" % s._mpq_
    def __int__(s):
        a, b = s._mpq_
        return a // b
    def __nonzero__(s):
        return bool(s._mpq_[0])
    def __hash__(s):
        a, b = s._mpq_
        if b == 1:
            # Integers hash like Python ints.
            return hash(a)
        # Power of two: mpf compatible hash
        # NOTE(review): from_man_exp is not imported in this module's
        # header; this branch would raise NameError -- confirm against
        # the libmp import list.
        if not (b & (b-1)):
            return mpf_hash(from_man_exp(a, 1-bitcount(b)))
        return hash((a,b))
    def __eq__(s, t):
        ttype = type(t)
        if ttype is mpq:
            return s._mpq_ == t._mpq_
        if ttype in int_types:
            a, b = s._mpq_
            if b != 1:
                return False
            return a == t
        return NotImplemented
    def __ne__(s, t):
        ttype = type(t)
        if ttype is mpq:
            return s._mpq_ != t._mpq_
        if ttype in int_types:
            a, b = s._mpq_
            if b != 1:
                return True
            return a != t
        return NotImplemented
    def _cmp(s, t, op):
        """Shared implementation of the ordering comparisons."""
        ttype = type(t)
        if ttype in int_types:
            a, b = s._mpq_
            return op(a, t*b)
        if ttype is mpq:
            a, b = s._mpq_
            c, d = t._mpq_
            # a/b ? c/d  <=>  a*d ? b*c  (denominators are positive).
            return op(a*d, b*c)
        # Bug fix: previously returned the NotImplementedError class
        # instead of the NotImplemented singleton, which broke Python's
        # reflected-comparison fallback (and is always truthy).
        return NotImplemented
    def __lt__(s, t): return s._cmp(t, operator.lt)
    def __le__(s, t): return s._cmp(t, operator.le)
    def __gt__(s, t): return s._cmp(t, operator.gt)
    def __ge__(s, t): return s._cmp(t, operator.ge)
    def __abs__(s):
        a, b = s._mpq_
        if a >= 0:
            return s
        v = new(mpq)
        v._mpq_ = -a, b
        return v
    def __neg__(s):
        a, b = s._mpq_
        v = new(mpq)
        v._mpq_ = -a, b
        return v
    def __pos__(s):
        return s
    def __add__(s, t):
        ttype = type(t)
        if ttype is mpq:
            a, b = s._mpq_
            c, d = t._mpq_
            return create_reduced(a*d+b*c, b*d)
        if ttype in int_types:
            a, b = s._mpq_
            # Adding an int cannot change the (reduced) denominator.
            v = new(mpq)
            v._mpq_ = a+b*t, b
            return v
        return NotImplemented
    __radd__ = __add__
    def __sub__(s, t):
        ttype = type(t)
        if ttype is mpq:
            a, b = s._mpq_
            c, d = t._mpq_
            return create_reduced(a*d-b*c, b*d)
        if ttype in int_types:
            a, b = s._mpq_
            v = new(mpq)
            v._mpq_ = a-b*t, b
            return v
        return NotImplemented
    def __rsub__(s, t):
        ttype = type(t)
        if ttype is mpq:
            a, b = s._mpq_
            c, d = t._mpq_
            return create_reduced(b*c-a*d, b*d)
        if ttype in int_types:
            a, b = s._mpq_
            v = new(mpq)
            v._mpq_ = b*t-a, b
            return v
        return NotImplemented
    def __mul__(s, t):
        ttype = type(t)
        if ttype is mpq:
            a, b = s._mpq_
            c, d = t._mpq_
            return create_reduced(a*c, b*d)
        if ttype in int_types:
            a, b = s._mpq_
            return create_reduced(a*t, b)
        return NotImplemented
    __rmul__ = __mul__
    def __div__(s, t):
        ttype = type(t)
        if ttype is mpq:
            a, b = s._mpq_
            c, d = t._mpq_
            return create_reduced(a*d, b*c)
        if ttype in int_types:
            a, b = s._mpq_
            return create_reduced(a, b*t)
        return NotImplemented
    def __rdiv__(s, t):
        ttype = type(t)
        if ttype is mpq:
            a, b = s._mpq_
            c, d = t._mpq_
            return create_reduced(b*c, a*d)
        if ttype in int_types:
            a, b = s._mpq_
            return create_reduced(b*t, a)
        return NotImplemented
    def __pow__(s, t):
        ttype = type(t)
        if ttype in int_types:
            a, b = s._mpq_
            if t:
                if t < 0:
                    # Negative exponent: invert, then raise to -t.
                    a, b, t = b, a, -t
                v = new(mpq)
                v._mpq_ = a**t, b**t
                return v
            # 0**0 is deliberately an error here (base must be nonzero
            # for t == 0 to short-circuit earlier callers).
            raise ZeroDivisionError
        return NotImplemented
# Precomputed small rational constants used frequently by the library.
mpq_1 = mpq((1,1))
mpq_0 = mpq((0,1))
mpq_1_2 = mpq((1,2))
mpq_3_2 = mpq((3,2))
mpq_1_4 = mpq((1,4))
mpq_1_16 = mpq((1,16))
mpq_3_16 = mpq((3,16))
mpq_5_2 = mpq((5,2))
mpq_3_4 = mpq((3,4))
mpq_7_4 = mpq((7,4))
mpq_5_4 = mpq((5,4))
| bsd-3-clause | ffca761862985597035cbb9379e50714 | 22.930233 | 66 | 0.417298 | 3.187732 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/media/avbin.py | 5 | 16893 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Use avbin to decode audio and video media.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: avbin.py 2084 2008-05-27 12:42:19Z Alex.Holkner $'
from pyglet.media import (MediaFormatException, StreamingSource,
VideoFormat, AudioFormat, AudioData)
import pyglet
from pyglet import gl
from pyglet.gl import gl_info
from pyglet import image
import pyglet.lib
import ctypes
# Load the AVbin shared library (explicit install path needed on Mac OS X).
av = pyglet.lib.load_library('avbin',
                             darwin='/usr/local/lib/libavbin.dylib')
# Constants and typedefs below mirror the declarations in avbin.h.
AVBIN_RESULT_ERROR = -1
AVBIN_RESULT_OK = 0
AVbinResult = ctypes.c_int
# Stream types.
AVBIN_STREAM_TYPE_UNKNOWN = 0
AVBIN_STREAM_TYPE_VIDEO = 1
AVBIN_STREAM_TYPE_AUDIO = 2
AVbinStreamType = ctypes.c_int
# Audio sample formats.
AVBIN_SAMPLE_FORMAT_U8 = 0
AVBIN_SAMPLE_FORMAT_S16 = 1
AVBIN_SAMPLE_FORMAT_S24 = 2
AVBIN_SAMPLE_FORMAT_S32 = 3
AVBIN_SAMPLE_FORMAT_FLOAT = 4
AVbinSampleFormat = ctypes.c_int
# Log verbosity levels (match ffmpeg's AV_LOG_* scale).
AVBIN_LOG_QUIET = -8
AVBIN_LOG_PANIC = 0
AVBIN_LOG_FATAL = 8
AVBIN_LOG_ERROR = 16
AVBIN_LOG_WARNING = 24
AVBIN_LOG_INFO = 32
AVBIN_LOG_VERBOSE = 40
AVBIN_LOG_DEBUG = 48
AVbinLogLevel = ctypes.c_int
# Opaque handles and the 64-bit microsecond timestamp type.
AVbinFileP = ctypes.c_void_p
AVbinStreamP = ctypes.c_void_p
Timestamp = ctypes.c_int64
class AVbinFileInfo(ctypes.Structure):
    """Mirrors AVbinFileInfo: stream count, duration and metadata tags."""
    _fields_ = [
        ('structure_size', ctypes.c_size_t),
        ('n_streams', ctypes.c_int),
        ('start_time', Timestamp),
        ('duration', Timestamp),
        ('title', ctypes.c_char * 512),
        ('author', ctypes.c_char * 512),
        ('copyright', ctypes.c_char * 512),
        ('comment', ctypes.c_char * 512),
        ('album', ctypes.c_char * 512),
        ('year', ctypes.c_int),
        ('track', ctypes.c_int),
        ('genre', ctypes.c_char * 32),
    ]
class _AVbinStreamInfoVideo(ctypes.Structure):
    """Video branch of the AVbinStreamInfo union: frame size and aspect."""
    _fields_ = [
        ('width', ctypes.c_uint),
        ('height', ctypes.c_uint),
        ('sample_aspect_num', ctypes.c_int),
        ('sample_aspect_den', ctypes.c_int),
    ]
class _AVbinStreamInfoAudio(ctypes.Structure):
    """Audio branch of the AVbinStreamInfo union: format, rate, channels."""
    _fields_ = [
        ('sample_format', ctypes.c_int),
        ('sample_rate', ctypes.c_uint),
        ('sample_bits', ctypes.c_uint),
        ('channels', ctypes.c_uint),
    ]
class _AVbinStreamInfoUnion(ctypes.Union):
    """Union of the per-type stream info; select by AVbinStreamInfo.type."""
    _fields_ = [
        ('video', _AVbinStreamInfoVideo),
        ('audio', _AVbinStreamInfoAudio),
    ]
class AVbinStreamInfo(ctypes.Structure):
    """Mirrors AVbinStreamInfo: stream type plus type-specific details."""
    _fields_ = [
        ('structure_size', ctypes.c_size_t),
        ('type', ctypes.c_int),
        ('u', _AVbinStreamInfoUnion)
    ]
class AVbinPacket(ctypes.Structure):
    """Mirrors AVbinPacket: one encoded packet read from the container.

    ``data`` points into AVbin-owned memory that is reused on the next
    read, so callers must copy it if they need to keep the packet.
    """
    _fields_ = [
        ('structure_size', ctypes.c_size_t),
        ('timestamp', Timestamp),
        ('stream_index', ctypes.c_int),
        ('data', ctypes.POINTER(ctypes.c_uint8)),
        ('size', ctypes.c_size_t),
    ]
# C callback signature for AVbin log messages: (module, level, message).
AVbinLogCallback = ctypes.CFUNCTYPE(None,
    ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p)
# Declare restype/argtypes for every AVbin entry point used below so that
# ctypes marshals arguments and results correctly on all platforms.
av.avbin_get_version.restype = ctypes.c_int
av.avbin_get_ffmpeg_revision.restype = ctypes.c_int
av.avbin_get_audio_buffer_size.restype = ctypes.c_size_t
av.avbin_have_feature.restype = ctypes.c_int
av.avbin_have_feature.argtypes = [ctypes.c_char_p]
av.avbin_init.restype = AVbinResult
av.avbin_set_log_level.restype = AVbinResult
av.avbin_set_log_level.argtypes = [AVbinLogLevel]
av.avbin_set_log_callback.argtypes = [AVbinLogCallback]
av.avbin_open_filename.restype = AVbinFileP
av.avbin_open_filename.argtypes = [ctypes.c_char_p]
av.avbin_close_file.argtypes = [AVbinFileP]
av.avbin_seek_file.argtypes = [AVbinFileP, Timestamp]
av.avbin_file_info.argtypes = [AVbinFileP, ctypes.POINTER(AVbinFileInfo)]
av.avbin_stream_info.argtypes = [AVbinFileP, ctypes.c_int,
                                 ctypes.POINTER(AVbinStreamInfo)]
av.avbin_open_stream.restype = ctypes.c_void_p
av.avbin_open_stream.argtypes = [AVbinFileP, ctypes.c_int]
av.avbin_close_stream.argtypes = [AVbinStreamP]
av.avbin_read.argtypes = [AVbinFileP, ctypes.POINTER(AVbinPacket)]
av.avbin_read.restype = AVbinResult
av.avbin_decode_audio.restype = ctypes.c_int
av.avbin_decode_audio.argtypes = [AVbinStreamP,
    ctypes.c_void_p, ctypes.c_size_t,
    ctypes.c_void_p, ctypes.POINTER(ctypes.c_int)]
av.avbin_decode_video.restype = ctypes.c_int
av.avbin_decode_video.argtypes = [AVbinStreamP,
    ctypes.c_void_p, ctypes.c_size_t,
    ctypes.c_void_p]
def get_version():
    """Return the AVbin library version as an int."""
    return av.avbin_get_version()
class AVbinException(MediaFormatException):
    """Raised when AVbin cannot open or decode a media file."""
    pass
def timestamp_from_avbin(timestamp):
    """Convert an AVbin timestamp (integer microseconds) to float seconds."""
    return timestamp / 1000000.0
def timestamp_to_avbin(timestamp):
    """Convert a timestamp in seconds to AVbin's integer microseconds."""
    microseconds = timestamp * 1000000
    return int(microseconds)
class BufferedPacket(object):
    """Deep copy of an AVbinPacket.

    AVbin reuses the packet's data buffer on every read, so the payload
    is copied here to let packets be queued for later decoding.
    """
    def __init__(self, packet):
        self.timestamp = packet.timestamp
        self.stream_index = packet.stream_index
        self.data = (ctypes.c_uint8 * packet.size)()
        self.size = packet.size
        # Copy the payload out of AVbin-owned memory.
        ctypes.memmove(self.data, packet.data, self.size)
class BufferedImage(object):
    """Pairs a decoded video frame with its presentation timestamp."""
    def __init__(self, image, timestamp):
        self.image = image
        self.timestamp = timestamp
class AVbinSource(StreamingSource):
    """Streaming media source decoded through AVbin (an ffmpeg wrapper).

    Opens the first video stream and the first supported audio stream of
    a file, then serves decoded audio packets and video frames, buffering
    packets of the other stream as they are encountered.
    """
    def __init__(self, filename, file=None):
        if file is not None:
            raise NotImplementedError('TODO: Load from file stream')
        self._file = av.avbin_open_filename(filename)
        if not self._file:
            raise AVbinException('Could not open "%s"' % filename)
        self._video_stream = None
        self._audio_stream = None
        file_info = AVbinFileInfo()
        file_info.structure_size = ctypes.sizeof(file_info)
        av.avbin_file_info(self._file, ctypes.byref(file_info))
        self._duration = timestamp_from_avbin(file_info.duration)
        # Pick the first video and audio streams found, ignore others.
        for i in range(file_info.n_streams):
            info = AVbinStreamInfo()
            info.structure_size = ctypes.sizeof(info)
            av.avbin_stream_info(self._file, i, info)
            if (info.type == AVBIN_STREAM_TYPE_VIDEO and
                not self._video_stream):
                stream = av.avbin_open_stream(self._file, i)
                if not stream:
                    continue
                self.video_format = VideoFormat(
                    width=info.u.video.width,
                    height=info.u.video.height)
                if info.u.video.sample_aspect_num != 0:
                    self.video_format.sample_aspect = (
                        float(info.u.video.sample_aspect_num) /
                        info.u.video.sample_aspect_den)
                self._video_stream = stream
                self._video_stream_index = i
            elif (info.type == AVBIN_STREAM_TYPE_AUDIO and
                  info.u.audio.sample_bits in (8, 16) and
                  info.u.audio.channels in (1, 2) and
                  not self._audio_stream):
                # Only 8/16-bit mono/stereo audio is supported here.
                stream = av.avbin_open_stream(self._file, i)
                if not stream:
                    continue
                self.audio_format = AudioFormat(
                    channels=info.u.audio.channels,
                    sample_size=info.u.audio.sample_bits,
                    sample_rate=info.u.audio.sample_rate)
                self._audio_stream = stream
                self._audio_stream_index = i
        # Reusable packet structure for avbin_read.
        self._packet = AVbinPacket()
        self._packet.structure_size = ctypes.sizeof(self._packet)
        self._packet.stream_index = -1
        self._buffered_packets = []
        self._buffer_streams = []
        self._buffered_images = []
        if self.audio_format:
            self._audio_packet_ptr = 0
            self._audio_packet_size = 0
            self._audio_packet_timestamp = 0
            self._audio_buffer = \
                (ctypes.c_uint8 * av.avbin_get_audio_buffer_size())()
            self._buffer_streams.append(self._audio_stream_index)
        if self.video_format:
            self._buffer_streams.append(self._video_stream_index)
            self._force_next_video_image = True
            self._last_video_timestamp = None
    def __del__(self):
        # Best-effort cleanup: module globals may already be torn down
        # during interpreter shutdown.
        try:
            if self._video_stream:
                av.avbin_close_stream(self._video_stream)
            if self._audio_stream:
                av.avbin_close_stream(self._audio_stream)
            av.avbin_close_file(self._file)
        except:
            pass
    def _seek(self, timestamp):
        """Seek the file and discard all buffered packets/images."""
        av.avbin_seek_file(self._file, timestamp_to_avbin(timestamp))
        self._buffered_packets = []
        self._buffered_images = []
        self._audio_packet_size = 0
        self._force_next_video_image = True
        self._last_video_timestamp = None
    def _get_packet_for_stream(self, stream_index):
        """Return the next packet for ``stream_index``, buffering others.

        Video packets for other requests are decoded eagerly into
        ``_buffered_images``; returns None at end of file.
        """
        # See if a packet has already been buffered
        for packet in self._buffered_packets:
            if packet.stream_index == stream_index:
                self._buffered_packets.remove(packet)
                return packet
        # XXX This is ugly and needs tuning per-codec.  Replace with an
        # explicit API for disabling unused streams (e.g. for silent driver).
        '''
        # Make sure we're not buffering packets that are being ignored
        for buffer in self._buffered_packets, self._buffered_images:
            if len(buffer) > 20:
                buffer.pop(0)
        '''
        # Read more packets, buffering each interesting one until we get to
        # the one we want or reach end of file.
        while True:
            if av.avbin_read(self._file, self._packet) != AVBIN_RESULT_OK:
                return None
            elif self._packet.stream_index == stream_index:
                return self._packet
            elif self._packet.stream_index == self._video_stream_index:
                buffered_image = self._decode_video_packet(self._packet)
                if buffered_image:
                    self._buffered_images.append(buffered_image)
            elif self._packet.stream_index in self._buffer_streams:
                self._buffered_packets.append(BufferedPacket(self._packet))
    def _get_audio_data(self, bytes):
        """Decode and return the next AudioData chunk, or None at EOF."""
        # XXX bytes currently ignored
        while True:
            # Drain the current packet; one packet may yield several
            # decode calls before it is exhausted.
            while self._audio_packet_size > 0:
                size_out = ctypes.c_int(len(self._audio_buffer))
                #print self._audio_stream, self._audio_packet_ptr, self._audio_packet_size, self._audio_buffer, size_out
                used = av.avbin_decode_audio(self._audio_stream,
                    self._audio_packet_ptr, self._audio_packet_size,
                    self._audio_buffer, size_out)
                if used < 0:
                    # Decode error: drop the rest of this packet.
                    self._audio_packet_size = 0
                    break
                self._audio_packet_ptr.value += used
                self._audio_packet_size -= used
                if size_out.value <= 0:
                    continue
                # NOTE(review): string_at is given the c_int ``size_out``
                # rather than ``size_out.value`` -- confirm ctypes accepts
                # this on the supported platforms.
                buffer = ctypes.string_at(self._audio_buffer, size_out)
                duration = \
                    float(len(buffer)) / self.audio_format.bytes_per_second
                timestamp = self._audio_packet_timestamp
                self._audio_packet_timestamp += duration
                return AudioData(buffer, len(buffer), timestamp, duration)
            packet = self._get_packet_for_stream(self._audio_stream_index)
            if not packet:
                return None
            self._audio_packet_timestamp = \
                timestamp_from_avbin(packet.timestamp)
            self._audio_packet = packet # keep from GC
            self._audio_packet_ptr = ctypes.cast(packet.data,
                                                 ctypes.c_void_p)
            self._audio_packet_size = packet.size
    def _init_texture(self, player):
        """Create the player's video texture sized to the stream."""
        if not self.video_format:
            return
        width = self.video_format.width
        height = self.video_format.height
        if gl_info.have_extension('GL_ARB_texture_rectangle'):
            texture = image.Texture.create_for_size(
                gl.GL_TEXTURE_RECTANGLE_ARB, width, height,
                internalformat=gl.GL_RGB)
        else:
            texture = image.Texture.create_for_size(
                gl.GL_TEXTURE_2D, width, height, internalformat=gl.GL_RGB)
        if texture.width != width or texture.height != height:
            texture = texture.get_region(0, 0, width, height)
        player._texture = texture
        # Flip texture coords (good enough for simple apps).
        t = list(player._texture.tex_coords)
        player._texture.tex_coords = t[9:12] + t[6:9] + t[3:6] + t[:3]
    def _decode_video_packet(self, packet):
        """Decode one video packet into a BufferedImage (RGB), or None."""
        timestamp = timestamp_from_avbin(packet.timestamp)
        width = self.video_format.width
        height = self.video_format.height
        pitch = width * 3
        buffer = (ctypes.c_uint8 * (pitch * height))()
        result = av.avbin_decode_video(self._video_stream,
                                       packet.data, packet.size,
                                       buffer)
        if result < 0:
            return None
        return BufferedImage(
            image.ImageData(width, height, 'RGB', buffer, pitch),
            timestamp)
    def _next_image(self):
        """Decode packets until a frame is produced; None at EOF."""
        img = None
        while not img:
            packet = self._get_packet_for_stream(self._video_stream_index)
            if not packet:
                return
            img = self._decode_video_packet(packet)
        return img
    def get_next_video_timestamp(self):
        """Peek at the timestamp of the next video frame without consuming it."""
        if not self.video_format:
            return
        try:
            img = self._buffered_images[0]
        except IndexError:
            img = self._next_image()
            self._buffered_images.append(img)
        if img:
            return img.timestamp
    def get_next_video_frame(self):
        """Return the next decoded video frame image, consuming it."""
        if not self.video_format:
            return
        try:
            img = self._buffered_images.pop(0)
        except IndexError:
            img = self._next_image()
        if img:
            self._last_video_timestamp = img.timestamp
            self._force_next_video_image = False
            return img.image
    def _update_texture(self, player, timestamp):
        """Advance video frames to ``timestamp`` and blit the latest one."""
        if not self.video_format:
            return
        if self._last_video_timestamp > timestamp:
            return
        img = None
        i = 0
        while (not img or
               (img.timestamp < timestamp and
                not self._force_next_video_image) ):
            if self._buffered_images:
                img = self._buffered_images.pop(0)
            else:
                packet = self._get_packet_for_stream(self._video_stream_index)
                if not packet:
                    return
                img = self._decode_video_packet(packet)
            # Emergency loop exit when timestamps are bad
            i += 1
            if i > 60:
                break
        if img:
            player._texture.blit_into(img.image, 0, 0, 0)
            self._last_video_timestamp = img.timestamp
            self._force_next_video_image = False
    def _release_texture(self, player):
        """Drop the player's reference to the video texture."""
        player._texture = None
# Initialise AVbin once at import time; verbosity follows pyglet's
# debug_media option.
av.avbin_init()
if pyglet.options['debug_media']:
    av.avbin_set_log_level(AVBIN_LOG_DEBUG)
else:
    av.avbin_set_log_level(AVBIN_LOG_QUIET)
| bsd-3-clause | 28ea877778d62d26d5d919ba7c244cd1 | 34.415094 | 120 | 0.593915 | 3.80045 | false | false | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/font/freetype_lib.py | 5 | 14036 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: freetype_lib.py 2084 2008-05-27 12:42:19Z Alex.Holkner $'
from ctypes import *
import pyglet.lib
# Load the FreeType shared library; _font_data caches loaded font bytes.
_libfreetype = pyglet.lib.load_library('freetype')
_font_data = {}
def _get_function(name, argtypes, rtype):
    """Resolve ``name`` from libfreetype and attach its ctypes signature.

    Raises ImportError when the symbol is missing, giving callers a
    uniform failure mode for too-old FreeType installations.
    """
    try:
        func = getattr(_libfreetype, name)
        func.argtypes = argtypes
        func.restype = rtype
        return func
    except AttributeError, e:
        raise ImportError(e)
# Destructors used by the wrapper classes' __del__ methods.
FT_Done_FreeType = _get_function('FT_Done_FreeType', [c_void_p], None)
FT_Done_Face = _get_function('FT_Done_Face', [c_void_p], None)
class FT_LibraryRec(Structure):
    """Opaque FT_Library record; only ever used through a pointer."""
    _fields_ = [
        ('dummy', c_int),
    ]
    def __del__(self):
        # Release the FreeType library handle on collection; swallow
        # errors that can occur during interpreter shutdown.
        global _library
        try:
            FT_Done_FreeType(byref(self))
            _library = None
        except:
            pass
FT_Library = POINTER(FT_LibraryRec)
class FT_Glyph_Metrics(Structure):
    """Glyph metrics in 26.6 fixed-point units (FT_Glyph_Metrics)."""
    _fields_ = [
        ('width', c_long),
        ('height', c_long),
        ('horiBearingX', c_long),
        ('horiBearingY', c_long),
        ('horiAdvance', c_long),
        ('vertBearingX', c_long),
        ('vertBearingY', c_long),
        ('vertAdvance', c_long),
    ]
    def dump(self):
        # Debug helper: print every metric field and its value.
        for (name, type) in self._fields_:
            print 'FT_Glyph_Metrics', name, `getattr(self, name)`
class FT_Generic(Structure):
    """Client data slot attached to several FreeType objects."""
    _fields_ = [('data', c_void_p), ('finalizer', c_void_p)]
class FT_BBox(Structure):
    """Bounding box (font units)."""
    _fields_ = [('xMin', c_long), ('yMin', c_long), ('xMax', c_long),
                ('yMax', c_long)]
class FT_Vector(Structure):
    """2D vector in 26.6 fixed-point coordinates."""
    _fields_ = [('x', c_long), ('y', c_long)]
class FT_Bitmap(Structure):
    """Rendered glyph bitmap (FT_Bitmap)."""
    _fields_ = [
        ('rows', c_int),
        ('width', c_int),
        ('pitch', c_int),
        # declaring buffer as c_char_p confuses ctypes, poor dear
        ('buffer', POINTER(c_ubyte)),
        ('num_grays', c_short),
        ('pixel_mode', c_ubyte),
        ('palette_mode', c_char),
        ('palette', c_void_p),
    ]
class FT_Outline(Structure):
    """Scalable glyph outline (FT_Outline)."""
    _fields_ = [
        ('n_contours', c_short),      # number of contours in glyph
        ('n_points', c_short),        # number of points in the glyph
        ('points', POINTER(FT_Vector)),  # the outline's points
        ('tags', c_char_p),           # the points flags
        ('contours', POINTER(c_short)),  # the contour end points
        ('flags', c_int),             # outline masks
    ]
class FT_GlyphSlotRec(Structure):
    """Glyph slot holding the currently loaded glyph (FT_GlyphSlotRec)."""
    _fields_ = [
        ('library', FT_Library),
        ('face', c_void_p),
        ('next', c_void_p),
        ('reserved', c_uint),
        ('generic', FT_Generic),
        ('metrics', FT_Glyph_Metrics),
        ('linearHoriAdvance', c_long),
        ('linearVertAdvance', c_long),
        ('advance', FT_Vector),
        ('format', c_int),
        ('bitmap', FT_Bitmap),
        ('bitmap_left', c_int),
        ('bitmap_top', c_int),
        ('outline', FT_Outline),
        ('num_subglyphs', c_uint),
        ('subglyphs', c_void_p),
        ('control_data', c_void_p),
        ('control_len', c_long),
        ('lsb_delta', c_long),
        ('rsb_delta', c_long),
        ('other', c_void_p),
        ('internal', c_void_p),
    ]
FT_GlyphSlot = POINTER(FT_GlyphSlotRec)
class FT_Size_Metrics(Structure):
    """Metrics of a sized face (FT_Size_Metrics)."""
    _fields_ = [
        ('x_ppem', c_ushort),    # horizontal pixels per EM
        ('y_ppem', c_ushort),    # vertical pixels per EM
        ('x_scale', c_long),     # two scales used to convert font units
        ('y_scale', c_long),     # to 26.6 frac. pixel coordinates
        ('ascender', c_long),    # ascender in 26.6 frac. pixels
        ('descender', c_long),   # descender in 26.6 frac. pixels
        ('height', c_long),      # text height in 26.6 frac. pixels
        ('max_advance', c_long), # max horizontal advance, in 26.6 pixels
    ]
class FT_SizeRec(Structure):
    """Active size object of a face (FT_SizeRec)."""
    _fields_ = [
        ('face', c_void_p),
        ('generic', FT_Generic),
        ('metrics', FT_Size_Metrics),
        ('internal', c_void_p),
    ]
FT_Size = POINTER(FT_SizeRec)
class FT_Bitmap_Size(Structure):
    """Available strike size of a fixed-size face (FT_Bitmap_Size)."""
    _fields_ = [
        ('height', c_ushort),
        ('width', c_ushort),
        ('size', c_long),
        ('x_ppem', c_long),
        ('y_ppem', c_long),
    ]
# face_flags values
# Bit flags for FT_FaceRec.face_flags (FT_FACE_FLAG_* in freetype.h).
FT_FACE_FLAG_SCALABLE          = 1 <<  0
FT_FACE_FLAG_FIXED_SIZES       = 1 <<  1
FT_FACE_FLAG_FIXED_WIDTH       = 1 <<  2
FT_FACE_FLAG_SFNT              = 1 <<  3
FT_FACE_FLAG_HORIZONTAL        = 1 <<  4
FT_FACE_FLAG_VERTICAL          = 1 <<  5
FT_FACE_FLAG_KERNING           = 1 <<  6
FT_FACE_FLAG_FAST_GLYPHS       = 1 <<  7
FT_FACE_FLAG_MULTIPLE_MASTERS  = 1 <<  8
FT_FACE_FLAG_GLYPH_NAMES       = 1 <<  9
FT_FACE_FLAG_EXTERNAL_STREAM   = 1 << 10
FT_FACE_FLAG_HINTER            = 1 << 11
class FT_FaceRec(Structure):
    """Font face object (FT_FaceRec): global face metrics and handles."""
    _fields_ = [
        ('num_faces', c_long),
        ('face_index', c_long),
        ('face_flags', c_long),
        ('style_flags', c_long),
        ('num_glyphs', c_long),
        ('family_name', c_char_p),
        ('style_name', c_char_p),
        ('num_fixed_sizes', c_int),
        ('available_sizes', POINTER(FT_Bitmap_Size)),
        ('num_charmaps', c_int),
        ('charmaps', c_void_p),
        ('generic', FT_Generic),
        ('bbox', FT_BBox),
        ('units_per_EM', c_ushort),
        ('ascender', c_short),
        ('descender', c_short),
        ('height', c_short),
        ('max_advance_width', c_short),
        ('max_advance_height', c_short),
        ('underline_position', c_short),
        ('underline_thickness', c_short),
        ('glyph', FT_GlyphSlot),
        ('size', FT_Size),
        ('charmap', c_void_p),
        ('driver', c_void_p),
        ('memory', c_void_p),
        ('stream', c_void_p),
        ('sizes_list_head', c_void_p),
        ('sizes_list_tail', c_void_p),
        ('autohint', FT_Generic),
        ('extensions', c_void_p),
        ('internal', c_void_p),
    ]
    def dump(self):
        # Debug helper: print every field and its value.
        for (name, type) in self._fields_:
            print 'FT_FaceRec', name, `getattr(self, name)`
    def has_kerning(self):
        # True when the face provides kerning information.
        return self.face_flags & FT_FACE_FLAG_KERNING
FT_Face = POINTER(FT_FaceRec)
class Error(Exception):
    """FreeType error, carrying the numeric FT error code.

    The _ft_errors table maps FreeType's error codes (see fterrdef.h)
    to human-readable descriptions used by __str__.
    """
    def __init__(self, message, errcode):
        # NOTE: assigning to self.message is deprecated in Python 2.6+
        # (BaseException.message); kept for backward compatibility.
        self.message = message
        self.errcode = errcode
    def __str__(self):
        # Fall back to 'unknown error' for codes missing from the table.
        return '%s: %s (%s)'%(self.__class__.__name__, self.message,
            self._ft_errors.get(self.errcode, 'unknown error'))
    _ft_errors = {
        0x00: "no error" ,
        0x01: "cannot open resource" ,
        0x02: "unknown file format" ,
        0x03: "broken file" ,
        0x04: "invalid FreeType version" ,
        0x05: "module version is too low" ,
        0x06: "invalid argument" ,
        0x07: "unimplemented feature" ,
        0x08: "broken table" ,
        0x09: "broken offset within table" ,
        0x10: "invalid glyph index" ,
        0x11: "invalid character code" ,
        0x12: "unsupported glyph image format" ,
        0x13: "cannot render this glyph format" ,
        0x14: "invalid outline" ,
        0x15: "invalid composite glyph" ,
        0x16: "too many hints" ,
        0x17: "invalid pixel size" ,
        0x20: "invalid object handle" ,
        0x21: "invalid library handle" ,
        0x22: "invalid module handle" ,
        0x23: "invalid face handle" ,
        0x24: "invalid size handle" ,
        0x25: "invalid glyph slot handle" ,
        0x26: "invalid charmap handle" ,
        0x27: "invalid cache manager handle" ,
        0x28: "invalid stream handle" ,
        0x30: "too many modules" ,
        0x31: "too many extensions" ,
        0x40: "out of memory" ,
        0x41: "unlisted object" ,
        0x51: "cannot open stream" ,
        0x52: "invalid stream seek" ,
        0x53: "invalid stream skip" ,
        0x54: "invalid stream read" ,
        0x55: "invalid stream operation" ,
        0x56: "invalid frame operation" ,
        0x57: "nested frame access" ,
        0x58: "invalid frame read" ,
        0x60: "raster uninitialized" ,
        0x61: "raster corrupted" ,
        0x62: "raster overflow" ,
        0x63: "negative height while rastering" ,
        0x70: "too many registered caches" ,
        0x80: "invalid opcode" ,
        0x81: "too few arguments" ,
        0x82: "stack overflow" ,
        0x83: "code overflow" ,
        0x84: "bad argument" ,
        0x85: "division by zero" ,
        0x86: "invalid reference" ,
        0x87: "found debug opcode" ,
        0x88: "found ENDF opcode in execution stream" ,
        0x89: "nested DEFS" ,
        0x8A: "invalid code range" ,
        0x8B: "execution context too long" ,
        0x8C: "too many function definitions" ,
        0x8D: "too many instruction definitions" ,
        0x8E: "SFNT font table missing" ,
        0x8F: "horizontal header (hhea, table missing" ,
        0x90: "locations (loca, table missing" ,
        0x91: "name table missing" ,
        0x92: "character map (cmap, table missing" ,
        0x93: "horizontal metrics (hmtx, table missing" ,
        0x94: "PostScript (post, table missing" ,
        0x95: "invalid horizontal metrics" ,
        0x96: "invalid character map (cmap, format" ,
        0x97: "invalid ppem value" ,
        0x98: "invalid vertical metrics" ,
        0x99: "could not find context" ,
        0x9A: "invalid PostScript (post, table format" ,
        0x9B: "invalid PostScript (post, table" ,
        0xA0: "opcode syntax error" ,
        0xA1: "argument stack underflow" ,
        0xA2: "ignore" ,
        0xB0: "`STARTFONT' field missing" ,
        0xB1: "`FONT' field missing" ,
        0xB2: "`SIZE' field missing" ,
        0xB3: "`CHARS' field missing" ,
        0xB4: "`STARTCHAR' field missing" ,
        0xB5: "`ENCODING' field missing" ,
        0xB6: "`BBX' field missing" ,
        0xB7: "`BBX' too big" ,
    }
# FT_Load_Glyph/FT_Load_Char flag: render the glyph to a bitmap on load.
FT_LOAD_RENDER = 0x4
# 26.6 fixed-point type (integer with 6 fractional bits).
FT_F26Dot6 = c_long
# Function prototypes resolved from the FreeType shared library.
# _get_function presumably wraps ctypes prototype setup -- defined
# earlier in this module, outside the visible region.
FT_Init_FreeType = _get_function('FT_Init_FreeType',
    [POINTER(FT_Library)], c_int)
FT_New_Memory_Face = _get_function('FT_New_Memory_Face',
    [FT_Library, POINTER(c_byte), c_long, c_long, POINTER(FT_Face)], c_int)
FT_New_Face = _get_function('FT_New_Face',
    [FT_Library, c_char_p, c_long, POINTER(FT_Face)], c_int)
FT_Set_Pixel_Sizes = _get_function('FT_Set_Pixel_Sizes',
    [FT_Face, c_uint, c_uint], c_int)
FT_Set_Char_Size = _get_function('FT_Set_Char_Size',
    [FT_Face, FT_F26Dot6, FT_F26Dot6, c_uint, c_uint], c_int)
FT_Load_Glyph = _get_function('FT_Load_Glyph',
    [FT_Face, c_uint, c_int32], c_int)
FT_Get_Char_Index = _get_function('FT_Get_Char_Index',
    [FT_Face, c_ulong], c_uint)
FT_Load_Char = _get_function('FT_Load_Char',
    [FT_Face, c_ulong, c_int], c_int)
FT_Get_Kerning = _get_function('FT_Get_Kerning',
    [FT_Face, c_uint, c_uint, c_uint, POINTER(FT_Vector)], c_int)
# SFNT interface
class FT_SfntName(Structure):
    # ctypes mirror of FreeType's FT_SfntName: one entry of a TrueType/
    # OpenType 'name' table (string is NOT null-terminated; use string_len).
    _fields_ = [
        ('platform_id', c_ushort),
        ('encoding_id', c_ushort),
        ('language_id', c_ushort),
        ('name_id', c_ushort),
        ('string', POINTER(c_byte)),   # raw bytes, encoding per encoding_id
        ('string_len', c_uint)         # length of 'string' in bytes
    ]
FT_Get_Sfnt_Name_Count = _get_function('FT_Get_Sfnt_Name_Count',
    [FT_Face], c_uint)
FT_Get_Sfnt_Name = _get_function('FT_Get_Sfnt_Name',
    [FT_Face, c_uint, POINTER(FT_SfntName)], c_int)
# TrueType 'name' table identifiers (see the OpenType spec, name table):
# platform/encoding selectors and the standard name-record IDs.
TT_PLATFORM_MICROSOFT = 3
TT_MS_ID_UNICODE_CS = 1
TT_NAME_ID_COPYRIGHT = 0
TT_NAME_ID_FONT_FAMILY = 1
TT_NAME_ID_FONT_SUBFAMILY = 2
TT_NAME_ID_UNIQUE_ID = 3
TT_NAME_ID_FULL_NAME = 4
TT_NAME_ID_VERSION_STRING = 5
TT_NAME_ID_PS_NAME = 6
TT_NAME_ID_TRADEMARK = 7
TT_NAME_ID_MANUFACTURER = 8
TT_NAME_ID_DESIGNER = 9
TT_NAME_ID_DESCRIPTION = 10
TT_NAME_ID_VENDOR_URL = 11
TT_NAME_ID_DESIGNER_URL = 12
TT_NAME_ID_LICENSE = 13
TT_NAME_ID_LICENSE_URL = 14
TT_NAME_ID_PREFERRED_FAMILY = 16
TT_NAME_ID_PREFERRED_SUBFAMILY= 17
TT_NAME_ID_MAC_FULL_NAME = 18
TT_NAME_ID_CID_FINDFONT_NAME = 20
# Lazily-initialized process-wide FT_Library handle (see ft_get_library).
_library = None
def ft_get_library():
    """Return the process-wide FT_Library handle, initializing FreeType
    lazily on first use."""
    global _library
    if not _library:
        _library = FT_Library()
        error = FT_Init_FreeType(byref(_library))
        if error:
            # NOTE(review): FontException is not defined anywhere in the
            # visible scope; the Error class defined above has exactly this
            # (message, errcode) signature -- confirm which is intended.
            raise FontException(
                'an error occurred during library initialization', error)
    return _library
| bsd-3-clause | c61c4e415d42b65b34a4d2bdece87b35 | 31.871194 | 78 | 0.566472 | 3.249826 | false | false | false | false |
mattpap/sympy-polys | sympy/mpmath/libmp/libmpf.py | 3 | 42867 | """
Low-level functions for arbitrary-precision floating-point arithmetic.
"""
__docformat__ = 'plaintext'
import math
from bisect import bisect
# Importing random is slow
#from random import getrandbits
getrandbits = None
from backend import (MPZ, MPZ_TYPE, MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_FIVE,
BACKEND, STRICT, gmpy, sage, sage_utils)
from libintmath import (giant_steps,
trailtable, bctable, lshift, rshift, bitcount, trailing,
sqrt_fixed, numeral, isqrt, isqrt_fast, sqrtrem,
bin_to_radix)
# We don't pickle tuples directly for the following reasons:
# 1: pickle uses str() for ints, which is inefficient when they are large
# 2: pickle doesn't work for gmpy mpzs
# Both problems are solved by using hex()
if BACKEND == 'sage':
    def to_pickable(x):
        """Convert a raw mpf tuple to a pickle-friendly form (hex mantissa)."""
        sign, man, exp, bc = x
        # Sage integers' hex() carries no '0x' prefix -- presumably why this
        # branch keeps the string whole while the generic one strips 2 chars.
        return sign, hex(man), exp, bc
else:
    def to_pickable(x):
        """Convert a raw mpf tuple to a pickle-friendly form (hex mantissa)."""
        sign, man, exp, bc = x
        # Strip the leading '0x' produced by the builtin hex().
        return sign, hex(man)[2:], exp, bc
def from_pickable(x):
    """Inverse of to_pickable: rebuild a raw mpf from its hex-mantissa form."""
    sign, man, exp, bc = x
    # MPZ accepts a base-16 string regardless of an optional '0x' prefix.
    return (sign, MPZ(man, 16), exp, bc)
class ComplexResult(ValueError):
    """Raised when a real-valued operation would have to produce a
    complex number (e.g. sqrt of a negative real)."""
# All supported rounding modes
# Single-character mode strings are interned so dict lookups and string
# comparisons hit the identity fast path.
# NOTE(review): intern() is a Python 2 builtin (sys.intern in Python 3).
round_nearest = intern('n')
round_floor = intern('f')
round_ceiling = intern('c')
round_up = intern('u')      # away from zero
round_down = intern('d')    # toward zero (truncation)
round_fast = round_down
def prec_to_dps(n):
    """Return number of accurate decimals that can be represented
    with a precision of n bits."""
    # 3.3219... = log2(10); subtract one digit as a safety margin, but
    # never report fewer than one decimal.
    digits = int(round(int(n)/3.3219280948873626)-1)
    return digits if digits > 1 else 1
def dps_to_prec(n):
    """Return the number of bits required to represent n decimals
    accurately."""
    # One extra decimal of headroom, scaled by log2(10); at least 1 bit.
    bits = int(round((int(n)+1)*3.3219280948873626))
    if bits < 1:
        bits = 1
    return bits
def repr_dps(n):
    """Return the number of decimal digits required to represent
    a number with n-bit precision so that it can be uniquely
    reconstructed from the representation."""
    dps = prec_to_dps(n)
    # Double precision (53 bits -> 15 dps) is special-cased: 17 digits
    # are known to round-trip; otherwise pad with three guard digits.
    return 17 if dps == 15 else dps + 3
#----------------------------------------------------------------------------#
#                    Some commonly needed float values                       #
#----------------------------------------------------------------------------#
# Regular number format:
# (-1)**sign * mantissa * 2**exponent, plus bitcount of mantissa
fzero = (0, MPZ_ZERO, 0, 0)
fnzero = (1, MPZ_ZERO, 0, 0)    # negative zero
fone = (0, MPZ_ONE, 0, 1)
fnone = (1, MPZ_ONE, 0, 1)
ftwo = (0, MPZ_ONE, 1, 1)
ften = (0, MPZ_FIVE, 1, 3)      # 5 * 2**1
fhalf = (0, MPZ_ONE, -1, 1)
# Arbitrary encoding for special numbers: zero mantissa, nonzero exponent
# (the exponent/bitcount values are magic tags, not real exponents).
fnan = (0, MPZ_ZERO, -123, -1)
finf = (0, MPZ_ZERO, -456, -2)
fninf = (1, MPZ_ZERO, -789, -3)
# Was 1e1000; this is broken in Python 2.4
# Multiplying two large finite floats is a portable way to obtain +inf.
math_float_inf = 1e300 * 1e300
#----------------------------------------------------------------------------#
# Rounding #
#----------------------------------------------------------------------------#
# This function can be used to round a mantissa generally. However,
# we will try to do most rounding inline for efficiency.
def round_int(x, n, rnd):
    """Divide the integer x by 2**n, rounding in direction rnd.
    Used as the generic fallback; hot paths inline this logic."""
    if rnd == round_nearest:
        if x >= 0:
            t = x >> (n-1)
            # t's low bit is the "half" bit; round up when the half bit is
            # set AND (the result would be odd, or any lower bit is set) --
            # i.e. round-half-to-even.
            if t & 1 and ((t & 2) or (x & h_mask[n<300][n])):
                return (t>>1)+1
            else:
                return t>>1
        else:
            # Nearest rounding is symmetric, so recurse on the magnitude.
            return -round_int(-x, n, rnd)
    if rnd == round_floor:
        # Python's >> floors, which is exactly round_floor.
        return x >> n
    if rnd == round_ceiling:
        return -((-x) >> n)
    if rnd == round_down:
        # Toward zero: floor for positive, ceiling for negative.
        if x >= 0:
            return x >> n
        return -((-x) >> n)
    if rnd == round_up:
        # Away from zero: ceiling for positive, floor for negative.
        if x >= 0:
            return -((-x) >> n)
        return x >> n
# These masks are used to pick out segments of numbers to determine
# which direction to round when rounding to nearest.
class h_mask_big:
    # Lazy mask generator for large shift amounts: h_mask_big()[n] gives a
    # mask of the low n-1 bits, computed on demand.
    def __getitem__(self, n):
        return (MPZ_ONE<<(n-1))-1
# Precomputed masks for small n (the common case); index 0 is unused.
h_mask_small = [0]+[((MPZ_ONE<<(_-1))-1) for _ in range(1, 300)]
# h_mask[n < 300][n]: selects the precomputed table for small n, the lazy
# generator otherwise (True indexes element 1 of the pair).
h_mask = [h_mask_big(), h_mask_small]
# The >> operator rounds to floor. shifts_down[rnd][sign]
# tells whether this is the right direction to use, or if the
# number should be negated before shifting
shifts_down = {round_floor:(1,0), round_ceiling:(0,1),
    round_down:(1,1), round_up:(0,0)}
#----------------------------------------------------------------------------#
# Normalization of raw mpfs #
#----------------------------------------------------------------------------#
# This function is called almost every time an mpf is created.
# It has been optimized accordingly.
def _normalize(sign, man, exp, bc, prec, rnd):
    """
    Create a raw mpf tuple with value (-1)**sign * man * 2**exp and
    normalized mantissa. The mantissa is rounded in the specified
    direction if its size exceeds the precision. Trailing zero bits
    are also stripped from the mantissa to ensure that the
    representation is canonical.
    Conditions on the input:
    * The input must represent a regular (finite) number
    * The sign bit must be 0 or 1
    * The mantissa must be positive
    * The exponent must be an integer
    * The bitcount must be exact
    If these conditions are not met, use from_man_exp, mpf_pos, or any
    of the conversion functions to create normalized raw mpf tuples.
    """
    if not man:
        return fzero
    # Cut mantissa down to size if larger than target precision
    n = bc - prec
    if n > 0:
        if rnd == round_nearest:
            # Round-half-to-even: t's low bit is the "half" bit; round up
            # iff it is set and either the result would be odd or any
            # discarded lower bit is set.
            t = man >> (n-1)
            if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
                man = (t>>1)+1
            else:
                man = t>>1
        elif shifts_down[rnd][sign]:
            man >>= n
        else:
            # Directed rounding away from floor: negate, floor-shift, negate.
            man = -((-man)>>n)
        exp += n
        bc = prec
    # Strip trailing bits
    if not man & 1:
        # trailtable gives the number of trailing zeros of a byte; skip
        # whole zero bytes first, then shift out the remainder.
        t = trailtable[int(man & 255)]
        if not t:
            while not man & 255:
                man >>= 8
                exp += 8
                bc -= 8
            t = trailtable[int(man & 255)]
        man >>= t
        exp += t
        bc -= t
    # Bit count can be wrong if the input mantissa was 1 less than
    # a power of 2 and got rounded up, thereby adding an extra bit.
    # With trailing bits removed, all powers of two have mantissa 1,
    # so this is easy to check for.
    if man == 1:
        bc = 1
    return sign, man, exp, bc
def _normalize1(sign, man, exp, bc, prec, rnd):
    """same as normalize, but with the added condition that
    man is odd or zero
    """
    if not man:
        return fzero
    # An odd mantissa within precision is already canonical.
    if bc <= prec:
        return sign, man, exp, bc
    n = bc - prec
    if rnd == round_nearest:
        # Round-half-to-even, as in _normalize.
        t = man >> (n-1)
        if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
            man = (t>>1)+1
        else:
            man = t>>1
    elif shifts_down[rnd][sign]:
        man >>= n
    else:
        man = -((-man)>>n)
    exp += n
    bc = prec
    # Strip trailing bits
    # (Rounding can reintroduce trailing zeros even though the input
    # mantissa was odd.)
    if not man & 1:
        t = trailtable[int(man & 255)]
        if not t:
            while not man & 255:
                man >>= 8
                exp += 8
                bc -= 8
            t = trailtable[int(man & 255)]
        man >>= t
        exp += t
        bc -= t
    # Bit count can be wrong if the input mantissa was 1 less than
    # a power of 2 and got rounded up, thereby adding an extra bit.
    # With trailing bits removed, all powers of two have mantissa 1,
    # so this is easy to check for.
    if man == 1:
        bc = 1
    return sign, man, exp, bc
def strict_normalize(sign, man, exp, bc, prec, rnd):
    """Additional checks on the components of an mpf. Enable tests by setting
    the environment variable MPMATH_STRICT to Y."""
    # NOTE: 'long' is the Python 2 integer type; this module targets Py2.
    assert type(man) == MPZ_TYPE
    assert type(bc) in (int, long)
    assert type(exp) in (int, long)
    assert bc == bitcount(man)
    return _normalize(sign, man, exp, bc, prec, rnd)
def strict_normalize1(sign, man, exp, bc, prec, rnd):
    """Additional checks on the components of an mpf. Enable tests by setting
    the environment variable MPMATH_STRICT to Y."""
    assert type(man) == MPZ_TYPE
    assert type(bc) in (int, long)
    assert type(exp) in (int, long)
    assert bc == bitcount(man)
    # _normalize1 additionally requires an odd (or zero) mantissa.
    assert (not man) or (man & 1)
    return _normalize1(sign, man, exp, bc, prec, rnd)
# Prefer the C implementations of normalize shipped with gmpy/sage when
# available; fall back to the pure-Python versions above.
if BACKEND == 'gmpy' and '_mpmath_normalize' in dir(gmpy):
    _normalize = gmpy._mpmath_normalize
    _normalize1 = gmpy._mpmath_normalize
if BACKEND == 'sage':
    _normalize = _normalize1 = sage_utils.normalize
# MPMATH_STRICT enables the assertion-checked wrappers for debugging.
if STRICT:
    normalize = strict_normalize
    normalize1 = strict_normalize1
else:
    normalize = _normalize
    normalize1 = _normalize1
#----------------------------------------------------------------------------#
# Conversion functions #
#----------------------------------------------------------------------------#
def from_man_exp(man, exp, prec=None, rnd=round_fast):
    """Create raw mpf from (man, exp) pair. The mantissa may be signed.
    If no precision is specified, the mantissa is stored exactly."""
    man = MPZ(man)
    sign = 0
    if man < 0:
        sign = 1
        man = -man
    # bctable is a precomputed bitcount lookup for small values.
    if man < 1024:
        bc = bctable[int(man)]
    else:
        bc = bitcount(man)
    if not prec:
        # Exact storage: only canonicalize (strip trailing zero bits).
        if not man:
            return fzero
        if not man & 1:
            if man & 2:
                # Exactly one trailing zero bit -- common fast path.
                return (sign, man >> 1, exp + 1, bc - 1)
            t = trailtable[int(man & 255)]
            if not t:
                while not man & 255:
                    man >>= 8
                    exp += 8
                    bc -= 8
                t = trailtable[int(man & 255)]
            man >>= t
            exp += t
            bc -= t
        return (sign, man, exp, bc)
    return normalize(sign, man, exp, bc, prec, rnd)
# Cache of exact raw mpfs for small integers (used by from_int).
int_cache = dict((n, from_man_exp(n, 0)) for n in range(-10, 257))
# Swap in the C implementation of from_man_exp where the backend has one.
if BACKEND == 'gmpy' and '_mpmath_create' in dir(gmpy):
    from_man_exp = gmpy._mpmath_create
if BACKEND == 'sage':
    from_man_exp = sage_utils.from_man_exp
def from_int(n, prec=0, rnd=round_fast):
    """Create a raw mpf from an integer. If no precision is specified,
    the mantissa is stored exactly."""
    if not prec:
        # Exact mode: serve small integers from the precomputed cache.
        if n in int_cache:
            return int_cache[n]
    return from_man_exp(n, 0, prec, rnd)
def to_man_exp(s):
    """Return the (mantissa, exponent) pair of a raw mpf.
    Raises ValueError for the special values inf/-inf/nan, which have
    no meaningful mantissa/exponent."""
    sign, man, exp, bc = s
    # Specials are encoded as zero mantissa with a nonzero exponent tag.
    if exp and not man:
        raise ValueError("mantissa and exponent are undefined for %s" % man)
    return man, exp
def to_int(s, rnd=None):
    """Convert a raw mpf to the nearest int.  By default the value is
    truncated toward zero (matching Python's int(float)); pass rnd to
    select a different rounding mode.  Raises ValueError for inf/nan."""
    sign, man, exp, bc = s
    if exp and not man:
        raise ValueError("cannot convert %s to int" % man)
    if exp >= 0:
        # The value is already an integer; just shift the mantissa up.
        shifted = man << exp
        return -shifted if sign else shifted
    if not rnd:
        # Fast default path.  Shifting the magnitude and negating after
        # yields truncation toward zero (>> alone would floor).
        shifted = man >> (-exp)
        return -shifted if sign else shifted
    if sign:
        return round_int(-man, -exp, rnd)
    return round_int(man, -exp, rnd)
def mpf_round_int(s, rnd):
    """Round the raw mpf s to an integer-valued raw mpf in direction rnd.
    Specials and values that are already integers pass through unchanged."""
    sign, man, exp, bc = s
    if (not man) and exp:
        # inf/-inf/nan: nothing to round.
        return s
    if exp >= 0:
        # Nonnegative exponent means the value is already an integer.
        return s
    # mag = position of the highest bit relative to the binary point.
    mag = exp+bc
    if mag < 1:
        # |s| < 1: the result is -1, 0 or 1 depending on direction.
        if rnd == round_ceiling:
            if sign: return fzero
            else:    return fone
        elif rnd == round_floor:
            if sign: return fnone
            else:    return fzero
        elif rnd == round_nearest:
            # Ties (exactly 1/2, i.e. man == 1 at mag 0) round to zero/even.
            if mag < 0 or man == MPZ_ONE: return fzero
            elif sign: return fnone
            else:      return fone
        else:
            raise NotImplementedError
    # Re-round keeping only the integer bits of the mantissa.
    return mpf_pos(s, min(bc, mag), rnd)
def mpf_floor(s, prec=0, rnd=round_fast):
    """Round s down to the nearest integer (toward -inf); optionally
    re-round the result to prec bits."""
    v = mpf_round_int(s, round_floor)
    if prec:
        v = mpf_pos(v, prec, rnd)
    return v
def mpf_ceil(s, prec=0, rnd=round_fast):
    """Round s up to the nearest integer (toward +inf)."""
    v = mpf_round_int(s, round_ceiling)
    if prec:
        v = mpf_pos(v, prec, rnd)
    return v
def mpf_nint(s, prec=0, rnd=round_fast):
    """Round s to the nearest integer."""
    v = mpf_round_int(s, round_nearest)
    if prec:
        v = mpf_pos(v, prec, rnd)
    return v
def mpf_frac(s, prec=0, rnd=round_fast):
    """Return the fractional part of s, i.e. s - floor(s)."""
    return mpf_sub(s, mpf_floor(s), prec, rnd)
def from_float(x, prec=53, rnd=round_fast):
    """Create a raw mpf from a Python float, rounding if necessary.
    If prec >= 53, the result is guaranteed to represent exactly the
    same number as the input. If prec is not specified, use prec=53."""
    # frexp only raises an exception for nan on some platforms
    if x != x:
        return fnan
    # in Python2.5 math.frexp gives an exception for float infinity
    # in Python2.6 it returns (float infinity, 0)
    try:
        m, e = math.frexp(x)
    except Exception:
        # Was a bare 'except:', which also swallowed KeyboardInterrupt and
        # SystemExit; only genuine errors from frexp belong here.
        if x == math_float_inf: return finf
        if x == -math_float_inf: return fninf
        return fnan
    if x == math_float_inf: return finf
    if x == -math_float_inf: return fninf
    # m is in [0.5, 1); scale to a 53-bit integer mantissa.
    return from_man_exp(int(m*(1<<53)), e-53, prec, rnd)
def to_float(s, strict=False):
    """
    Convert a raw mpf to a Python float. The result is exact if the
    bitcount of s is <= 53 and no underflow/overflow occurs.
    If the number is too large or too small to represent as a regular
    float, it will be converted to inf or 0.0. Setting strict=True
    forces an OverflowError to be raised instead.
    """
    sign, man, exp, bc = s
    if not man:
        # Zero and the tagged special values.
        if s == fzero: return 0.0
        if s == finf: return math_float_inf
        if s == fninf: return -math_float_inf
        # inf/inf produces a float nan.
        return math_float_inf/math_float_inf
    if sign:
        man = -man
    try:
        if bc < 100:
            # Small mantissa: ldexp handles it directly.
            return math.ldexp(man, exp)
        # Try resizing the mantissa. Overflow may still happen here.
        n = bc - 53
        m = man >> n
        return math.ldexp(m, exp + n)
    except OverflowError:
        if strict:
            raise
        # Overflow to infinity
        if exp + bc > 0:
            if sign:
                return -math_float_inf
            else:
                return math_float_inf
        # Underflow to zero
        return 0.0
def from_rational(p, q, prec, rnd=round_fast):
    """Create a raw mpf from a rational number p/q, round if
    necessary."""
    # Exact integer conversion followed by a correctly rounded division.
    return mpf_div(from_int(p), from_int(q), prec, rnd)
def to_rational(s):
    """Convert a raw mpf to a rational number. Return integers (p, q)
    such that s = p/q exactly.

    Raises ValueError for the special values inf, -inf and nan, none of
    which can be expressed as a ratio of integers.
    """
    sign, man, exp, bc = s
    if sign:
        man = -man
    # All specials have a zero mantissa and a nonzero exponent tag.  The
    # old check (bc == -1) only caught nan and silently converted +/-inf
    # into 0 (man == 0 with a huge denominator/numerator).
    if (not man) and exp:
        raise ValueError("cannot convert %s to a rational number" % man)
    if exp >= 0:
        return man * (1<<exp), 1
    else:
        return man, 1<<(-exp)
def to_fixed(s, prec):
    """Convert a raw mpf to a fixed-point big integer with prec
    fractional bits (i.e. return round_down(s * 2**prec))."""
    sign, man, exp, bc = s
    shift = exp + prec
    # Apply the sign first; >> on a negative int floors, matching the
    # original's (-man) >> k behaviour.
    magnitude = -man if sign else man
    if shift >= 0:
        return magnitude << shift
    return magnitude >> (-shift)
##############################################################################
##############################################################################
#----------------------------------------------------------------------------#
# Arithmetic operations, etc. #
#----------------------------------------------------------------------------#
def mpf_rand(prec):
    """Return a raw mpf chosen randomly from [0, 1), with prec bits
    in the mantissa."""
    global getrandbits
    # random is imported lazily because it is slow to import (see the
    # module-level comment where getrandbits is set to None).
    if not getrandbits:
        import random
        getrandbits = random.getrandbits
    return from_man_exp(getrandbits(prec), -prec, prec, round_floor)
def mpf_eq(s, t):
    """Test two raw mpfs for equality.  Ordinary values compare by plain
    tuple comparison; nan is unequal to everything, including itself."""
    # Index 1 is the mantissa; a zero mantissa marks zero or a special.
    if s[1] and t[1]:
        return s == t
    if s == fnan or t == fnan:
        return False
    return s == t
def mpf_hash(s):
    """Hash a raw mpf so that values equal to a Python float hash alike."""
    try:
        # Try to be compatible with hash values for floats and ints
        return hash(to_float(s, strict=1))
    except OverflowError:
        # We must unfortunately sacrifice compatibility with ints here. We
        # could do hash(man << exp) when the exponent is positive, but
        # this would cause unreasonable inefficiency for large numbers.
        return hash(s)
def mpf_cmp(s, t):
    """Compare the raw mpfs s and t. Return -1 if s < t, 0 if s == t,
    and 1 if s > t. (Same convention as Python's cmp() function.)"""
    # In principle, a comparison amounts to determining the sign of s-t.
    # A full subtraction is relatively slow, however, so we first try to
    # look at the components.
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    # Handle zeros and special numbers
    if not sman or not tman:
        if s == fzero: return -mpf_sign(t)
        if t == fzero: return mpf_sign(s)
        if s == t: return 0
        # Follow same convention as Python's cmp for float nan
        if t == fnan: return 1
        if s == finf: return 1
        if t == fninf: return 1
        return -1
    # Different sides of zero
    if ssign != tsign:
        if not ssign: return 1
        return -1
    # This reduces to direct integer comparison
    if sexp == texp:
        if sman == tman:
            return 0
        if sman > tman:
            if ssign: return -1
            else: return 1
        else:
            if ssign: return 1
            else: return -1
    # Check position of the highest set bit in each number. If
    # different, there is certainly an inequality.
    a = sbc + sexp
    b = tbc + texp
    if ssign:
        if a < b: return 1
        if a > b: return -1
    else:
        if a < b: return -1
        if a > b: return 1
    # Both numbers have the same highest bit. Subtract to find
    # how the lower bits compare.
    # 5 bits suffice since only the sign of the difference is needed.
    delta = mpf_sub(s, t, 5, round_floor)
    if delta[0]:
        return -1
    return 1
def mpf_lt(s, t):
    """Return s < t (False whenever either operand is nan)."""
    if s == fnan or t == fnan:
        return False
    return mpf_cmp(s, t) < 0
def mpf_le(s, t):
    """Return s <= t (False whenever either operand is nan)."""
    if s == fnan or t == fnan:
        return False
    return mpf_cmp(s, t) <= 0
def mpf_gt(s, t):
    """Return s > t (False whenever either operand is nan)."""
    if s == fnan or t == fnan:
        return False
    return mpf_cmp(s, t) > 0
def mpf_ge(s, t):
    """Return s >= t (False whenever either operand is nan)."""
    if s == fnan or t == fnan:
        return False
    return mpf_cmp(s, t) >= 0
def mpf_min_max(seq):
    """Return (minimum, maximum) of a nonempty sequence of raw mpfs
    in a single pass.  (The locals deliberately shadow the builtins.)"""
    min = max = seq[0]
    for x in seq[1:]:
        if mpf_lt(x, min): min = x
        if mpf_gt(x, max): max = x
    return min, max
def mpf_pos(s, prec=0, rnd=round_fast):
    """Calculate 0+s for a raw mpf (i.e., just round s to the specified
    precision)."""
    if prec:
        sign, man, exp, bc = s
        if (not man) and exp:
            # Specials pass through unchanged.
            return s
        return normalize1(sign, man, exp, bc, prec, rnd)
    # prec=0 means no rounding at all.
    return s
def mpf_neg(s, prec=None, rnd=round_fast):
    """Negate a raw mpf (return -s), rounding the result to the
    specified precision. The prec argument can be omitted to do the
    operation exactly."""
    sign, man, exp, bc = s
    if not man:
        # Zero negates to itself; swap the infinities; nan stays nan.
        if exp:
            if s == finf: return fninf
            if s == fninf: return finf
        return s
    if not prec:
        # Exact: just flip the sign bit.
        return (1-sign, man, exp, bc)
    return normalize1(1-sign, man, exp, bc, prec, rnd)
def mpf_abs(s, prec=None, rnd=round_fast):
    """Return abs(s) of the raw mpf s, rounded to the specified
    precision. The prec argument can be omitted to generate an
    exact result."""
    sign, man, exp, bc = s
    if (not man) and exp:
        # |-inf| = inf; inf and nan are unchanged.
        if s == fninf:
            return finf
        return s
    if not prec:
        if sign:
            return (0, man, exp, bc)
        return s
    return normalize1(0, man, exp, bc, prec, rnd)
def mpf_sign(s):
    """Return -1, 0, or 1 (as a Python int, not a raw mpf) according to
    whether s is negative, zero, or positive.  nan maps to 0."""
    sign, man, exp, bc = s
    if man:
        # Regular nonzero number: the sign bit decides.
        return -1 if sign else 1
    if s == finf: return 1
    if s == fninf: return -1
    # Zero (either sign) and nan.
    return 0
def mpf_add(s, t, prec=0, rnd=round_fast, _sub=0):
    """
    Add the two raw mpf values s and t.
    With prec=0, no rounding is performed. Note that this can
    produce a very large mantissa (potentially too large to fit
    in memory) if exponents are far apart.

    The private _sub flag flips t's sign so mpf_sub can share this code.
    """
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    tsign ^= _sub
    # Standard case: two nonzero, regular numbers
    if sman and tman:
        offset = sexp - texp
        if offset:
            if offset > 0:
                # Outside precision range; only need to perturb
                # (the smaller term only affects the final rounding, so
                # replace it by a 1-ulp nudge on the larger mantissa).
                if offset > 100 and prec:
                    delta = sbc + sexp - tbc - texp
                    if delta > prec + 4:
                        offset = prec + 4
                        sman <<= offset
                        if tsign == ssign: sman += 1
                        else: sman -= 1
                        return normalize1(ssign, sman, sexp-offset,
                            bitcount(sman), prec, rnd)
                # Add
                if ssign == tsign:
                    man = tman + (sman << offset)
                # Subtract
                else:
                    if ssign: man = tman - (sman << offset)
                    else:     man = (sman << offset) - tman
                    if man >= 0:
                        ssign = 0
                    else:
                        man = -man
                        ssign = 1
                bc = bitcount(man)
                return normalize1(ssign, man, texp, bc, prec or bc, rnd)
            elif offset < 0:
                # Outside precision range; only need to perturb
                if offset < -100 and prec:
                    delta = tbc + texp - sbc - sexp
                    if delta > prec + 4:
                        offset = prec + 4
                        tman <<= offset
                        if ssign == tsign: tman += 1
                        else: tman -= 1
                        return normalize1(tsign, tman, texp-offset,
                            bitcount(tman), prec, rnd)
                # Add
                if ssign == tsign:
                    man = sman + (tman << -offset)
                # Subtract
                else:
                    if tsign: man = sman - (tman << -offset)
                    else:     man = (tman << -offset) - sman
                    if man >= 0:
                        ssign = 0
                    else:
                        man = -man
                        ssign = 1
                bc = bitcount(man)
                return normalize1(ssign, man, sexp, bc, prec or bc, rnd)
        # Equal exponents; no shifting necessary
        if ssign == tsign:
            man = tman + sman
        else:
            if ssign: man = tman - sman
            else:     man = sman - tman
            if man >= 0:
                ssign = 0
            else:
                man = -man
                ssign = 1
        bc = bitcount(man)
        return normalize(ssign, man, texp, bc, prec or bc, rnd)
    # Handle zeros and special numbers
    if _sub:
        t = mpf_neg(t)
    if not sman:
        if sexp:
            # s is inf/-inf/nan; inf + (-inf) and nan propagate to nan.
            if s == t or tman or not texp:
                return s
            return fnan
        if tman:
            return normalize1(tsign, tman, texp, tbc, prec or tbc, rnd)
        return t
    if texp:
        return t
    if sman:
        return normalize1(ssign, sman, sexp, sbc, prec or sbc, rnd)
    return s
def mpf_sub(s, t, prec=0, rnd=round_fast):
    """Return the difference of two raw mpfs, s-t. This function is
    simply a wrapper of mpf_add that changes the sign of t."""
    return mpf_add(s, t, prec, rnd, 1)
def mpf_sum(xs, prec=0, rnd=round_fast, absolute=False):
    """
    Sum a list of mpf values efficiently and accurately
    (typically no temporary roundoff occurs). If prec=0,
    the final result will not be rounded either.
    There may be roundoff error or cancellation if extremely
    large exponent differences occur.
    With absolute=True, sums the absolute values.
    """
    # Running exact sum as a (man, exp) pair; rounded only at the end.
    man = 0
    exp = 0
    max_extra_prec = prec*2 or 1000000 # XXX
    special = None
    for x in xs:
        xsign, xman, xexp, xbc = x
        if xman:
            if xsign and not absolute:
                xman = -xman
            delta = xexp - exp
            if xexp >= exp:
                # x much larger than existing sum?
                # first: quick test
                if (delta > max_extra_prec) and \
                    ((not man) or delta-bitcount(abs(man)) > max_extra_prec):
                    man = xman
                    exp = xexp
                else:
                    man += (xman << delta)
            else:
                delta = -delta
                # x much smaller than existing sum?
                if delta-xbc > max_extra_prec:
                    if not man:
                        man, exp = xman, xexp
                else:
                    man = (man << delta) + xman
                    exp = xexp
        elif xexp:
            # Special value (inf/nan): accumulate separately via mpf_add
            # so that e.g. inf + (-inf) correctly produces nan.
            if absolute:
                x = mpf_abs(x)
            special = mpf_add(special or fzero, x, 1)
    # Will be inf or nan
    if special:
        return special
    return from_man_exp(man, exp, prec, rnd)
def gmpy_mpf_mul(s, t, prec=0, rnd=round_fast):
    """Multiply two raw mpfs"""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    sign = ssign ^ tsign
    man = sman*tman
    if man:
        # gmpy mantissas: recompute the bitcount directly (cheap in C).
        bc = bitcount(man)
        if prec:
            return normalize1(sign, man, sexp+texp, bc, prec, rnd)
        else:
            return (sign, man, sexp+texp, bc)
    # At least one factor is zero or a special value.
    s_special = (not sman) and sexp
    t_special = (not tman) and texp
    if not s_special and not t_special:
        return fzero
    if fnan in (s, t): return fnan
    # Ensure s holds the special operand.
    if (not tman) and texp: s, t = t, s
    # inf * 0 is undefined.
    if t == fzero: return fnan
    return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
def gmpy_mpf_mul_int(s, n, prec, rnd=round_fast):
    """Multiply by a Python integer."""
    sign, man, exp, bc = s
    if not man:
        # Zero or special: defer to full multiplication.
        return mpf_mul(s, from_int(n), prec, rnd)
    if not n:
        return fzero
    if n < 0:
        sign ^= 1
        n = -n
    man *= n
    return normalize(sign, man, exp, bitcount(man), prec, rnd)
def python_mpf_mul(s, t, prec=0, rnd=round_fast):
    """Multiply two raw mpfs"""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    sign = ssign ^ tsign
    man = sman*tman
    if man:
        # The product's bitcount is sbc+tbc or sbc+tbc-1; test the top bit
        # instead of calling bitcount (cheaper in pure Python).
        bc = sbc + tbc - 1
        bc += int(man>>bc)
        if prec:
            return normalize1(sign, man, sexp+texp, bc, prec, rnd)
        else:
            return (sign, man, sexp+texp, bc)
    # At least one factor is zero or a special value.
    s_special = (not sman) and sexp
    t_special = (not tman) and texp
    if not s_special and not t_special:
        return fzero
    if fnan in (s, t): return fnan
    # Ensure s holds the special operand; inf * 0 is undefined.
    if (not tman) and texp: s, t = t, s
    if t == fzero: return fnan
    return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
def python_mpf_mul_int(s, n, prec, rnd=round_fast):
    """Multiply by a Python integer."""
    sign, man, exp, bc = s
    if not man:
        return mpf_mul(s, from_int(n), prec, rnd)
    if not n:
        return fzero
    if n < 0:
        sign ^= 1
        n = -n
    man *= n
    # Generally n will be small
    if n < 1024:
        bc += bctable[int(n)] - 1
    else:
        bc += bitcount(n) - 1
    # Correct the off-by-one estimate by testing the top bit.
    bc += int(man>>bc)
    return normalize(sign, man, exp, bc, prec, rnd)
# Select the multiplication implementation matching the integer backend.
if BACKEND == 'gmpy':
    mpf_mul = gmpy_mpf_mul
    mpf_mul_int = gmpy_mpf_mul_int
else:
    mpf_mul = python_mpf_mul
    mpf_mul_int = python_mpf_mul_int
def mpf_shift(s, n):
    """Multiply the raw mpf s by 2**n exactly (no rounding) by adjusting
    the exponent."""
    sign, man, exp, bc = s
    if man:
        return sign, man, exp + n, bc
    # Zero and the special values are unaffected by scaling.
    return s
def mpf_frexp(x):
    """Convert x = y*2**n to (y, n) with abs(y) in [0.5, 1) if nonzero"""
    sign, man, exp, bc = x
    if not man:
        if x == fzero:
            return (fzero, 0)
        else:
            # inf/-inf/nan cannot be decomposed.
            raise ValueError
    # Shifting by -(bc+exp) scales the magnitude into [0.5, 1).
    return mpf_shift(x, -bc-exp), bc+exp
def mpf_div(s, t, prec, rnd=round_fast):
    """Floating-point division"""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    if not sman or not tman:
        # Zeros and special values.
        if s == fzero:
            if t == fzero: raise ZeroDivisionError
            if t == fnan: return fnan
            return fzero
        if t == fzero:
            raise ZeroDivisionError
        s_special = (not sman) and sexp
        t_special = (not tman) and texp
        if s_special and t_special:
            # inf/inf etc. are undefined.
            return fnan
        if s == fnan or t == fnan:
            return fnan
        if not t_special:
            if t == fzero:
                return fnan
            return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
        # finite / inf -> 0
        return fzero
    sign = ssign ^ tsign
    if tman == 1:
        # Dividing by a power of two is exact: just adjust the exponent.
        return normalize1(sign, sman, sexp-texp, sbc, prec, rnd)
    # Same strategy as for addition: if there is a remainder, perturb
    # the result a few bits outside the precision range before rounding
    extra = prec - sbc + tbc + 5
    if extra < 5:
        extra = 5
    quot, rem = divmod(sman<<extra, tman)
    if rem:
        # Set the sticky bit so directed/nearest rounding is correct.
        quot = (quot<<1) + 1
        extra += 1
        return normalize1(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
    return normalize(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
def mpf_rdiv_int(n, t, prec, rnd=round_fast):
    """Floating-point division n/t with a Python integer as numerator"""
    sign, man, exp, bc = t
    if not n or not man:
        # Zero numerator or zero/special denominator: use the general path.
        return mpf_div(from_int(n), t, prec, rnd)
    if n < 0:
        sign ^= 1
        n = -n
    # Work with extra guard bits, then set a sticky bit if inexact
    # (same perturbation strategy as mpf_div).
    extra = prec + bc + 5
    quot, rem = divmod(n<<extra, man)
    if rem:
        quot = (quot<<1) + 1
        extra += 1
        return normalize1(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
    return normalize(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
def mpf_mod(s, t, prec, rnd=round_fast):
    """Floating-point modulo s % t with Python's sign convention
    (result has the sign of t, via integer %)."""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    if ((not sman) and sexp) or ((not tman) and texp):
        # Any special operand gives nan.
        return fnan
    # Important special case: do nothing if t is larger
    if ssign == tsign and texp > sexp+sbc:
        return s
    # Another important special case: this allows us to do e.g. x % 1.0
    # to find the fractional part of x, and it will work when x is huge.
    if tman == 1 and sexp > texp+tbc:
        return fzero
    # Align both mantissas to the smaller exponent and use integer %.
    base = min(sexp, texp)
    sman = (-1)**ssign * sman
    tman = (-1)**tsign * tman
    man = (sman << (sexp-base)) % (tman << (texp-base))
    if man >= 0:
        sign = 0
    else:
        man = -man
        sign = 1
    return normalize(sign, man, base, bitcount(man), prec, rnd)
# Rounding direction to use for an intermediate reciprocal so that the
# final result 1/(1/x) is rounded in the requested direction.
reciprocal_rnd = {
  round_down : round_up,
  round_up : round_down,
  round_floor : round_ceiling,
  round_ceiling : round_floor,
  round_nearest : round_nearest
}
# Rounding direction to use for a value that will subsequently be negated
# (floor/ceiling swap; magnitude-based modes are unaffected).
negative_rnd = {
  round_down : round_down,
  round_up : round_up,
  round_floor : round_ceiling,
  round_ceiling : round_floor,
  round_nearest : round_nearest
}
def mpf_pow_int(s, n, prec, rnd=round_fast):
    """Compute s**n, where s is a raw mpf and n is a Python integer."""
    sign, man, exp, bc = s
    if (not man) and exp:
        # Special bases: powers of +/-inf and nan.
        if s == finf:
            if n > 0: return s
            if n == 0: return fnan
            return fzero
        if s == fninf:
            # Odd powers of -inf stay negative.
            if n > 0: return [finf, fninf][n & 1]
            if n == 0: return fnan
            return fzero
        return fnan
    n = int(n)
    if n == 0: return fone
    if n == 1: return mpf_pos(s, prec, rnd)
    if n == 2:
        # Squaring fast path: the result's sign is always positive.
        _, man, exp, bc = s
        if not man:
            return fzero
        man = man*man
        if man == 1:
            return (0, MPZ_ONE, exp+exp, 1)
        bc = bc + bc - 2
        bc += bctable[int(man>>bc)]
        return normalize1(0, man, exp+exp, bc, prec, rnd)
    if n == -1: return mpf_div(fone, s, prec, rnd)
    if n < 0:
        # Compute the positive power with the opposite rounding so the
        # final reciprocal is rounded in the requested direction.
        inverse = mpf_pow_int(s, -n, prec+5, reciprocal_rnd[rnd])
        return mpf_div(fone, inverse, prec, rnd)
    # Negative base to an odd power gives a negative result.
    result_sign = sign & n
    # Use exact integer power when the exact mantissa is small
    if man == 1:
        return (result_sign, MPZ_ONE, exp*n, 1)
    if bc*n < 1000:
        man **= n
        return normalize1(result_sign, man, exp*n, bitcount(man), prec, rnd)
    # Use directed rounding all the way through to maintain rigorous
    # bounds for interval arithmetic
    rounds_down = (rnd == round_nearest) or \
        shifts_down[rnd][result_sign]
    # Now we perform binary exponentiation. Need to estimate precision
    # to avoid rounding errors from temporary operations. Roughly log_2(n)
    # operations are performed.
    workprec = prec + 4*bitcount(n) + 4
    _, pm, pe, pbc = fone
    while 1:
        if n & 1:
            # Multiply the accumulated power by the current square.
            pm = pm*man
            pe = pe+exp
            pbc += bc - 2
            pbc = pbc + bctable[int(pm >> pbc)]
            if pbc > workprec:
                if rounds_down:
                    pm = pm >> (pbc-workprec)
                else:
                    pm = -((-pm) >> (pbc-workprec))
                pe += pbc - workprec
                pbc = workprec
            n -= 1
            if not n:
                break
        # Square the base for the next bit of the exponent.
        man = man*man
        exp = exp+exp
        bc = bc + bc - 2
        bc = bc + bctable[int(man >> bc)]
        if bc > workprec:
            if rounds_down:
                man = man >> (bc-workprec)
            else:
                man = -((-man) >> (bc-workprec))
            exp += bc - workprec
            bc = workprec
        n = n // 2
    return normalize(result_sign, pm, pe, pbc, prec, rnd)
def mpf_perturb(x, eps_sign, prec, rnd):
    """
    For nonzero x, calculate x + eps with directed rounding, where
    eps < prec relatively and eps has the given sign (0 for
    positive, 1 for negative).

    With rounding to nearest, this is taken to simply normalize
    x to the given precision.
    """
    if rnd == round_nearest:
        return mpf_pos(x, prec, rnd)
    sign, man, exp, bc = x
    # eps is a one-ulp-sized quantity relative to x: magnitude 2^(exp+bc-prec-1)
    eps = (eps_sign, MPZ_ONE, exp+bc-prec-1, 1)
    if sign:
        # Negative x: whether the perturbation survives the rounding
        # depends on the rounding direction XOR the sign of eps
        away = (rnd in (round_down, round_ceiling)) ^ eps_sign
    else:
        away = (rnd in (round_up, round_ceiling)) ^ eps_sign
    if away:
        # The perturbation is visible in the rounded result: really add it
        return mpf_add(x, eps, prec, rnd)
    else:
        # The perturbation would be rounded off anyway: just renormalize
        return mpf_pos(x, prec, rnd)
#----------------------------------------------------------------------------#
# Radix conversion #
#----------------------------------------------------------------------------#
def to_digits_exp(s, dps):
    """Helper function for representing the floating-point number s as
    a decimal with dps digits. Returns (sign, string, exponent) where
    sign is '' or '-', string is the digit string, and exponent is
    the decimal exponent as an int.

    If inexact, the decimal representation is rounded toward zero."""
    # Extract sign first so it doesn't mess up the string digit count
    if s[0]:
        sign = '-'
        s = mpf_neg(s)
    else:
        sign = ''
    _sign, man, exp, bc = s
    if not man:
        # Zero: empty sign, single digit, exponent 0
        return '', '0', 0
    # Working binary precision, slightly more than the requested decimal digits
    bitprec = int(dps * math.log(10,2)) + 10
    # Cut down to size
    # TODO: account for precision when doing this
    exp_from_1 = exp + bc
    if abs(exp_from_1) > 3500:
        from libelefun import mpf_ln2, mpf_ln10
        # Set b = int(exp * log(2)/log(10))
        # If exp is huge, we must use high-precision arithmetic to
        # find the nearest power of ten
        expprec = bitcount(abs(exp)) + 5
        tmp = from_int(exp)
        tmp = mpf_mul(tmp, mpf_ln2(expprec))
        tmp = mpf_div(tmp, mpf_ln10(expprec), expprec)
        b = to_int(tmp)
        # Divide out 10**b so the remaining number has a moderate exponent
        s = mpf_div(s, mpf_pow_int(ften, b, bitprec), bitprec)
        _sign, man, exp, bc = s
        exponent = b
    else:
        exponent = 0
    # First, calculate mantissa digits by converting to a binary
    # fixed-point number and then converting that number to
    # a decimal fixed-point number.
    fixprec = max(bitprec - exp - bc, 0)
    fixdps = int(fixprec / math.log(10,2) + 0.5)
    sf = to_fixed(s, fixprec)
    sd = bin_to_radix(sf, fixprec, 10, fixdps)
    digits = numeral(sd, base=10, size=dps)
    # Adjust the decimal exponent for the digits produced before the
    # implicit radix point
    exponent += len(digits) - fixdps - 1
    return sign, digits, exponent
def to_str(s, dps, strip_zeros=True, min_fixed=None, max_fixed=None,
    show_zero_exponent=False):
    """
    Convert a raw mpf to a decimal floating-point literal with at
    most `dps` decimal digits in the mantissa (not counting extra zeros
    that may be inserted for visual purposes).

    The number will be printed in fixed-point format if the position
    of the leading digit is strictly between min_fixed
    (default = min(-dps/3,-5)) and max_fixed (default = dps).

    To force fixed-point format always, set min_fixed = -inf,
    max_fixed = +inf. To force floating-point format, set
    min_fixed >= max_fixed.

    The literal is formatted so that it can be parsed back to a number
    by to_str, float() or Decimal().
    """
    # Special numbers
    if not s[1]:
        if s == fzero:
            if dps: t = '0.0'
            else: t = '.0'
            if show_zero_exponent:
                t += 'e+0'
            return t
        if s == finf: return '+inf'
        if s == fninf: return '-inf'
        if s == fnan: return 'nan'
        raise ValueError
    if min_fixed is None: min_fixed = min(-(dps//3), -5)
    if max_fixed is None: max_fixed = dps
    # to_digits_exp rounds to floor.
    # This sometimes kills some instances of "...00001";
    # request 3 extra digits so the result can be rounded properly below.
    sign, digits, exponent = to_digits_exp(s, dps+3)
    # No digits: show only .0; round exponent to nearest
    if not dps:
        if digits[0] in '56789':
            exponent += 1
        digits = ".0"
    else:
        # Rounding up kills some instances of "...99999"
        if len(digits) > dps and digits[dps] in '56789' and \
            (dps < 500 or digits[dps-4:dps] == '9999'):
            digits2 = str(int(digits[:dps]) + 1)
            if len(digits2) > dps:
                # The round-up carried all the way through (e.g. 999 -> 1000)
                digits2 = digits2[:dps]
                exponent += 1
            digits = digits2
        else:
            digits = digits[:dps]
        # Prettify numbers close to unit magnitude
        if min_fixed < exponent < max_fixed:
            if exponent < 0:
                # Pad with leading zeros: 0.00ddd...
                digits = ("0"*int(-exponent)) + digits
                split = 1
            else:
                split = exponent + 1
                if split > dps:
                    # Pad with trailing zeros up to the radix point
                    digits += "0"*(split-dps)
            exponent = 0
        else:
            split = 1
        digits = (digits[:split] + "." + digits[split:])
        if strip_zeros:
            # Clean up trailing zeros
            digits = digits.rstrip('0')
            if digits[-1] == ".":
                digits += "0"
    if exponent == 0 and dps and not show_zero_exponent: return sign + digits
    if exponent >= 0: return sign + digits + "e+" + str(exponent)
    if exponent < 0: return sign + digits + "e" + str(exponent)
def str_to_man_exp(x, base=10):
    """Helper function for from_str.

    Split a float literal into an integer mantissa and a decimal
    exponent such that the value equals mantissa * 10**exponent.
    """
    # Reject anything that is not a valid float literal
    float(x)
    text = x.lower()
    # Peel off an explicit exponent part, if one is present
    mantissa_part, sep, exp_part = text.partition('e')
    exponent = int(exp_part) if sep else 0
    # Fold a radix point into the exponent
    int_part, sep, frac_part = mantissa_part.partition('.')
    if sep:
        frac_part = frac_part.rstrip('0')
        exponent -= len(frac_part)
        mantissa_part = int_part + frac_part
    return MPZ(int(mantissa_part, base)), exponent
# Recognized string spellings of the floating-point special values
special_str = {'inf':finf, '+inf':finf, '-inf':fninf, 'nan':fnan}
def from_str(x, prec, rnd=round_fast):
    """Create a raw mpf from a decimal literal, rounding in the
    specified direction if the input number cannot be represented
    exactly as a binary floating-point number with the given number of
    bits. The literal syntax accepted is the same as for Python
    floats.

    Fraction literals 'p/q' and the special spellings in special_str
    ('inf', '+inf', '-inf', 'nan') are also accepted.

    TODO: the rounding does not work properly for large exponents.
    """
    x = x.strip()
    if x in special_str:
        return special_str[x]
    if '/' in x:
        # Rational literal p/q
        p, q = x.split('/')
        return from_rational(int(p), int(q), prec, rnd)
    man, exp = str_to_man_exp(x, base=10)
    # XXX: appropriate cutoffs & track direction
    # note no factors of 5
    if abs(exp) > 400:
        # Large exponent: go through a floating power of ten with guard bits
        s = from_int(man, prec+10)
        s = mpf_mul(s, mpf_pow_int(ften, exp, prec+10), prec, rnd)
    else:
        # Small exponent: exact integer or rational scaling
        if exp >= 0:
            s = from_int(man * 10**exp, prec, rnd)
        else:
            s = from_rational(man, 10**-exp, prec, rnd)
    return s
# Binary string conversion. These are currently mainly used for debugging
# and could use some improvement in the future
def from_bstr(x):
    """Parse a binary literal string (as produced by to_bstr) into a
    raw mpf, rounding toward floor at the mantissa's own bit size."""
    man, exp = str_to_man_exp(x, base=2)
    # str_to_man_exp already returns an MPZ; this re-wrap is a no-op
    man = MPZ(man)
    sign = 0
    if man < 0:
        man = -man
        sign = 1
    bc = bitcount(man)
    return normalize(sign, man, exp, bc, bc, round_floor)
def to_bstr(x):
    """Format a raw mpf as a binary digit string 'mmm...e<exp>'
    (mainly useful for debugging)."""
    sign, man, exp, bc = x
    prefix = '-' if sign else ''
    digits = numeral(man, size=bitcount(man), base=2)
    return prefix + digits + ("e%i" % exp)
#----------------------------------------------------------------------------#
# Square roots #
#----------------------------------------------------------------------------#
def mpf_sqrt(s, prec, rnd=round_fast):
    """
    Compute the square root of a nonnegative mpf value. The
    result is correctly rounded.

    Raises ComplexResult for negative input.
    """
    sign, man, exp, bc = s
    if sign:
        raise ComplexResult("square root of a negative number")
    if not man:
        # Zero (and any zero-mantissa special value that reaches this
        # point) is returned unchanged
        return s
    if exp & 1:
        # Make the exponent even so sqrt(man * 2^exp) = sqrt(man) * 2^(exp/2)
        # works with integer arithmetic
        exp -= 1
        man <<= 1
        bc += 1
    elif man == 1:
        # Exact power of two
        return normalize1(sign, man, exp//2, bc, prec, rnd)
    # Shift up so the integer square root carries enough accuracy;
    # keep the shift even
    shift = max(4, 2*prec-bc+4)
    shift += shift & 1
    if rnd in 'fd':
        # Floor/down rounding: the truncated integer sqrt is already correct
        man = isqrt(man<<shift)
    else:
        man, rem = sqrtrem(man<<shift)
        # Perturb up
        if rem:
            man = (man<<1)+1
            shift += 2
    return from_man_exp(man, (exp-shift)//2, prec, rnd)
def mpf_hypot(x, y, prec, rnd=round_fast):
    """Compute the Euclidean norm sqrt(x**2 + y**2) of two raw mpfs
    x and y."""
    # Degenerate cases reduce to a plain absolute value
    if y == fzero: return mpf_abs(x, prec, rnd)
    if x == fzero: return mpf_abs(y, prec, rnd)
    # A few guard bits on the sum keep the final square root accurate
    hypot2 = mpf_add(mpf_mul(x,x), mpf_mul(y,y), prec+4)
    return mpf_sqrt(hypot2, prec, rnd)
if BACKEND == 'sage':
    try:
        # Swap in Sage's compiled implementations of the core arithmetic
        # routines when available; on ImportError the pure Python
        # definitions above silently remain in effect.
        import sage.libs.mpmath.ext_libmp as ext_lib
        mpf_add = ext_lib.mpf_add
        mpf_sub = ext_lib.mpf_sub
        mpf_mul = ext_lib.mpf_mul
        mpf_div = ext_lib.mpf_div
        mpf_sqrt = ext_lib.mpf_sqrt
    except ImportError:
        pass
| bsd-3-clause | a72dcc1e6b0318c4fd19ed131996cc0e | 30.682927 | 84 | 0.525626 | 3.537757 | false | false | false | false |
mattpap/sympy-polys | sympy/simplify/tests/test_simplify.py | 1 | 19954 | from sympy import Symbol, symbols, together, hypersimp, factorial, binomial, \
collect, Function, powsimp, separate, sin, exp, Rational, fraction, \
simplify, trigsimp, cos, tan, cot, log, ratsimp, Matrix, pi, integrate, \
solve, nsimplify, GoldenRatio, sqrt, E, I, sympify, atan, Derivative, \
S, diff, oo, Eq, Integer, gamma, acos, Integral, logcombine, separatevars
from sympy.utilities import all
from sympy.utilities.pytest import XFAIL
# Tests for ratsimp (rational-function simplification)
def test_ratsimp():
    x = Symbol("x")
    y = Symbol("y")
    e = 1/x+1/y
    assert e != (x+y)/(x*y)
    assert ratsimp(e) == (x+y)/(x*y)
    e = 1/(1+1/x)
    assert ratsimp(e) == x/(x+1)
    assert ratsimp(exp(e)) == exp(x/(x+1))
def test_ratsimp2():
    x = Symbol("x")
    e = 1/(1+1/x)
    assert (x+1)*ratsimp(e)/x == 1
# NOTE(review): the two XFAIL tests below use x and y without defining
# them, so they currently fail with NameError rather than on the
# simplification itself -- verify that this is intended.
@XFAIL
def test_ratsimp_X1():
    e = -x-y-(x+y)**(-1)*y**2+(x+y)**(-1)*x**2
    assert e != -2*y
    assert ratsimp(e) == -2*y
@XFAIL
def test_ratsimp_X2():
    e = x/(x+y)+y/(x+y)
    assert e != 1
    assert ratsimp(e) == 1
# Tests for trigsimp: Pythagorean identities, tan/cot reduction, and the
# deep/recursive options
def test_trigsimp1():
    x, y = symbols('x y')
    assert trigsimp(1 - sin(x)**2) == cos(x)**2
    assert trigsimp(1 - cos(x)**2) == sin(x)**2
    assert trigsimp(sin(x)**2 + cos(x)**2) == 1
    assert trigsimp(1 + tan(x)**2) == 1/cos(x)**2
    assert trigsimp(1/cos(x)**2 - 1) == tan(x)**2
    assert trigsimp(1/cos(x)**2 - tan(x)**2) == 1
    assert trigsimp(1 + cot(x)**2) == 1/sin(x)**2
    assert trigsimp(1/sin(x)**2 - 1) == cot(x)**2
    assert trigsimp(1/sin(x)**2 - cot(x)**2) == 1
    assert trigsimp(5*cos(x)**2 + 5*sin(x)**2) == 5
    assert trigsimp(5*cos(x/2)**2 + 2*sin(x/2)**2) in \
        [2 + 3*cos(x/2)**2, 5 - 3*sin(x/2)**2]
    assert trigsimp(sin(x)/cos(x)) == tan(x)
    assert trigsimp(2*tan(x)*cos(x)) == 2*sin(x)
    assert trigsimp(cot(x)**3*sin(x)**3) == cos(x)**3
    assert trigsimp(y*tan(x)**2/sin(x)**2) == y/cos(x)**2
    assert trigsimp(cot(x)/cos(x)) == 1/sin(x)
    assert trigsimp(cos(0.12345)**2 + sin(0.12345)**2) == 1
    e = 2*sin(x)**2 + 2*cos(x)**2
    assert trigsimp(log(e), deep=True) == log(2)
def test_trigsimp2():
    x, y = symbols('x y')
    assert trigsimp(cos(x)**2*sin(y)**2 + cos(x)**2*cos(y)**2 + sin(x)**2,
            recursive=True) == 1
    assert trigsimp(sin(x)**2*sin(y)**2 + sin(x)**2*cos(y)**2 + cos(x)**2,
            recursive=True) == 1
def test_issue1274():
    # Float coefficients should simplify to within numerical tolerance
    x = Symbol("x")
    assert abs(trigsimp(2.0*sin(x)**2+2.0*cos(x)**2)-2.0) < 1e-10
def test_trigsimp3():
    # sin/cos quotients of various powers reduce to tan powers
    x, y = symbols('x y')
    assert trigsimp(sin(x)/cos(x)) == tan(x)
    assert trigsimp(sin(x)**2/cos(x)**2) == tan(x)**2
    assert trigsimp(sin(x)**3/cos(x)**3) == tan(x)**3
    assert trigsimp(sin(x)**10/cos(x)**10) == tan(x)**10
    assert trigsimp(cos(x)/sin(x)) == 1/tan(x)
    assert trigsimp(cos(x)**2/sin(x)**2) == 1/tan(x)**2
    assert trigsimp(cos(x)**10/sin(x)**10) == 1/tan(x)**10
    assert trigsimp(tan(x)) == trigsimp(sin(x)/cos(x))
@XFAIL
def test_factorial_simplify():
    # There are more tests in test_factorials.py. These are just to
    # ensure that simplify() calls factorial_simplify correctly
    from sympy.specfun.factorials import factorial
    x = Symbol('x')
    assert simplify(factorial(x)/x) == factorial(x-1)
    assert simplify(factorial(factorial(x))) == factorial(factorial(x))
def test_simplify():
    x, y, z, k, n, m, w, f, s, A = symbols('xyzknmwfsA')
    assert all(simplify(tmp) == tmp for tmp in [I, E, oo, x, -x, -oo, -E, -I])
    e = 1/x + 1/y
    assert e != (x+y)/(x*y)
    assert simplify(e) == (x+y)/(x*y)
    e = A**2*s**4/(4*pi*k*m**3)
    assert simplify(e) == e
    e = (4+4*x-2*(2+2*x))/(2+2*x)
    assert simplify(e) == 0
    e = (-4*x*y**2-2*y**3-2*x**2*y)/(x+y)**2
    assert simplify(e) == -2*y
    e = -x-y-(x+y)**(-1)*y**2+(x+y)**(-1)*x**2
    assert simplify(e) == -2*y
    e = (x+x*y)/x
    assert simplify(e) == 1 + y
    # note: f is a Symbol here; calling it creates an applied function
    e = (f(x)+y*f(x))/f(x)
    assert simplify(e) == 1 + y
    e = (2 * (1/n - cos(n * pi)/n))/pi
    assert simplify(e) == (2 - 2*cos(pi*n))/(pi*n)
    e = integrate(1/(x**3+1), x).diff(x)
    assert simplify(e) == 1/(x**3+1)
    e = integrate(x/(x**2+3*x+1), x).diff(x)
    assert simplify(e) == x/(x**2+3*x+1)
    # Simplification of matrix-inverse entries
    A = Matrix([[2*k-m*w**2, -k], [-k, k-m*w**2]]).inv()
    assert simplify((A*Matrix([0,f]))[1]) == \
        (2*k*f - f*m*w**2)/(k**2 - 3*k*m*w**2 + m**2*w**4)
    # Simplification of a symbolic linear-system solution
    a, b, c, d, e, f, g, h, i = symbols('abcdefghi')
    f_1 = x*a + y*b + z*c - 1
    f_2 = x*d + y*e + z*f - 1
    f_3 = x*g + y*h + z*i - 1
    solutions = solve([f_1, f_2, f_3], x, y, z, simplified=False)
    assert simplify(solutions[y]) == \
        (a*i+c*d+f*g-a*f-c*g-d*i)/(a*e*i+b*f*g+c*d*h-a*f*h-b*d*i-c*e*g)
def test_simplify_issue_1308():
    assert simplify(exp(-Rational(1, 2)) + exp(-Rational(3, 2))) == \
        (1 + E)*exp(-Rational(3, 2))
    assert simplify(exp(1)+exp(-exp(1))) == (1 + exp(1 + E))*exp(-E)
# NOTE(review): despite its name this test carries no @XFAIL marker --
# confirm whether it is expected to pass or should be marked.
def test_simplify_fail1():
    x = Symbol('x')
    y = Symbol('y')
    e = (x+y)**2/(-4*x*y**2-2*y**3-2*x**2*y)
    assert simplify(e) == 1 / (-2*y)
def test_fraction():
    # fraction() splits an expression into (numerator, denominator)
    x, y, z = map(Symbol, 'xyz')
    assert fraction(Rational(1, 2)) == (1, 2)
    assert fraction(x) == (x, 1)
    assert fraction(1/x) == (1, x)
    assert fraction(x/y) == (x, y)
    assert fraction(x/2) == (x, 2)
    assert fraction(x*y/z) == (x*y, z)
    assert fraction(x/(y*z)) == (x, y*z)
    assert fraction(1/y**2) == (1, y**2)
    assert fraction(x/y**2) == (x, y**2)
    assert fraction((x**2+1)/y) == (x**2+1, y)
    assert fraction(x*(y+1)/y**7) == (x*(y+1), y**7)
    assert fraction(exp(-x), exact=True) == (exp(-x), 1)
def test_together():
    # together() combines terms over a common denominator
    x, y, z = map(Symbol, 'xyz')
    assert together(1/x) == 1/x
    assert together(1/x + 1) == (x+1)/x
    assert together(1/x + x) == (x**2+1)/x
    assert together(1/x + Rational(1, 2)) == (x+2)/(2*x)
    assert together(1/x + 2/y) == (2*x+y)/(y*x)
    assert together(1/(1 + 1/x)) == x/(1+x)
    assert together(x/(1 + 1/x)) == x**2/(1+x)
    assert together(1/x + 1/y + 1/z) == (x*y + x*z + y*z)/(x*y*z)
    assert together(1/(x*y) + 1/(x*y)**2) == y**(-2)*x**(-2)*(1+x*y)
    assert together(1/(x*y) + 1/(x*y)**4) == y**(-4)*x**(-4)*(1+x**3*y**3)
    assert together(1/(x**7*y) + 1/(x*y)**4) == y**(-4)*x**(-7)*(x**3+y**3)
    assert together(sin(1/x+1/y)) == sin(1/x+1/y)
    assert together(sin(1/x+1/y), deep=True) == sin((x+y)/(x*y))
    assert together(Rational(1, 2) + x/2) == (x+1)/2
    assert together(1/x**y + 1/x**(y-1)) == x**(-y)*(1 + x)
def test_separate():
    # separate() distributes powers over products
    x, y, z = symbols('xyz')
    assert separate((x*y*z)**4) == x**4*y**4*z**4
    assert separate((x*y*z)**x) == x**x*y**x*z**x
    assert separate((x*(y*z)**2)**3) == x**3*y**6*z**6
    assert separate((sin((x*y)**2)*y)**z) == sin((x*y)**2)**z*y**z
    assert separate((sin((x*y)**2)*y)**z, deep=True) == sin(x**2*y**2)**z*y**z
    assert separate(exp(x)**2) == exp(2*x)
    assert separate((exp(x)*exp(y))**2) == exp(2*x)*exp(2*y)
    assert separate((exp((x*y)**z)*exp(y))**2) == exp(2*(x*y)**z)*exp(2*y)
    assert separate((exp((x*y)**z)*exp(y))**2, deep=True) == exp(2*x**z*y**z)*exp(2*y)
# NOTE(review): unlike the other *_X1 tests in this file, this one is not
# marked @XFAIL -- verify whether that is intentional.
def test_separate_X1():
    x, y, z = map(Symbol, 'xyz')
    assert separate((exp(x)*exp(y))**z) == exp(x*z)*exp(y*z)
def test_powsimp():
    x, y, z, n = symbols('xyzn')
    f = Function('f')
    assert powsimp( 4**x * 2**(-x) * 2**(-x) ) == 1
    assert powsimp( (-4)**x * (-2)**(-x) * 2**(-x) ) == 1
    assert powsimp( f(4**x * 2**(-x) * 2**(-x)) ) == f(4**x * 2**(-x) * 2**(-x))
    assert powsimp( f(4**x * 2**(-x) * 2**(-x)), deep = True ) == f(1)
    assert exp(x)*exp(y) == exp(x)*exp(y)
    assert powsimp(exp(x)*exp(y)) == exp(x+y)
    assert powsimp(exp(x)*exp(y)*2**x*2**y) == (2*E)**(x + y)
    assert powsimp(exp(x)*exp(y)*2**x*2**y, combine='exp') == exp(x+y)*2**(x+y)
    assert powsimp(exp(x)*exp(y)*exp(2)*sin(x)+sin(y)+2**x*2**y) == exp(2+x+y)*sin(x)+sin(y)+2**(x+y)
    assert powsimp(sin(exp(x)*exp(y))) == sin(exp(x)*exp(y))
    assert powsimp(sin(exp(x)*exp(y)), deep=True) == sin(exp(x+y))
    assert powsimp(x**2*x**y) == x**(2+y)
    # This should remain factored, because 'exp' with deep=True is supposed
    # to act like old automatic exponent combining.
    assert powsimp((1 + E*exp(E))*exp(-E), combine='exp', deep=True) == (1 + exp(1 + E))*exp(-E)
    assert powsimp((1 + E*exp(E))*exp(-E), deep=True) == exp(1) + exp(-E)
    # This should not change without deep. Otherwise, simplify() will fail.
    assert powsimp((1 + E*exp(E))*exp(-E)) == (1 + E*exp(E))*exp(-E)
    assert powsimp((1 + E*exp(E))*exp(-E), combine='exp') == (1 + E*exp(E))*exp(-E)
    assert powsimp((1 + E*exp(E))*exp(-E), combine='base') == (1 + E*exp(E))*exp(-E)
    x,y = symbols('xy', nonnegative=True)
    n = Symbol('n', real=True)
    assert powsimp( y**n * (y/x)**(-n) ) == x**n
    assert powsimp(x**(x**(x*y)*y**(x*y))*y**(x**(x*y)*y**(x*y)),deep=True) == (x*y)**(x*y)**(x*y)
    assert powsimp(2**(2**(2*x)*x), deep=False) == 2**(2**(2*x)*x)
    assert powsimp(2**(2**(2*x)*x), deep=True) == 2**(x*4**x)
    assert powsimp(exp(-x + exp(-x)*exp(-x*log(x))), deep=False, combine='exp') == exp(-x + exp(-x)*exp(-x*log(x)))
    # NOTE(review): the following assertion duplicates the previous one verbatim.
    assert powsimp(exp(-x + exp(-x)*exp(-x*log(x))), deep=False, combine='exp') == exp(-x + exp(-x)*exp(-x*log(x)))
    assert powsimp((x+y)/(3*z), deep=False, combine='exp') == (x+y)/(3*z)
    assert powsimp((x/3+y/3)/z, deep=True, combine='exp') == (x/3+y/3)/z
    assert powsimp(exp(x)/(1 + exp(x)*exp(y)), deep=True) == exp(x)/(1 + exp(x + y))
    assert powsimp(x*y**(z**x*z**y), deep=True) == x*y**(z**(x + y))
    assert powsimp((z**x*z**y)**x, deep=True) == (z**(x + y))**x
    assert powsimp(x*(z**x*z**y)**x, deep=True) == x*(z**(x + y))**x
def test_collect_1():
    """Collect with respect to a Symbol"""
    x, y, z, n = symbols('xyzn')
    assert collect( x + y*x, x ) == x * (1 + y)
    assert collect( x + x**2, x ) == x + x**2
    assert collect( x**2 + y*x**2, x ) == (x**2)*(1+y)
    assert collect( x**2 + y*x, x ) == x*y + x**2
    assert collect( 2*x**2 + y*x**2 + 3*x*y, [x] ) == x**2*(2+y) + 3*x*y
    assert collect( 2*x**2 + y*x**2 + 3*x*y, [y] ) == 2*x**2 + y*(x**2+3*x)
    assert collect( ((1 + y + x)**4).expand(), x) == ((1 + y)**4).expand() + \
        x*(4*(1 + y)**3).expand() + x**2*(6*(1 + y)**2).expand() + \
        x**3*(4*(1 + y)).expand() + x**4
def test_collect_2():
    """Collect with respect to a sum"""
    a, b, x = symbols('abx')
    assert collect(a*(cos(x)+sin(x)) + b*(cos(x)+sin(x)), sin(x)+cos(x)) == (a + b)*(cos(x) + sin(x))
def test_collect_3():
    """Collect with respect to a product"""
    a, b, c = symbols('abc')
    f = Function('f')
    x, y, z, n = symbols('xyzn')
    assert collect(-x/8 + x*y, -x) == -x*(S.One/8 - y)
    assert collect( 1 + x*(y**2), x*y ) == 1 + x*(y**2)
    assert collect( x*y + a*x*y, x*y) == x*y*(1 + a)
    assert collect( 1 + x*y + a*x*y, x*y) == 1 + x*y*(1 + a)
    assert collect(a*x*f(x) + b*(x*f(x)), x*f(x)) == x*(a + b)*f(x)
    assert collect(a*x*log(x) + b*(x*log(x)), x*log(x)) == x*(a + b)*log(x)
    assert collect(a*x**2*log(x)**2 + b*(x*log(x))**2, x*log(x)) == x**2*log(x)**2*(a + b)
    # with respect to a product of three symbols
    assert collect(y*x*z+a*x*y*z, x*y*z) == (1 + a)*x*y*z
def test_collect_4():
    """Collect with respect to a power"""
    a, b, c, x = symbols('abcx')
    assert collect(a*x**c + b*x**c, x**c) == x**c*(a + b)
    assert collect(a*x**(2*c) + b*x**(2*c), x**c) == (x**2)**c*(a + b)
def test_collect_5():
    """Collect with respect to a tuple"""
    a, x, y, z, n = symbols('axyzn')
    assert collect(x**2*y**4 + z*(x*y**2)**2 + z + a*z, [x*y**2, z]) in [
        z*(1 + a + x**2*y**4) + x**2*y**4,
        z*(1 + a) + x**2*y**4*(1 + z) ]
    assert collect((1+ (x+y) + (x+y)**2).expand(),
        [x, y]) == 1 + y + x*(1 + 2*y) + x**2 + y**2
def test_collect_D():
    # Collect with respect to derivatives of an undefined function
    D = Derivative
    f = Function('f')
    x, a, b = symbols('xab')
    fx = D(f(x), x)
    fxx = D(f(x), x, x)
    assert collect(a*fx + b*fx, fx) == (a + b)*fx
    assert collect(a*D(fx, x) + b*D(fx, x), fx) == (a + b)*D(fx, x)
    assert collect(a*fxx + b*fxx , fx) == (a + b)*D(fx, x)
    # 1685
    assert collect(5*f(x)+3*fx, fx) == 5*f(x) + 3*fx
    assert collect(f(x) + f(x)*diff(f(x), x) + x*diff(f(x), x)*f(x), f(x).diff(x)) ==\
        (x*f(x) + f(x))*D(f(x), x) + f(x)
    assert collect(f(x) + f(x)*diff(f(x), x) + x*diff(f(x), x)*f(x), f(x).diff(x), exact=True) ==\
        (x*f(x) + f(x))*D(f(x), x) + f(x)
    assert collect(1/f(x) + 1/f(x)*diff(f(x), x) + x*diff(f(x), x)/f(x), f(x).diff(x), exact=True) ==\
        (1/f(x) + x/f(x))*D(f(x), x) + 1/f(x)
# NOTE(review): decorated with @XFAIL but not named test_*, so the test
# runner never collects it (and f/x/D are undefined here) -- verify intent.
@XFAIL
def collect_issues():
    assert collect(1/f(x) + 1/f(x)*diff(f(x), x) + x*diff(f(x), x)/f(x), f(x).diff(x)) !=\
        (1 + x*D(f(x), x) + D(f(x), x))/f(x)
def test_collect_D_0():
    D = Derivative
    f = Function('f')
    x, a, b = symbols('xab')
    fxx = D(f(x), x, x)
    # collect does not distinguish nested derivatives, so it returns
    # -- (a + b)*D(D(f, x), x)
    assert collect(a*fxx + b*fxx , fxx) == (a + b)*fxx
def test_separatevars():
    # separatevars() factors an expression into parts depending on
    # disjoint sets of symbols
    x,y,z,n = symbols('xyzn')
    assert separatevars(2*n*x*z+2*x*y*z) == 2*x*z*(n+y)
    assert separatevars(x*z+x*y*z) == x*z*(1+y)
    assert separatevars(pi*x*z+pi*x*y*z) == pi*x*z*(1+y)
    assert separatevars(x*y**2*sin(x) + x*sin(x)*sin(y)) == x*(sin(y) + y**2)*sin(x)
    assert separatevars(x*exp(x+y)+x*exp(x)) == x*(1 + exp(y))*exp(x)
    assert separatevars((x*(y+1))**z) == x**z*(1 + y)**z
    assert separatevars(1+x+y+x*y) == (x+1)*(y+1)
    assert separatevars(y / pi * exp(-(z - x) / cos(n))) == y * exp((x - z) / cos(n)) / pi
    # 1759
    p=Symbol('p',positive=True)
    assert separatevars(sqrt(p**2 + x*p**2)) == p*sqrt(1 + x)
    assert separatevars(sqrt(y*(p**2 + x*p**2))) == p*sqrt(y)*sqrt(1 + x)
def test_separatevars_advanced_factor():
    x,y,z = symbols('xyz')
    assert separatevars(1 + log(x)*log(y) + log(x) + log(y)) == (log(x) + 1)*(log(y) + 1)
    assert separatevars(1 + x - log(z) - x*log(z) - exp(y)*log(z) - \
        x*exp(y)*log(z) + x*exp(y) + exp(y)) == \
        (1 + x)*(1 - log(z))*(1 + exp(y))
    x, y = symbols('xy', positive=True)
    assert separatevars(1 + log(x**log(y)) + log(x*y)) == (log(x) + 1)*(log(y) + 1)
def test_hypersimp():
    # hypersimp returns the ratio term(k+1)/term(k) of a hypergeometric
    # term, or None when the term is not hypergeometric
    n, k = symbols('nk', integer=True)
    assert hypersimp(factorial(k), k) == k + 1
    assert hypersimp(factorial(k**2), k) is None
    assert hypersimp(1/factorial(k), k) == 1/(k + 1)
    assert hypersimp(2**k/factorial(k)**2, k) == 2/(k**2+2*k+1)
    assert hypersimp(binomial(n, k), k) == (n-k)/(k+1)
    assert hypersimp(binomial(n+1, k), k) == (n-k+1)/(k+1)
    term = (4*k+1)*factorial(k)/factorial(2*k+1)
    assert hypersimp(term, k) == (4*k + 5)/(6 + 16*k**2 + 28*k)
    term = 1/((2*k-1)*factorial(2*k+1))
    assert hypersimp(term, k) == (2*k-1)/(6 + 22*k + 24*k**2 + 8*k**3)
    term = binomial(n, k)*(-1)**k/factorial(k)
    assert hypersimp(term, k) == (k - n)/(k**2+2*k+1)
def test_together2():
    x, y, z = symbols("xyz")
    assert together(1/(x*y) + 1/y**2) == 1/x*y**(-2)*(x + y)
    assert together(1/(1 + 1/x)) == x/(1 + x)
    x = symbols("x", nonnegative=True)
    y = symbols("y", real=True)
    assert together(1/x**y + 1/x**(y-1)) == x**(-y)*(1 + x)
def test_nsimplify():
    # nsimplify finds exact symbolic equivalents of numerical values
    x = Symbol("x")
    assert nsimplify(0) == 0
    assert nsimplify(-1) == -1
    assert nsimplify(1) == 1
    assert nsimplify(1+x) == 1+x
    assert nsimplify(2.7) == Rational(27, 10)
    assert nsimplify(1-GoldenRatio) == (1-sqrt(5))/2
    assert nsimplify((1+sqrt(5))/4, [GoldenRatio]) == GoldenRatio/2
    assert nsimplify(2/GoldenRatio, [GoldenRatio]) == 2*GoldenRatio - 2
    assert nsimplify(exp(5*pi*I/3, evaluate=False)) == sympify('1/2 - I*3**(1/2)/2')
    assert nsimplify(sin(3*pi/5, evaluate=False)) == sympify('(5/8 + 1/8*5**(1/2))**(1/2)')
    assert nsimplify(sqrt(atan('1', evaluate=False))*(2+I), [pi]) == sqrt(pi) + sqrt(pi)/2*I
    assert nsimplify(2 + exp(2*atan('1/4')*I)) == sympify('49/17 + 8*I/17')
    assert nsimplify(pi, tolerance=0.01) == Rational(22, 7)
    assert nsimplify(pi, tolerance=0.001) == Rational(355, 113)
    assert nsimplify(0.33333, tolerance=1e-4) == Rational(1, 3)
    assert nsimplify(2.0**(1/3.), tolerance=0.001) == Rational(635, 504)
    assert nsimplify(2.0**(1/3.), tolerance=0.001, full=True) == 2**Rational(1, 3)
def test_extract_minus_sign():
    x = Symbol("x")
    y = Symbol("y")
    a = Symbol("a")
    b = Symbol("b")
    assert simplify(-x/-y) == x/y
    assert simplify(-x/y) == -x/y
    assert simplify(x/y) == x/y
    assert simplify(x/-y) == -x/y
    assert simplify(-x/0) == -oo*x
    assert simplify(S(-5)/0) == -oo
    assert simplify(-a*x/(-y-b)) == a*x/(b + y)
def test_diff():
    # Derivative expressions that should simplify to zero
    x = Symbol("x")
    y = Symbol("y")
    f = Function("f")
    g = Function("g")
    assert simplify(g(x).diff(x)*f(x).diff(x)-f(x).diff(x)*g(x).diff(x)) == 0
    assert simplify(2*f(x)*f(x).diff(x)-diff(f(x)**2, x)) == 0
    assert simplify(diff(1/f(x), x)+f(x).diff(x)/f(x)**2) == 0
    assert simplify(f(x).diff(x, y)-f(x).diff(y, x)) == 0
assert simplify(f(x).diff(x, y)-f(x).diff(y, x)) == 0
def test_logcombine_1():
x, y = symbols("xy")
a = Symbol("a")
z, w = symbols("zw", positive=True)
b = Symbol("b", real=True)
assert logcombine(log(x)+2*log(y)) == log(x) + 2*log(y)
assert logcombine(log(x)+2*log(y), assume_pos_real=True) == log(x*y**2)
assert logcombine(a*log(w)+log(z)) == a*log(w) + log(z)
assert logcombine(b*log(z)+b*log(x)) == log(z**b) + b*log(x)
assert logcombine(b*log(z)-log(w)) == log(z**b/w)
assert logcombine(log(x)*log(z)) == log(x)*log(z)
assert logcombine(log(w)*log(x)) == log(w)*log(x)
assert logcombine(cos(-2*log(z)+b*log(w))) == cos(log(w**b/z**2))
assert logcombine(log(log(x)-log(y))-log(z), assume_pos_real=True) == \
log(log((x/y)**(1/z)))
assert logcombine((2+I)*log(x), assume_pos_real=True) == I*log(x)+log(x**2)
assert logcombine((x**2+log(x)-log(y))/(x*y), assume_pos_real=True) == \
log(x**(1/(x*y))*y**(-1/(x*y)))+x/y
assert logcombine(log(x)*2*log(y)+log(z), assume_pos_real=True) == \
log(z*y**log(x**2))
assert logcombine((x*y+sqrt(x**4+y**4)+log(x)-log(y))/(pi*x**Rational(2, 3)*\
y**Rational(3, 2)), assume_pos_real=True) == \
log(x**(1/(pi*x**Rational(2, 3)*y**Rational(3, 2)))*y**(-1/(pi*\
x**Rational(2, 3)*y**Rational(3, 2)))) + (x**4 + y**4)**Rational(1, 2)/(pi*\
x**Rational(2, 3)*y**Rational(3, 2)) + x**Rational(1, 3)/(pi*y**Rational(1, 2))
assert logcombine(Eq(log(x), -2*log(y)), assume_pos_real=True) == \
Eq(log(x*y**2), Integer(0))
assert logcombine(Eq(y, x*acos(-log(x/y))), assume_pos_real=True) == \
Eq(y, x*acos(log(y/x)))
assert logcombine(gamma(-log(x/y))*acos(-log(x/y)), assume_pos_real=True) == \
acos(log(y/x))*gamma(log(y/x))
assert logcombine((2+3*I)*log(x), assume_pos_real=True) == \
log(x**2)+3*I*log(x)
assert logcombine(Eq(y, -log(x)), assume_pos_real=True) == Eq(y, log(1/x))
assert logcombine(Integral((sin(x**2)+cos(x**3))/x, x), assume_pos_real=True) == \
Integral((sin(x**2)+cos(x**3))/x, x)
assert logcombine(Integral((sin(x**2)+cos(x**3))/x, x)+ (2+3*I)*log(x), \
assume_pos_real=True) == log(x**2)+3*I*log(x) + \
Integral((sin(x**2)+cos(x**3))/x, x)
@XFAIL
def test_logcombine_2():
# The same as one of the tests above, but with Rational(a, b) replaced with a/b.
# This fails because of a bug in matches. See issue 1274.
x, y = symbols("xy")
assert logcombine((x*y+sqrt(x**4+y**4)+log(x)-log(y))/(pi*x**(2/3)*y**(3/2)), \
assume_pos_real=True) == log(x**(1/(pi*x**(2/3)*y**(3/2)))*y**(-1/\
(pi*x**(2/3)*y**(3/2)))) + (x**4 + y**4)**(1/2)/(pi*x**(2/3)*y**(3/2)) + \
x**(1/3)/(pi*y**(1/2))
| bsd-3-clause | d5d8a8cf9cb52e89f28aa21cd2109940 | 38.66998 | 115 | 0.514183 | 2.343394 | false | true | false | false |
mattpap/sympy-polys | sympy/thirdparty/pyglet/pyglet/window/carbon/types.py | 7 | 4141 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
from ctypes import *
import pyglet.gl.agl
agl = pyglet.gl.agl
# Basic Carbon scalar typedefs mapped onto ctypes primitives
Boolean = c_ubyte # actually an unsigned char
Fixed = c_int32
ItemCount = c_uint32
ByteOffset = ByteCount = c_uint32
GDHandle = agl.GDHandle
class Rect(Structure):
    """QuickDraw rectangle: four signed 16-bit edge coordinates in the
    order top, left, bottom, right."""
    _fields_ = [(edge, c_short)
                for edge in ('top', 'left', 'bottom', 'right')]
class Point(Structure):
    """QuickDraw point; note the vertical coordinate comes first."""
    _fields_ = [
        ('v', c_short),
        ('h', c_short),
    ]
class CGPoint(Structure):
    """Core Graphics point (single-precision floats)."""
    _fields_ = [
        ('x', c_float),
        ('y', c_float),
    ]
class CGSize(Structure):
    """Core Graphics size: width and height as single-precision floats."""
    _fields_ = [
        ('width', c_float),
        ('height', c_float)
    ]
class CGRect(Structure):
    """Core Graphics rectangle: an origin point plus a size."""
    _fields_ = [
        ('origin', CGPoint),
        ('size', CGSize)
    ]
    # NOTE(review): __slots__ is unusual on a ctypes Structure, whose
    # fields are already class-level descriptors -- verify it is needed.
    __slots__ = ['origin', 'size']
# Core Graphics display typedefs
CGDirectDisplayID = c_void_p
CGDisplayCount = c_uint32
CGTableCount = c_uint32
CGDisplayCoord = c_int32
CGByteValue = c_ubyte
CGOpenGLDisplayMask = c_uint32
CGRefreshRate = c_double
CGCaptureOptions = c_uint32
# HIToolbox geometry aliases for the CG structs above
HIPoint = CGPoint
HISize = CGSize
HIRect = CGRect
class EventTypeSpec(Structure):
    """Identifies a Carbon event by its (class, kind) pair."""
    _fields_ = [
        ('eventClass', c_uint32),
        ('eventKind', c_uint32)
    ]
# Opaque Carbon reference/handle types and small integer typedefs
WindowRef = c_void_p
EventRef = c_void_p
EventTargetRef = c_void_p
EventHandlerRef = c_void_p
MenuRef = c_void_p
MenuID = c_int16
MenuItemIndex = c_uint16
MenuCommand = c_uint32
CFStringEncoding = c_uint
WindowClass = c_uint32
WindowAttributes = c_uint32
WindowPositionMethod = c_uint32
EventMouseButton = c_uint16
EventMouseWheelAxis = c_uint16
OSType = c_uint32
OSStatus = c_int32
class MouseTrackingRegionID(Structure):
    """Signature/id pair naming a mouse tracking region."""
    _fields_ = [('signature', OSType),
                ('id', c_int32)]
MouseTrackingRef = c_void_p
RgnHandle = c_void_p
class ProcessSerialNumber(Structure):
    """64-bit process serial number split into two 32-bit halves."""
    _fields_ = [('highLongOfPSN', c_uint32),
                ('lowLongOfPSN', c_uint32)]
class HICommand_Menu(Structure):
    """Menu portion of a HICommand: originating menu and item index."""
    _fields_ = [
        ('menuRef', MenuRef),
        ('menuItemIndex', MenuItemIndex),
    ]
class HICommand(Structure):
    """Payload of a Carbon HICommand event."""
    _fields_ = [
        ('attributes', c_uint32),
        ('commandID', c_uint32),
        ('menu', HICommand_Menu)
    ]
class EventRecord(Structure):
    """Classic Event Manager event record."""
    _fields_ = [
        ('what', c_uint16),
        ('message', c_uint32),
        ('when', c_uint32),
        ('where', Point),
        ('modifiers', c_uint16)
    ]
| bsd-3-clause | 1dc021deec179d38f7028c69fa78fc73 | 25.044025 | 78 | 0.639459 | 3.622922 | false | false | false | false |
mattpap/sympy-polys | sympy/mpmath/libmp/libelefun.py | 1 | 43291 | """
This module implements computation of elementary transcendental
functions (powers, logarithms, trigonometric and hyperbolic
functions, inverse trigonometric and hyperbolic) for real
floating-point numbers.
For complex and interval implementations of the same functions,
see libmpc and libmpi.
"""
import math
from bisect import bisect
from backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_FIVE, BACKEND
from libmpf import (
round_floor, round_ceiling, round_down, round_up,
round_nearest, round_fast,
ComplexResult,
bitcount, bctable, lshift, rshift, giant_steps, sqrt_fixed,
from_int, to_int, from_man_exp, to_fixed, to_float, from_float,
from_rational, normalize,
fzero, fone, fnone, fhalf, finf, fninf, fnan,
mpf_cmp, mpf_sign, mpf_abs,
mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_div, mpf_shift,
mpf_rdiv_int, mpf_pow_int, mpf_sqrt,
reciprocal_rnd, negative_rnd, mpf_perturb,
isqrt_fast
)
from libintmath import ifib
#-------------------------------------------------------------------------------
# Tuning parameters
#-------------------------------------------------------------------------------

# Cutoff for computing exp from cosh+sinh. This reduces the
# number of terms by half, but also requires a square root which
# is expensive with the pure-Python square root code.
if BACKEND == 'python':
    EXP_COSH_CUTOFF = 600
else:
    EXP_COSH_CUTOFF = 400
# Cutoff for using more than 2 series
EXP_SERIES_U_CUTOFF = 1500
# Also basically determined by sqrt
if BACKEND == 'python':
    COS_SIN_CACHE_PREC = 400
else:
    COS_SIN_CACHE_PREC = 200
COS_SIN_CACHE_STEP = 8
cos_sin_cache = {}
# Number of integer logarithms to cache (for zeta sums)
MAX_LOG_INT_CACHE = 2000
log_int_cache = {}
LOG_TAYLOR_PREC = 2500  # Use Taylor series with caching up to this prec
LOG_TAYLOR_SHIFT = 9  # Cache log values in steps of size 2^-N
log_taylor_cache = {}
# prec/size ratio of x for fastest convergence in AGM formula
LOG_AGM_MAG_PREC_RATIO = 20
ATAN_TAYLOR_PREC = 3000  # Same as for log
ATAN_TAYLOR_SHIFT = 7  # steps of size 2^-N
atan_taylor_cache = {}
# Table of working precisions indexed by requested precision:
# ~= next power of two + 20 (note: xrange ties this to Python 2)
cache_prec_steps = [22,22]
for k in xrange(1, bitcount(LOG_TAYLOR_PREC)+1):
    cache_prec_steps += [min(2**k,LOG_TAYLOR_PREC)+20] * 2**(k-1)
#----------------------------------------------------------------------------#
#                                                                            #
#                    Elementary mathematical constants                       #
#                                                                            #
#----------------------------------------------------------------------------#
def constant_memo(f):
    """
    Decorator for caching computed values of mathematical
    constants. Apply to a function that takes a single precision
    argument and returns the constant as a fixed-point value
    with that precision.
    """
    # Cache state lives on the wrapped function object itself:
    # the highest precision computed so far, and the value at it.
    f.memo_prec = -1
    f.memo_val = None
    def g(prec, **kwargs):
        cached_prec = f.memo_prec
        # Cache hit: truncate the stored higher-precision value down.
        if prec <= cached_prec:
            return f.memo_val >> (cached_prec - prec)
        # Cache miss: recompute with ~5% extra bits plus a constant
        # margin so slightly larger future requests also hit the cache.
        new_prec = int(prec*1.05 + 10)
        f.memo_val = f(new_prec, **kwargs)
        f.memo_prec = new_prec
        return f.memo_val >> (new_prec - prec)
    g.__name__ = f.__name__
    g.__doc__ = f.__doc__
    return g
def def_mpf_constant(fixed):
    """
    Create a function that computes the mpf value for a mathematical
    constant, given a function that computes the fixed-point value.

    Assumptions: the constant is positive and has magnitude ~= 1;
    the fixed-point function rounds to floor.
    """
    def f(prec, rnd=round_fast):
        working_prec = prec + 20
        value = fixed(working_prec)
        # The fixed-point value is a floor; nudge it up by one ulp for
        # upward rounding modes so rounding brackets the true constant.
        if rnd in (round_up, round_ceiling):
            value += 1
        return normalize(0, value, -working_prec, bitcount(value), prec, rnd)
    f.__doc__ = fixed.__doc__
    return f
def bsp_acot(q, a, b, hyperbolic):
    """
    Binary-splitting kernel for the acot/acoth series: returns the
    partial sum over term indices [a, b) as (p, q, r) integers.
    """
    # Base case: a single term of the series.
    if b - a == 1:
        denom_factor = MPZ(2*a + 3)
        # acoth terms are all positive; acot terms alternate in sign.
        if hyperbolic or a & 1:
            return MPZ_ONE, denom_factor * q**2, denom_factor
        return -MPZ_ONE, denom_factor * q**2, denom_factor
    # Recursive case: split the range in half and combine.
    mid = (a + b) // 2
    p1, q1, r1 = bsp_acot(q, a, mid, hyperbolic)
    p2, q2, r2 = bsp_acot(q, mid, b, hyperbolic)
    return q2*p1 + r1*p2, q1*q2, r1*r2
# the acoth(x) series converges like the geometric series for x^2
# N = ceil(p*log(2)/(2*log(x)))
def acot_fixed(a, prec, hyperbolic):
    """
    Compute acot(a) or acoth(a) for an integer a with binary splitting; see
    http://numbers.computation.free.fr/Constants/Algorithms/splitting.html

    Returns the result as a fixed-point integer with precision prec.
    """
    # Number of series terms needed (0.35 ~= log(2)/2), plus guard terms.
    N = int(0.35 * prec/math.log(a) + 20)
    p, q, r = bsp_acot(a, 0,N, hyperbolic)
    # Convert the exact fraction (p+q)/(q*a) to fixed point.
    return ((p+q)<<prec)//(q*a)
def machin(coefs, prec, hyperbolic=False):
    """
    Evaluate a Machin-like formula, i.e., a linear combination of
    acot(n) or acoth(n) for specific integer values of n, using fixed-
    point arithmetic. The input should be a list [(c, n), ...], giving
    c*acot[h](n) + ...
    """
    guard = 10
    wp = prec + guard
    total = MPZ_ZERO
    for coef, arg in coefs:
        total += MPZ(coef) * acot_fixed(MPZ(arg), wp, hyperbolic)
    # Drop the guard bits before returning.
    return total >> guard
# Logarithms of integers are needed for various computations involving
# logarithms, powers, radix conversion, etc
@constant_memo
def ln2_fixed(prec):
    """
    Computes ln(2). This is done with a hyperbolic Machin-type formula,
    with binary splitting at high precision.

    Returns a fixed-point integer with precision prec.
    """
    return machin([(18, 26), (-2, 4801), (8, 8749)], prec, True)
@constant_memo
def ln10_fixed(prec):
    """
    Computes ln(10). This is done with a hyperbolic Machin-type formula.

    Returns a fixed-point integer with precision prec.
    """
    return machin([(46, 31), (34, 49), (20, 161)], prec, True)
"""
For computation of pi, we use the Chudnovsky series:
oo
___ k
1 \ (-1) (6 k)! (A + B k)
----- = ) -----------------------
12 pi /___ 3 3k+3/2
(3 k)! (k!) C
k = 0
where A, B, and C are certain integer constants. This series adds roughly
14 digits per term. Note that C^(3/2) can be extracted so that the
series contains only rational terms. This makes binary splitting very
efficient.
The recurrence formulas for the binary splitting were taken from
ftp://ftp.gmplib.org/pub/src/gmp-chudnovsky.c
Previously, Machin's formula was used at low precision and the AGM iteration
was used at high precision. However, the Chudnovsky series is essentially as
fast as the Machin formula at low precision and in practice about 3x faster
than the AGM at high precision (despite theoretically having a worse
asymptotic complexity), so there is no reason not to use it in all cases.
"""
# Constants in Chudnovsky's series
CHUD_A = MPZ(13591409)
CHUD_B = MPZ(545140134)
CHUD_C = MPZ(640320)
CHUD_D = MPZ(12)
def bs_chudnovsky(a, b, level, verbose):
    """
    Computes the sum from a to b of the series in the Chudnovsky
    formula. Returns g, p, q where p/q is the sum as an exact
    fraction and g is a temporary value used to save work
    for recursive calls.
    """
    # Base case: a single series term.
    if b-a == 1:
        g = MPZ((6*b-5)*(2*b-1)*(6*b-1))
        p = b**3 * CHUD_C**3 // 24
        q = (-1)**b * g * (CHUD_A+CHUD_B*b)
    else:
        # Only report the top few levels of the recursion tree.
        # (Python 2 print statement; this module predates Python 3.)
        if verbose and level < 4:
            print " binary splitting", a, b
        mid = (a+b)//2
        g1, p1, q1 = bs_chudnovsky(a, mid, level+1, verbose)
        g2, p2, q2 = bs_chudnovsky(mid, b, level+1, verbose)
        # Combine the two half-sums into one exact fraction.
        p = p1*p2
        g = g1*g2
        q = q1*p2 + q2*g1
    return g, p, q
@constant_memo
def pi_fixed(prec, verbose=False, verbose_base=None):
    """
    Compute floor(pi * 2**prec) as a big integer.

    This is done using Chudnovsky's series (see comments in
    libelefun.py for details).
    """
    # The Chudnovsky series gives 14.18 digits per term;
    # 3.3219... = log2(10) converts prec (bits) to decimal digits.
    N = int(prec/3.3219280948/14.181647462 + 2)
    if verbose:
        print "binary splitting with N =", N
    g, p, q = bs_chudnovsky(0, N, 0, verbose)
    # sqrt(C) at 2*prec bits; C^(3/2) factored out of the series.
    sqrtC = isqrt_fast(CHUD_C<<(2*prec))
    v = p*CHUD_C*sqrtC//((q+CHUD_A*p)*CHUD_D)
    return v
def degree_fixed(prec):
    """Fixed-point value of one degree in radians, pi/180."""
    return pi_fixed(prec)//180
def bspe(a, b):
    """
    Sum series for exp(1)-1 between a, b, returning the result
    as an exact fraction (p, q).
    """
    # Base case: one reciprocal-factorial term.
    if b - a == 1:
        return MPZ_ONE, MPZ(b)
    # Recursive case: binary splitting over the index range.
    mid = (a + b) // 2
    p_left, q_left = bspe(a, mid)
    p_right, q_right = bspe(mid, b)
    return p_left*q_right + p_right, q_left*q_right
@constant_memo
def e_fixed(prec):
    """
    Computes exp(1). This is done using the ordinary Taylor series for
    exp, with binary splitting. For a description of the algorithm,
    see:

        http://numbers.computation.free.fr/Constants/
            Algorithms/splitting.html
    """
    # Slight overestimate of N needed for 1/N! < 2**(-prec)
    # This could be tightened for large N.
    N = int(1.1*prec/math.log(prec) + 20)
    p, q = bspe(0,N)
    # bspe gives (e-1) as p/q; add 1 back and convert to fixed point.
    return ((p+q)<<prec)//q
@constant_memo
def phi_fixed(prec):
    """
    Computes the golden ratio, (1+sqrt(5))/2
    """
    prec += 10
    a = isqrt_fast(MPZ_FIVE<<(2*prec)) + (MPZ_ONE << prec)
    # Shift out the 10 guard bits plus 1 more for the division by 2.
    return a >> 11
# mpf-valued constant functions: each takes (prec, rnd) and returns a
# correctly rounded mpf tuple, built from the fixed-point kernels above.
mpf_phi    = def_mpf_constant(phi_fixed)
mpf_pi     = def_mpf_constant(pi_fixed)
mpf_e      = def_mpf_constant(e_fixed)
mpf_degree = def_mpf_constant(degree_fixed)
mpf_ln2    = def_mpf_constant(ln2_fixed)
mpf_ln10   = def_mpf_constant(ln10_fixed)
@constant_memo
def ln_sqrt2pi_fixed(prec):
    """Fixed-point value of ln(sqrt(2*pi))."""
    wp = prec + 10
    # ln(sqrt(2*pi)) = ln(2*pi)/2; the halving is done by the prec-1.
    return to_fixed(mpf_log(mpf_shift(mpf_pi(wp), 1), wp), prec-1)
@constant_memo
def sqrtpi_fixed(prec):
    """Fixed-point value of sqrt(pi)."""
    return sqrt_fixed(pi_fixed(prec), prec)

# mpf-valued wrappers for the two constants above.
mpf_sqrtpi     = def_mpf_constant(sqrtpi_fixed)
mpf_ln_sqrt2pi = def_mpf_constant(ln_sqrt2pi_fixed)
#----------------------------------------------------------------------------#
#                                                                            #
#                                  Powers                                    #
#                                                                            #
#----------------------------------------------------------------------------#

def mpf_pow(s, t, prec, rnd=round_fast):
    """
    Compute s**t. Raises ComplexResult if s is negative and t is
    fractional.
    """
    # mpf values are (sign, man, exp, bc) tuples.
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    if ssign and texp < 0:
        raise ComplexResult("negative number raised to a fractional power")
    # Integer exponent: use the dedicated integer-power routine.
    if texp >= 0:
        return mpf_pow_int(s, (-1)**tsign * (tman<<texp), prec, rnd)
    # Half-integer exponent: s**(n/2) = sqrt(s)**n
    if texp == -1:
        if tman == 1:
            if tsign:
                return mpf_div(fone, mpf_sqrt(s, prec+10,
                    reciprocal_rnd[rnd]), prec, rnd)
            return mpf_sqrt(s, prec, rnd)
        else:
            if tsign:
                return mpf_pow_int(mpf_sqrt(s, prec+10,
                    reciprocal_rnd[rnd]), -tman, prec, rnd)
            return mpf_pow_int(mpf_sqrt(s, prec+10, rnd), tman, prec, rnd)
    # General formula: s**t = exp(t*log(s))
    # TODO: handle rnd direction of the logarithm carefully
    c = mpf_log(s, prec+10, rnd)
    return mpf_exp(mpf_mul(t, c), prec, rnd)
def int_pow_fixed(y, n, prec):
    """n-th power of a fixed point number with precision prec

    Returns the power in the form man, exp,
    man * 2**exp ~= y**n

    Uses binary exponentiation, truncating intermediate results to
    workprec bits to keep them from growing without bound.
    """
    if n == 2:
        return (y*y), 0
    bc = bitcount(y)
    exp = 0
    # Working precision: enough to absorb the rounding error of
    # ~bitcount(n) truncated multiplications.
    workprec = 2 * (prec + 4*bitcount(n) + 4)
    _, pm, pe, pbc = fone
    while 1:
        if n & 1:
            # Multiply the accumulated product by the current square.
            pm = pm*y
            pe = pe+exp
            # Estimate the new bitcount, then correct it via bctable.
            pbc += bc - 2
            pbc = pbc + bctable[int(pm >> pbc)]
            if pbc > workprec:
                pm = pm >> (pbc-workprec)
                pe += pbc - workprec
                pbc = workprec
            n -= 1
            if not n:
                break
        # Square the base for the next binary digit of n.
        y = y*y
        exp = exp+exp
        bc = bc + bc - 2
        bc = bc + bctable[int(y >> bc)]
        if bc > workprec:
            y = y >> (bc-workprec)
            exp += bc - workprec
            bc = workprec
        n = n // 2
    return pm, pe
# froot(s, n, prec, rnd) computes the real n-th root of a
# positive mpf tuple s.
# To compute the root we start from a 50-bit estimate for r
# generated with ordinary floating-point arithmetic, and then refine
# the value to full accuracy using the iteration

#            1  /                     y       \
#   r     = --- | (n-1)  * r   +  ----------  |
#    n+1     n  \           n     r_n**(n-1)  /

# which is simply Newton's method applied to the equation r**n = y.
# With giant_steps(start, prec+extra) = [p0,...,pm, prec+extra]
# and y = man * 2**-shift one has
# (man * 2**exp)**(1/n) =
# y**(1/n) * 2**(start-prec/n) * 2**(p0-start) * ... * 2**(prec+extra-pm) *
# 2**((exp+shift-(n-1)*prec)/n -extra))
# The last factor is accounted for in the last line of froot.

def nthroot_fixed(y, n, prec, exp1):
    """
    Fixed-point n-th root of y by Newton iteration with doubling
    precision (see the derivation in the comment above).
    """
    start = 50
    try:
        # Cheap 50-bit seed via machine floating point.
        y1 = rshift(y, prec - n*start)
        r = MPZ(int(y1**(1.0/n)))
    except OverflowError:
        # Seed overflowed a float; fall back to mpf arithmetic.
        y1 = from_int(y1, start)
        fn = from_int(n)
        fn = mpf_rdiv_int(1, fn, start)
        r = mpf_pow(y1, fn, start)
        r = to_int(r)
    extra = 10
    extra1 = n
    prevp = start
    # Each giant step roughly doubles the number of correct bits.
    for p in giant_steps(start, prec+extra):
        pm, pe = int_pow_fixed(r, n-1, prevp)
        r2 = rshift(pm, (n-1)*prevp - p - pe - extra1)
        B = lshift(y, 2*p-prec+extra1)//r2
        r = (B + (n-1) * lshift(r, p-prevp))//n
        prevp = p
    return r
def mpf_nthroot(s, n, prec, rnd=round_fast):
    """nth-root of a positive number

    Use the Newton method when faster, otherwise use x**(1/n)

    Raises ComplexResult for negative s. n may be any integer;
    negative n computes the reciprocal root.
    """
    sign, man, exp, bc = s
    if sign:
        raise ComplexResult("nth root of a negative number")
    # Special values: man == 0 means zero, infinity or nan.
    if not man:
        if s == fnan:
            return fnan
        if s == fzero:
            if n > 0:
                return fzero
            if n == 0:
                return fone
            return finf
        # Infinity
        if not n:
            return fnan
        if n < 0:
            return fzero
        return finf
    flag_inverse = False
    # Trivial exponents, and reduction of negative n to positive.
    if n < 2:
        if n == 0:
            return fone
        if n == 1:
            return mpf_pos(s, prec, rnd)
        if n == -1:
            return mpf_div(fone, s, prec, rnd)
        # n < 0
        rnd = reciprocal_rnd[rnd]
        flag_inverse = True
        extra_inverse = 5
        prec += extra_inverse
        n = -n
    # For large n (threshold determined empirically), exp(log(s)/n)
    # is faster than the Newton iteration.
    if n > 20 and (n >= 20000 or prec < int(233 + 28.3 * n**0.62)):
        prec2 = prec + 10
        fn = from_int(n)
        nth = mpf_rdiv_int(1, fn, prec2)
        r = mpf_pow(s, nth, prec2, rnd)
        s = normalize(r[0], r[1], r[2], r[3], prec, rnd)
        if flag_inverse:
            return mpf_div(fone, s, prec-extra_inverse, rnd)
        else:
            return s
    # Convert to a fixed-point number with prec2 bits.
    prec2 = prec + 2*n - (prec%n)
    # a few tests indicate that
    # for 10 < n < 10**4 a bit more precision is needed
    if n > 10:
        prec2 += prec2//10
        prec2 = prec2 - prec2%n
    # Mantissa may have more bits than we need. Trim it down.
    shift = bc - prec2
    # Adjust exponents to make prec2 and exp+shift multiples of n.
    sign1 = 0
    es = exp+shift
    if es < 0:
        sign1 = 1
        es = -es
    if sign1:
        shift += es%n
    else:
        shift -= es%n
    man = rshift(man, shift)
    extra = 10
    exp1 = ((exp+shift-(n-1)*prec2)//n) - extra
    # Bias the mantissa by one ulp in the direction that makes the
    # final rounding correct for the requested rounding mode.
    rnd_shift = 0
    if flag_inverse:
        if rnd == 'u' or rnd == 'c':
            rnd_shift = 1
    else:
        if rnd == 'd' or rnd == 'f':
            rnd_shift = 1
    man = nthroot_fixed(man+rnd_shift, n, prec2, exp1)
    s = from_man_exp(man, exp1, prec, rnd)
    if flag_inverse:
        return mpf_div(fone, s, prec-extra_inverse, rnd)
    else:
        return s
def mpf_cbrt(s, prec, rnd=round_fast):
    """cubic root of a positive number"""
    return mpf_nthroot(s, 3, prec, rnd)
#----------------------------------------------------------------------------#
#                                                                            #
#                                Logarithms                                  #
#                                                                            #
#----------------------------------------------------------------------------#

def log_int_fixed(n, prec, ln2=None):
    """
    Fast computation of log(n), caching the value for small n,
    intended for zeta sums.
    """
    if n in log_int_cache:
        value, vprec = log_int_cache[n]
        if vprec >= prec:
            return value >> (vprec - prec)
    wp = prec + 10
    # NOTE(review): this compares wp against LOG_TAYLOR_SHIFT (= 9), so
    # the Taylor branch is almost never taken; LOG_TAYLOR_PREC may have
    # been intended -- confirm against upstream mpmath.
    if wp <= LOG_TAYLOR_SHIFT:
        if ln2 is None:
            ln2 = ln2_fixed(wp)
        # Scale n into [1, 2) and add back r*ln(2) for the r-bit shift.
        r = bitcount(n)
        x = n << (wp-r)
        v = log_taylor_cached(x, wp) + r*ln2
    else:
        v = to_fixed(mpf_log(from_int(n), wp+5), wp)
    if n < MAX_LOG_INT_CACHE:
        log_int_cache[n] = (v, wp)
    return v >> (wp-prec)
def agm_fixed(a, b, prec):
    """
    Fixed-point computation of agm(a,b), assuming
    a, b both close to unit magnitude.

    Iterates the arithmetic-geometric mean until the arithmetic
    mean stops changing to within a few ulps.
    """
    i = 0
    while 1:
        anew = (a+b)>>1
        # Convergence is quadratic once the iteration settles; after a
        # few iterations, stop when a changes by fewer than 8 ulps.
        # (The unreachable trailing "return a" after this loop has been
        # removed -- the loop can only exit through this return.)
        if i > 4 and abs(a-anew) < 8:
            return a
        b = isqrt_fast(a*b)
        a = anew
        i += 1
def log_agm(x, prec):
    """
    Fixed-point computation of -log(x) = log(1/x), suitable
    for large precision. It is required that 0 < x < 1. The
    algorithm used is the Sasaki-Kanada formula

        -log(x) = pi/agm(theta2(x)^2,theta3(x)^2). [1]

    For faster convergence in the theta functions, x should
    be chosen closer to 0.

    Guard bits must be added by the caller.

    HYPOTHESIS: if x = 2^(-n), n bits need to be added to
    account for the truncation to a fixed-point number,
    and this is the only significant cancellation error.

    The number of bits lost to roundoff is small and can be
    considered constant.

    [1] Richard P. Brent, "Fast Algorithms for High-Precision
        Computation of Elementary Functions (extended abstract)",
        http://wwwmaths.anu.edu.au/~brent/pd/RNC7-Brent.pdf
    """
    x2 = (x*x) >> prec
    # Compute jtheta2(x)**2
    s = a = b = x2
    while a:
        b = (b*x2) >> prec
        a = (a*b) >> prec
        s += a
    s += (MPZ_ONE<<prec)
    s = (s*s)>>(prec-2)
    s = (s*isqrt_fast(x<<prec))>>prec
    # Compute jtheta3(x)**2
    t = a = b = x
    while a:
        b = (b*x2) >> prec
        a = (a*b) >> prec
        t += a
    t = (MPZ_ONE<<prec) + (t<<1)
    t = (t*t)>>prec
    # Final formula: -log(x) = pi / agm(theta2^2, theta3^2)
    p = agm_fixed(s, t, prec)
    return (pi_fixed(prec) << prec) // p
def log_taylor(x, prec, r=0):
    """
    Fixed-point calculation of log(x). It is assumed that x is close
    enough to 1 for the Taylor series to converge quickly. Convergence
    can be improved by specifying r > 0 to compute
    log(x^(1/2^r))*2^r, at the cost of performing r square roots.

    The caller must provide sufficient guard bits.
    """
    # Take r square roots to pull x closer to 1.
    for i in xrange(r):
        x = isqrt_fast(x<<prec)
    one = MPZ_ONE << prec
    # Use the atanh series: log(x) = 2*atanh(v), v = (x-1)/(x+1).
    v = ((x-one)<<prec)//(x+one)
    sign = v < 0
    if sign:
        v = -v
    v2 = (v*v) >> prec
    v4 = (v2*v2) >> prec
    # Sum two interleaved sub-series (stride 4) to halve the number
    # of full-width multiplications.
    s0 = v
    s1 = v//3
    v = (v*v4) >> prec
    k = 5
    while v:
        s0 += v // k
        k += 2
        s1 += v // k
        v = (v*v4) >> prec
        k += 2
    s1 = (s1*v2) >> prec
    # Multiply by 2 (atanh -> log) and by 2^r (undo the square roots).
    s = (s0+s1) << (1+r)
    if sign:
        return -s
    return s
def log_taylor_cached(x, prec):
    """
    Fixed-point computation of log(x), assuming x in (0.5, 2)
    and prec <= LOG_TAYLOR_PREC.

    Looks up (or computes and caches) the log of a nearby anchor point,
    then corrects with a short atanh Taylor series for the residual.
    """
    # Anchor index: the top LOG_TAYLOR_SHIFT bits of x.
    n = x >> (prec-LOG_TAYLOR_SHIFT)
    cached_prec = cache_prec_steps[prec]
    dprec = cached_prec - prec
    if (n, cached_prec) in log_taylor_cache:
        a, log_a = log_taylor_cache[n, cached_prec]
    else:
        a = n << (cached_prec - LOG_TAYLOR_SHIFT)
        log_a = log_taylor(a, cached_prec, 8)
        log_taylor_cache[n, cached_prec] = (a, log_a)
    a >>= dprec
    log_a >>= dprec
    # log(x) = log(a) + 2*atanh(u/(2+u)) where u = (x-a)/a.
    u = ((x - a) << prec) // a
    v = (u << prec) // ((MPZ_TWO << prec) + u)
    v2 = (v*v) >> prec
    v4 = (v2*v2) >> prec
    s0 = v
    s1 = v//3
    v = (v*v4) >> prec
    k = 5
    while v:
        s0 += v//k
        k += 2
        s1 += v//k
        v = (v*v4) >> prec
        k += 2
    s1 = (s1*v2) >> prec
    s = (s0+s1) << 1
    return log_a + s
def mpf_log(x, prec, rnd=round_fast):
    """
    Compute the natural logarithm of the mpf value x. If x is negative,
    ComplexResult is raised.
    """
    sign, man, exp, bc = x
    #------------------------------------------------------------------
    # Handle special values
    if not man:
        if x == fzero: return fninf
        if x == finf: return finf
        if x == fnan: return fnan
    if sign:
        raise ComplexResult("logarithm of a negative number")
    wp = prec + 20
    #------------------------------------------------------------------
    # Handle log(2^n) = log(n)*2.
    # Here we catch the only possible exact value, log(1) = 0
    if man == 1:
        if not exp:
            return fzero
        return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)
    mag = exp+bc
    abs_mag = abs(mag)
    #------------------------------------------------------------------
    # Handle x = 1+eps, where log(x) ~ x. We need to check for
    # cancellation when moving to fixed-point math and compensate
    # by increasing the precision. Note that abs_mag in (0, 1) <=>
    # 0.5 < x < 2 and x != 1
    if abs_mag <= 1:
        # Calculate t = x-1 to measure distance from 1 in bits
        tsign = 1-abs_mag
        if tsign:
            tman = (MPZ_ONE<<bc) - man
        else:
            tman = man - (MPZ_ONE<<(bc-1))
        tbc = bitcount(tman)
        cancellation = bc - tbc
        if cancellation > wp:
            # x is so close to 1 that log(x) ~= x-1 to full precision.
            t = normalize(tsign, tman, abs_mag-bc, tbc, tbc, 'n')
            return mpf_perturb(t, tsign, prec, rnd)
        else:
            wp += cancellation
        # TODO: if close enough to 1, we could use Taylor series
        # even in the AGM precision range, since the Taylor series
        # converges rapidly
    #------------------------------------------------------------------
    # Another special case:
    # n*log(2) is a good enough approximation
    if abs_mag > 10000:
        if bitcount(abs_mag) > wp:
            return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)
    #------------------------------------------------------------------
    # General case.
    # Perform argument reduction using log(x) = log(x*2^n) - n*log(2):
    # If we are in the Taylor precision range, choose magnitude 0 or 1.
    # If we are in the AGM precision range, choose magnitude -m for
    # some large m; benchmarking on one machine showed m = prec/20 to be
    # optimal between 1000 and 100,000 digits.
    if wp <= LOG_TAYLOR_PREC:
        m = log_taylor_cached(lshift(man, wp-bc), wp)
        if mag:
            m += mag*ln2_fixed(wp)
    else:
        optimal_mag = -wp//LOG_AGM_MAG_PREC_RATIO
        n = optimal_mag - mag
        x = mpf_shift(x, n)
        wp += (-optimal_mag)
        # log_agm computes -log(x) for 0 < x < 1.
        m = -log_agm(to_fixed(x, wp), wp)
        m -= n*ln2_fixed(wp)
    return from_man_exp(m, -wp, prec, rnd)
def mpf_log_hypot(a, b, prec, rnd):
    """
    Computes log(sqrt(a^2+b^2)) accurately.
    """
    # If either a or b is inf/nan/0, assume it to be a
    if not b[1]:
        a, b = b, a
    # a is inf/nan/0
    if not a[1]:
        # both are inf/nan/0
        if not b[1]:
            if a == b == fzero:
                return fninf
            if fnan in (a, b):
                return fnan
            # at least one term is (+/- inf)^2
            return finf
        # only a is inf/nan/0
        if a == fzero:
            # log(sqrt(0+b^2)) = log(|b|)
            return mpf_log(mpf_abs(b), prec, rnd)
        if a == fnan:
            return fnan
        return finf
    # Exact
    a2 = mpf_mul(a,a)
    b2 = mpf_mul(b,b)
    extra = 20
    # Not exact
    h2 = mpf_add(a2, b2, prec+extra)
    # Probe how close a^2+b^2 is to 1 -- if very close, log(h2) suffers
    # catastrophic cancellation and the sum must be redone more exactly.
    cancelled = mpf_add(h2, fnone, 10)
    mag_cancelled = cancelled[2]+cancelled[3]
    # Just redo the sum exactly if necessary (could be smarter
    # and avoid memory allocation when a or b is precisely 1
    # and the other is tiny...)
    if cancelled == fzero or mag_cancelled < -extra//2:
        h2 = mpf_add(a2, b2, prec+extra-min(a2[2],b2[2]))
    # log(sqrt(h2)) = log(h2)/2
    return mpf_shift(mpf_log(h2, prec, rnd), -1)
#----------------------------------------------------------------------
# Inverse tangent
#

def atan_newton(x, prec):
    """
    Fixed-point atan(x) via Newton's method applied to tan(r) = x,
    starting from a 50-bit machine-float estimate and doubling the
    precision each iteration.
    """
    if prec >= 100:
        r = math.atan((x>>(prec-53))/2.0**53)
    else:
        r = math.atan(x/2.0**prec)
    prevp = 50
    r = int(r * 2.0**53) >> (53-prevp)
    extra_p = 50
    for wp in giant_steps(prevp, prec):
        wp += extra_p
        r = r << (wp-prevp)
        cos, sin = cos_sin_fixed(r, wp)
        tan = (sin << wp) // cos
        # Newton update: r -= (tan(r) - x) / (1 + tan(r)^2)
        a = ((tan-rshift(x, prec-wp)) << wp) // ((MPZ_ONE<<wp) + ((tan**2)>>wp))
        r = r - a
        prevp = wp
    return rshift(r, prevp-prec)
def atan_taylor_get_cached(n, prec):
    """
    Return (a, atan(a)) at precision prec for the anchor point indexed
    by n, computing and caching it at a rounded-up precision if needed.
    """
    # Taylor series with caching wins up to huge precisions
    # To avoid unnecessary precomputation at low precision, we
    # do it in steps
    # Round to next power of 2
    prec2 = (1<<(bitcount(prec-1))) + 20
    dprec = prec2 - prec
    if (n, prec2) in atan_taylor_cache:
        a, atan_a = atan_taylor_cache[n, prec2]
    else:
        a = n << (prec2 - ATAN_TAYLOR_SHIFT)
        atan_a = atan_newton(a, prec2)
        atan_taylor_cache[n, prec2] = (a, atan_a)
    # Truncate the cached values down to the requested precision.
    return (a >> dprec), (atan_a >> dprec)
def atan_taylor(x, prec):
    """
    Fixed-point atan(x) using a cached anchor point plus a Taylor
    series in the small residual v = (x-a)/(1 + a^2 + a*(x-a)).
    """
    n = (x >> (prec-ATAN_TAYLOR_SHIFT))
    a, atan_a = atan_taylor_get_cached(n, prec)
    d = x - a
    # atan(x) = atan(a) + atan(v); v is the tangent of the difference.
    s0 = v = (d << prec) // ((a**2 >> prec) + (a*d >> prec) + (MPZ_ONE << prec))
    v2 = (v**2 >> prec)
    v4 = (v2 * v2) >> prec
    # Two interleaved sub-series (stride 4) of the alternating series.
    s1 = v//3
    v = (v * v4) >> prec
    k = 5
    while v:
        s0 += v // k
        k += 2
        s1 += v // k
        v = (v * v4) >> prec
        k += 2
    s1 = (s1 * v2) >> prec
    s = s0 - s1
    return atan_a + s
def atan_inf(sign, prec, rnd):
    """Return atan(+inf) = pi/2 (sign=0) or atan(-inf) = -pi/2 (sign=1)."""
    if sign:
        # Negate the rounding direction since the result is negated.
        half_pi = mpf_shift(mpf_pi(prec, negative_rnd[rnd]), -1)
        return mpf_neg(half_pi)
    return mpf_shift(mpf_pi(prec, rnd), -1)
def mpf_atan(x, prec, rnd=round_fast):
    """
    Compute atan(x) for an mpf value x, correctly rounded.
    """
    sign, man, exp, bc = x
    if not man:
        if x == fzero: return fzero
        if x == finf: return atan_inf(0, prec, rnd)
        if x == fninf: return atan_inf(1, prec, rnd)
        return fnan
    mag = exp + bc
    # Essentially infinity
    if mag > prec+20:
        return atan_inf(sign, prec, rnd)
    # Essentially ~ x
    if -mag > prec+20:
        return mpf_perturb(x, 1-sign, prec, rnd)
    wp = prec + 30 + abs(mag)
    # For large x, use atan(x) = pi/2 - atan(1/x)
    if mag >= 2:
        x = mpf_rdiv_int(1, x, wp)
        reciprocal = True
    else:
        reciprocal = False
    # Work with |x| in fixed point; restore the sign at the end.
    t = to_fixed(x, wp)
    if sign:
        t = -t
    if wp < ATAN_TAYLOR_PREC:
        a = atan_taylor(t, wp)
    else:
        a = atan_newton(t, wp)
    if reciprocal:
        a = ((pi_fixed(wp)>>1)+1) - a
    if sign:
        a = -a
    return from_man_exp(a, -wp, prec, rnd)
# TODO: cleanup the special cases
def mpf_atan2(y, x, prec, rnd=round_fast):
    """
    Compute atan2(y, x): the angle of the point (x, y), in (-pi, pi].
    """
    xsign, xman, xexp, xbc = x
    ysign, yman, yexp, ybc = y
    # Special values of y (zero, infinities, nan).
    if not yman:
        if y == fzero and x != fnan:
            if mpf_sign(x) >= 0:
                return fzero
            return mpf_pi(prec, rnd)
        if y in (finf, fninf):
            if x in (finf, fninf):
                return fnan
            # pi/2
            if y == finf:
                return mpf_shift(mpf_pi(prec, rnd), -1)
            # -pi/2
            return mpf_neg(mpf_shift(mpf_pi(prec, negative_rnd[rnd]), -1))
        return fnan
    # Reduce to y > 0 using the odd symmetry atan2(-y, x) = -atan2(y, x).
    if ysign:
        return mpf_neg(mpf_atan2(mpf_neg(y), x, prec, negative_rnd[rnd]))
    # Special values of x.
    if not xman:
        if x == fnan:
            return fnan
        if x == finf:
            return fzero
        if x == fninf:
            return mpf_pi(prec, rnd)
        if y == fzero:
            return fzero
        return mpf_shift(mpf_pi(prec, rnd), -1)
    tquo = mpf_atan(mpf_div(y, x, prec+4), prec+4)
    # In the left half-plane, shift the principal value by pi.
    if xsign:
        return mpf_add(mpf_pi(prec+4), tquo, prec, rnd)
    else:
        return mpf_pos(tquo, prec, rnd)
def mpf_asin(x, prec, rnd=round_fast):
    """
    Compute asin(x). Raises ComplexResult for |x| > 1.
    """
    sign, man, exp, bc = x
    # bc+exp > 0 means |x| > 1 (except exactly +/-1).
    if bc+exp > 0 and x not in (fone, fnone):
        raise ComplexResult("asin(x) is real only for -1 <= x <= 1")
    # asin(x) = 2*atan(x/(1+sqrt(1-x**2)))
    wp = prec + 15
    a = mpf_mul(x, x)
    b = mpf_add(fone, mpf_sqrt(mpf_sub(fone, a, wp), wp), wp)
    c = mpf_div(x, b, wp)
    return mpf_shift(mpf_atan(c, prec, rnd), 1)
def mpf_acos(x, prec, rnd=round_fast):
    """
    Compute acos(x). Raises ComplexResult for |x| > 1.
    """
    # acos(x) = 2*atan(sqrt(1-x**2)/(1+x))
    sign, man, exp, bc = x
    if bc + exp > 0:
        if x not in (fone, fnone):
            raise ComplexResult("acos(x) is real only for -1 <= x <= 1")
        # x = -1 would make the formula divide by zero; acos(-1) = pi.
        if x == fnone:
            return mpf_pi(prec, rnd)
    wp = prec + 15
    a = mpf_mul(x, x)
    b = mpf_sqrt(mpf_sub(fone, a, wp), wp)
    c = mpf_div(b, mpf_add(fone, x, wp), wp)
    return mpf_shift(mpf_atan(c, prec, rnd), 1)
def mpf_asinh(x, prec, rnd=round_fast):
    """
    Compute asinh(x) = log(x + sqrt(x**2 + 1)) for any real x.
    """
    wp = prec + 20
    sign, man, exp, bc = x
    mag = exp+bc
    if mag < -8:
        # For tiny x, asinh(x) ~= x to full precision.
        if mag < -wp:
            return mpf_perturb(x, 1-sign, prec, rnd)
        # Extra bits to compensate cancellation in x + sqrt(x^2+1) - 1.
        wp += (-mag)
    # asinh(x) = log(x+sqrt(x**2+1))
    # use reflection symmetry to avoid cancellation
    q = mpf_sqrt(mpf_add(mpf_mul(x, x), fone, wp), wp)
    q = mpf_add(mpf_abs(x), q, wp)
    if sign:
        return mpf_neg(mpf_log(q, prec, negative_rnd[rnd]))
    else:
        return mpf_log(q, prec, rnd)
def mpf_acosh(x, prec, rnd=round_fast):
    """
    Compute acosh(x) = log(x + sqrt(x**2 - 1)).
    Raises ComplexResult for x < 1.
    """
    # acosh(x) = log(x+sqrt(x**2-1))
    wp = prec + 15
    if mpf_cmp(x, fone) == -1:
        raise ComplexResult("acosh(x) is real only for x >= 1")
    q = mpf_sqrt(mpf_add(mpf_mul(x,x), fnone, wp), wp)
    return mpf_log(mpf_add(x, q, wp), prec, rnd)
def mpf_atanh(x, prec, rnd=round_fast):
    """
    Compute atanh(x) = log((1+x)/(1-x))/2.
    Raises ComplexResult for |x| > 1; returns +/-inf at x = +/-1.
    """
    # atanh(x) = log((1+x)/(1-x))/2
    sign, man, exp, bc = x
    # Special values (inf/nan have man == 0 but nonzero exp).
    if (not man) and exp:
        if x in (fzero, fnan):
            return x
        raise ComplexResult("atanh(x) is real only for -1 <= x <= 1")
    mag = bc + exp
    if mag > 0:
        # mag == 1 with man == 1 means x is exactly +/-1.
        if mag == 1 and man == 1:
            return [finf, fninf][sign]
        raise ComplexResult("atanh(x) is real only for -1 <= x <= 1")
    wp = prec + 15
    if mag < -8:
        # For tiny x, atanh(x) ~= x to full precision.
        if mag < -wp:
            return mpf_perturb(x, sign, prec, rnd)
        wp += (-mag)
    a = mpf_add(x, fone, wp)
    b = mpf_sub(fone, x, wp)
    return mpf_shift(mpf_log(mpf_div(a, b, wp), prec, rnd), -1)
def mpf_fibonacci(x, prec, rnd=round_fast):
    """
    Compute the Fibonacci function F(x), extended to real x via the
    modified Binet formula F(x) = (phi^x - cos(pi*x)/phi^x)/sqrt(5).
    """
    sign, man, exp, bc = x
    if not man:
        # F(-inf) is undefined; F(+inf) = +inf; F(0) = 0; F(nan) = nan.
        if x == fninf:
            return fnan
        return x
    # F(2^n) ~= 2^(2^n)
    size = abs(exp+bc)
    if exp >= 0:
        # Exact
        if size < 10 or size <= bitcount(prec):
            return from_int(ifib(to_int(x)), prec, rnd)
    # Use the modified Binet formula
    # Since F(x) grows like phi^x, ~size extra working bits are needed.
    wp = prec + size + 20
    a = mpf_phi(wp)
    # b = 2*phi - 1 = sqrt(5)
    b = mpf_add(mpf_shift(a, 1), fnone, wp)
    u = mpf_pow(a, x, wp)
    v = mpf_cos_pi(x, wp)
    v = mpf_div(v, u, wp)
    u = mpf_sub(u, v, wp)
    u = mpf_div(u, b, prec, rnd)
    return u
#-------------------------------------------------------------------------------
# Exponential-type functions
#-------------------------------------------------------------------------------

def exponential_series(x, prec, type=0):
    """
    Taylor series for cosh/sinh or cos/sin.

    type = 0 -- returns exp(x)  (slightly faster than cosh+sinh)
    type = 1 -- returns (cosh(x), sinh(x))
    type = 2 -- returns (cos(x), sin(x))

    x is a fixed-point integer with precision prec; results are
    fixed-point with the same precision.
    """
    if x < 0:
        x = -x
        sign = 1
    else:
        sign = 0
    # r = number of argument halvings; shrinking x speeds up the series
    # at the cost of r squarings (double-angle steps) afterwards.
    r = int(0.5*prec**0.5)
    xmag = bitcount(x) - prec
    r = max(0, xmag + r)
    extra = 10 + 2*max(r,-xmag)
    wp = prec + extra
    x <<= (extra - r)
    one = MPZ_ONE << wp
    # cos/sin series alternates in sign; cosh/sinh does not.
    alt = (type == 2)
    if prec < EXP_SERIES_U_CUTOFF:
        # Two interleaved sub-series (even/odd pairs of terms).
        x2 = a = (x*x) >> wp
        x4 = (x2*x2) >> wp
        s0 = s1 = MPZ_ZERO
        k = 2
        while a:
            a //= (k-1)*k; s0 += a; k += 2
            a //= (k-1)*k; s1 += a; k += 2
            a = (a*x4) >> wp
        s1 = (x2*s1) >> wp
        if alt:
            c = s1 - s0 + one
        else:
            c = s1 + s0 + one
    else:
        # At very high precision, split into u sub-series to reduce the
        # number of full-width multiplications.
        u = int(0.3*prec**0.35)
        x2 = a = (x*x) >> wp
        xpowers = [one, x2]
        for i in xrange(1, u):
            xpowers.append((xpowers[-1]*x2)>>wp)
        sums = [MPZ_ZERO] * u
        k = 2
        while a:
            for i in xrange(u):
                a //= (k-1)*k
                if alt and k & 2: sums[i] -= a
                else: sums[i] += a
                k += 2
            a = (a*xpowers[-1]) >> wp
        for i in xrange(1, u):
            sums[i] = (sums[i]*xpowers[i]) >> wp
        c = sum(sums) + one
    if type == 0:
        # Recover exp from cosh: sinh = sqrt(cosh^2 - 1), exp = cosh+sinh.
        s = isqrt_fast(c*c - (one<<wp))
        if sign:
            v = c - s
        else:
            v = c + s
        for i in xrange(r):
            v = (v*v) >> wp
        return v >> extra
    else:
        # Repeatedly apply the double-angle formula
        # cosh(2*x) = 2*cosh(x)^2 - 1
        # cos(2*x) = 2*cos(x)^2 - 1
        pshift = wp-1
        for i in xrange(r):
            c = ((c*c) >> pshift) - one
        # With the abs, this is the same for sinh and sin
        s = isqrt_fast(abs((one<<wp) - c*c))
        if sign:
            s = -s
        return (c>>extra), (s>>extra)
def exp_basecase(x, prec):
    """
    Compute exp(x) as a fixed-point number. Works for any x,
    but for speed should have |x| < 1. For an arbitrary number,
    use exp(x) = exp(x-m*log(2)) * 2^m where m = floor(x/log(2)).
    """
    if prec > EXP_COSH_CUTOFF:
        return exponential_series(x, prec, 0)
    # Halve the argument r times (see note below) to speed convergence.
    r = int(prec**0.5)
    prec += r
    # Two interleaved sub-series: even and odd powers of x.
    s0 = s1 = (MPZ_ONE << prec)
    k = 2
    a = x2 = (x*x) >> prec
    while a:
        a //= k; s0 += a; k += 1
        a //= k; s1 += a; k += 1
        a = (a*x2) >> prec
    s1 = (s1*x) >> prec
    s = s0 + s1
    # Undo the halvings: exp(x) = exp(x/2^r)^(2^r).
    u = r
    while r:
        s = (s*s) >> prec
        r -= 1
    return s >> u
def exp_expneg_basecase(x, prec):
    """
    Computation of exp(x), exp(-x)
    """
    if prec > EXP_COSH_CUTOFF:
        # One cosh/sinh series yields both values at high precision.
        c, s = exponential_series(x, prec, 1)
        return c + s, c - s
    # Otherwise compute exp(x) directly and obtain exp(-x) as its
    # fixed-point reciprocal (1 at double width, divided by exp(x)).
    pos = exp_basecase(x, prec)
    neg = (MPZ_ONE << (prec+prec)) // pos
    return pos, neg
def cos_sin_basecase(x, prec):
    """
    Compute cos(x), sin(x) as fixed-point numbers, assuming x
    in [0, pi/2). For an arbitrary number, use x' = x - m*(pi/2)
    where m = floor(x/(pi/2)) along with quarter-period symmetries.
    """
    if prec > COS_SIN_CACHE_PREC:
        return exponential_series(x, prec, 2)
    # Split x = t*2^precs + (x - t*2^precs): look up cos/sin at the
    # coarse anchor t, series-expand the small remainder, then combine
    # with the angle-addition formulas.
    precs = prec - COS_SIN_CACHE_STEP
    t = x >> precs
    n = int(t)
    if n not in cos_sin_cache:
        w = t<<(10+COS_SIN_CACHE_PREC-COS_SIN_CACHE_STEP)
        cos_t, sin_t = exponential_series(w, 10+COS_SIN_CACHE_PREC, 2)
        cos_sin_cache[n] = (cos_t>>10), (sin_t>>10)
    cos_t, sin_t = cos_sin_cache[n]
    offset = COS_SIN_CACHE_PREC - prec
    cos_t >>= offset
    sin_t >>= offset
    x -= t << precs
    # Taylor series for the remainder (alternating, interleaved).
    cos = MPZ_ONE << prec
    sin = x
    k = 2
    a = -((x*x) >> prec)
    while a:
        a //= k; cos += a; k += 1; a = (a*x) >> prec
        a //= k; sin += a; k += 1; a = -((a*x) >> prec)
    # Angle addition: (cos, sin)(t + r) from (cos, sin) of each part.
    return ((cos*cos_t-sin*sin_t) >> prec), ((sin*cos_t+cos*sin_t) >> prec)
def mpf_exp(x, prec, rnd=round_fast):
    """
    Compute exp(x) for an mpf value x, correctly rounded.
    """
    sign, man, exp, bc = x
    if man:
        mag = bc + exp
        wp = prec + 14
        if sign:
            man = -man
        # TODO: the best cutoff depends on both x and the precision.
        if prec > 600 and exp >= 0:
            # x is an exact integer: exp(n) = e^n by integer powering.
            # Need about log2(exp(n)) ~= 1.45*mag extra precision
            e = mpf_e(wp+int(1.45*mag))
            return mpf_pow_int(e, man<<exp, prec, rnd)
        # Tiny x: exp(x) ~= 1 + x.
        if mag < -wp:
            return mpf_perturb(fone, sign, prec, rnd)
        # |x| >= 2
        if mag > 1:
            # For large arguments: exp(2^mag*(1+eps)) =
            # exp(2^mag)*exp(2^mag*eps) = exp(2^mag)*(1 + 2^mag*eps + ...)
            # so about mag extra bits is required.
            wpmod = wp + mag
            offset = exp + wpmod
            if offset >= 0:
                t = man << offset
            else:
                t = man >> (-offset)
            # Argument reduction: x = n*log(2) + t, exp(x) = 2^n * exp(t).
            lg2 = ln2_fixed(wpmod)
            n, t = divmod(t, lg2)
            n = int(n)
            t >>= mag
        else:
            offset = exp + wp
            if offset >= 0:
                t = man << offset
            else:
                t = man >> (-offset)
            n = 0
        man = exp_basecase(t, wp)
        return from_man_exp(man, n-wp, prec, rnd)
    # Special values: exp(0) = 1, exp(-inf) = 0, exp(inf/nan) = x.
    if not exp:
        return fone
    if x == fninf:
        return fzero
    return x
def mpf_cosh_sinh(x, prec, rnd=round_fast, tanh=0):
    """Simultaneously compute (cosh(x), sinh(x)) for real x.

    With tanh=1, returns tanh(x) (a single value) instead.
    """
    sign, man, exp, bc = x
    # Special values (inf/nan).
    if (not man) and exp:
        if tanh:
            if x == finf: return fone
            if x == fninf: return fnone
            return fnan
        if x == finf: return (finf, finf)
        if x == fninf: return (finf, fninf)
        return fnan, fnan
    mag = exp+bc
    wp = prec+14
    if mag < -4:
        # Extremely close to 0, sinh(x) ~= x and cosh(x) ~= 1
        if mag < -wp:
            if tanh:
                return mpf_perturb(x, 1-sign, prec, rnd)
            cosh = mpf_perturb(fone, 0, prec, rnd)
            sinh = mpf_perturb(x, sign, prec, rnd)
            return cosh, sinh
        # Fix for cancellation when computing sinh
        wp += (-mag)
    # Does exp(-2*x) vanish?
    if mag > 10:
        if 3*(1<<(mag-1)) > wp:
            # XXX: rounding
            # cosh(x) ~= sinh(x) ~= exp(|x|)/2 for large |x|.
            if tanh:
                return mpf_perturb([fone,fnone][sign], 1-sign, prec, rnd)
            c = s = mpf_shift(mpf_exp(mpf_abs(x), prec, rnd), -1)
            if sign:
                s = mpf_neg(s)
            return c, s
    # |x| > 1: argument reduction x = n*log(2) + t.
    if mag > 1:
        wpmod = wp + mag
        offset = exp + wpmod
        if offset >= 0:
            t = man << offset
        else:
            t = man >> (-offset)
        lg2 = ln2_fixed(wpmod)
        n, t = divmod(t, lg2)
        n = int(n)
        t >>= mag
    else:
        offset = exp + wp
        if offset >= 0:
            t = man << offset
        else:
            t = man >> (-offset)
        n = 0
    a, b = exp_expneg_basecase(t, wp)
    # TODO: optimize division precision
    # cosh = (exp(x) + exp(-x))/2 = (a*2^n + b*2^-n)/2; the 2^n scaling
    # and halving are folded into the final from_man_exp exponent.
    cosh = a + (b>>(2*n))
    sinh = a - (b>>(2*n))
    if sign:
        sinh = -sinh
    if tanh:
        man = (sinh << wp) // cosh
        return from_man_exp(man, -wp, prec, rnd)
    else:
        cosh = from_man_exp(cosh, n-wp-1, prec, rnd)
        sinh = from_man_exp(sinh, n-wp-1, prec, rnd)
        return cosh, sinh
def mod_pi2(man, exp, mag, wp):
    """
    Reduce the argument man*2^exp modulo pi/2.

    Returns (t, n, wp): t is the fixed-point remainder at (possibly
    increased) working precision wp, and n counts subtracted quarter
    periods (used for the quadrant symmetries).
    """
    # Reduce to standard interval
    if mag > 0:
        i = 0
        while 1:
            # Retry with geometrically more guard bits until the
            # remainder is demonstrably free of cancellation.
            cancellation_prec = 20 << i
            wpmod = wp + mag + cancellation_prec
            pi2 = pi_fixed(wpmod-1)
            pi4 = pi2 >> 1
            offset = wpmod + exp
            if offset >= 0:
                t = man << offset
            else:
                t = man >> (-offset)
            n, y = divmod(t, pi2)
            # Distance from the nearest multiple of pi/2.
            if y > pi4:
                small = pi2 - y
            else:
                small = y
            # Accept once the remainder has enough significant bits.
            if small >> (wp+mag-10):
                n = int(n)
                t = y >> mag
                wp = wpmod - mag
                break
            i += 1
    else:
        wp += (-mag)
        offset = exp + wp
        if offset >= 0:
            t = man << offset
        else:
            t = man >> (-offset)
        n = 0
    return t, n, wp
def mpf_cos_sin(x, prec, rnd=round_fast, which=0, pi=False):
    """
    which:
    0 -- return cos(x), sin(x)
    1 -- return cos(x)
    2 -- return sin(x)
    3 -- return tan(x)

    if pi=True, compute for pi*x
    """
    sign, man, exp, bc = x
    # Special values: zero, infinities and nan all have man == 0.
    if not man:
        if exp:
            c, s = fnan, fnan
        else:
            c, s = fone, fzero
        if which == 0: return c, s
        if which == 1: return c
        if which == 2: return s
        if which == 3: return s
    mag = bc + exp
    wp = prec + 10
    # Extremely small?
    if mag < 0:
        if mag < -wp:
            # cos(x) ~= 1, sin(x) ~= x, tan(x) ~= x to full precision.
            if pi:
                x = mpf_mul(x, mpf_pi(wp))
            c = mpf_perturb(fone, 1, prec, rnd)
            s = mpf_perturb(x, 1-sign, prec, rnd)
            if which == 0: return c, s
            if which == 1: return c
            if which == 2: return s
            if which == 3: return mpf_perturb(x, sign, prec, rnd)
    if pi:
        # pi*x at integer or half-integer x gives exact values.
        if exp >= -1:
            if exp == -1:
                c = fzero
                s = (fone, fnone)[bool(man & 2) ^ sign]
            elif exp == 0:
                c, s = (fnone, fzero)
            else:
                c, s = (fone, fzero)
            if which == 0: return c, s
            if which == 1: return c
            if which == 2: return s
            if which == 3: return mpf_div(s, c, prec, rnd)
        # Subtract nearest half-integer (= mod by pi/2)
        n = ((man >> (-exp-2)) + 1) >> 1
        man = man - (n << (-exp-1))
        mag2 = bitcount(man) + exp
        wp = prec + 10 - mag2
        offset = exp + wp
        if offset >= 0:
            t = man << offset
        else:
            t = man >> (-offset)
        # Scale by pi only after the exact reduction.
        t = (t*pi_fixed(wp)) >> wp
    else:
        t, n, wp = mod_pi2(man, exp, mag, wp)
    c, s = cos_sin_basecase(t, wp)
    # Apply quadrant symmetries based on the quarter-period count n.
    m = n & 3
    if m == 1: c, s = -s, c
    elif m == 2: c, s = -c, -s
    elif m == 3: c, s = s, -c
    if sign:
        s = -s
    if which == 0:
        c = from_man_exp(c, -wp, prec, rnd)
        s = from_man_exp(s, -wp, prec, rnd)
        return c, s
    if which == 1:
        return from_man_exp(c, -wp, prec, rnd)
    if which == 2:
        return from_man_exp(s, -wp, prec, rnd)
    if which == 3:
        return from_rational(s, c, prec, rnd)
# Convenience wrappers around mpf_cos_sin / mpf_cosh_sinh. The integer
# argument selects which value(s) to return (0=pair, 1=cos, 2=sin, 3=tan)
# and the trailing 1 selects the pi-scaled variants (cos(pi*x), etc.).
def mpf_cos(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 1)
def mpf_sin(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 2)
def mpf_tan(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 3)
def mpf_cos_sin_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 0, 1)
def mpf_cos_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 1, 1)
def mpf_sin_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 2, 1)
def mpf_cosh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd)[0]
def mpf_sinh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd)[1]
def mpf_tanh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd, tanh=1)
# Low-overhead fixed-point versions

def cos_sin_fixed(x, prec, pi2=None):
    """
    Fixed-point (cos(x), sin(x)). pi2, if given, must be pi/2 as a
    fixed-point value with precision prec (it is computed otherwise).
    """
    if pi2 is None:
        pi2 = pi_fixed(prec-1)
    # Reduce modulo pi/2, then restore the quadrant by symmetry.
    n, t = divmod(x, pi2)
    n = int(n)
    c, s = cos_sin_basecase(t, prec)
    m = n & 3
    if m == 0: return c, s
    if m == 1: return -s, c
    if m == 2: return -c, -s
    if m == 3: return s, -c
def exp_fixed(x, prec, ln2=None):
    """Return fixed-point exp(x) at precision ``prec``.

    ``ln2`` may be passed in as a precomputed ``ln2_fixed(prec)`` value
    to avoid recomputing it on every call.
    """
    if ln2 is None:
        ln2 = ln2_fixed(prec)
    # Write x = shift*log(2) + rem, so exp(x) = exp(rem) * 2**shift.
    shift, rem = divmod(x, ln2)
    shift = int(shift)
    mantissa = exp_basecase(rem, prec)
    if shift >= 0:
        return mantissa << shift
    return mantissa >> (-shift)
| bsd-3-clause | 501419216ae7c4dad71305f40420f20c | 29.681077 | 83 | 0.497609 | 3.05124 | false | false | false | false |
mattpap/sympy-polys | sympy/ntheory/residue.py | 4 | 1679 | from sympy.core.numbers import igcd
from primetest import isprime
def totient_(n):
    """Count the integers in ``[1, n)`` that are relatively prime to ``n``.

    Raises ValueError for ``n < 1``.
    """
    if n < 1:
        raise ValueError("n must be a positive integer")
    return sum(1 for k in xrange(1, n) if igcd(k, n) == 1)
def n_order(a, n):
    """Return the order of ``a`` modulo ``n``.

    The order is the smallest positive integer ``k`` such that
    ``a**k`` leaves a remainder of 1 with ``n``.  ``a`` and ``n``
    must be relatively prime.
    """
    assert igcd(a, n) == 1
    if a > n:
        a = a % n
    # Track a**k (mod n) with a running product instead of recomputing
    # the full power on every iteration.
    residue = 1
    for k in xrange(1, totient_(n) + 1):
        residue = (residue * a) % n
        if residue == 1:
            return k
def is_primitive_root(a, p):
    """Return True if ``a`` is a primitive root of ``p``.

    ``a`` is a primitive root when its multiplicative order modulo
    ``p`` equals ``totient_(p)``.
    """
    assert igcd(a, p) == 1, "The two numbers should be relatively prime"
    if a > p:
        a = a % p
    return n_order(a, p) == totient_(p)
def is_quad_residue(a, p):
    """Return True if ``a`` is a quadratic residue of ``p``.

    ``p`` should be an odd prime and ``a`` should be relatively prime
    to ``p``.  Uses Euler's criterion: ``a`` is a residue exactly when
    ``a**((p-1)/2) == 1 (mod p)``.
    """
    assert isprime(p) and p != 2, "p should be an odd prime"
    assert igcd(a, p) == 1, "The two numbers should be relatively prime"
    if a > p:
        a = a % p
    # Three-argument pow() performs modular exponentiation in
    # O(log p) multiplications; the previous form a**((p-1)//2) % p
    # materialized an astronomically large intermediate integer first.
    return pow(a, (p - 1) // 2, p) == 1
def legendre_symbol(a, p):
    """Return 1 if ``a`` is a quadratic residue of ``p``, else -1.

    ``p`` should be an odd prime by definition.
    """
    assert isprime(p) and p != 2, "p should be an odd prime"
    assert igcd(a, p) == 1, "The two numbers should be relatively prime"
    if a > p:
        a = a % p
    if is_quad_residue(a, p):
        return 1
    return -1
| bsd-3-clause | 552e7f296d8838cdbd9a31ba38a0da4c | 25.650794 | 70 | 0.584276 | 3.173913 | false | false | false | false |
mattpap/sympy-polys | sympy/polys/monomialtools.py | 2 | 5819 | """Tools and arithmetics for monomials of distributed polynomials. """
from sympy.core.mul import Mul
from sympy.core.basic import S
from sympy.functions import factorial
from sympy.utilities import all, any
from sympy.utilities import cythonized
def monomials(variables, degree):
    """Generate a set of monomials of the given total degree or less.

    Given a set of variables `V` and a total degree `N`, generate all
    monomials of degree at most `N`.  The total number of monomials is
    `(#V + N)! / (#V! N!)`, so the result can be huge for dense cases.

    >>> from sympy import monomials
    >>> from sympy.abc import x, y

    >>> sorted(monomials([x, y], 2))
    [1, x, y, x**2, y**2, x*y]

    >>> sorted(monomials([x, y], 3))
    [1, x, y, x**2, x**3, y**2, y**3, x*y, x*y**2, y*x**2]

    """
    if not variables:
        # Only the constant monomial remains.
        return set([S.One])

    head, tail = variables[0], variables[1:]

    # Recurse on the remaining variables, then graft on every power of
    # the leading variable that still fits under the degree bound.
    result = monomials(tail, degree)

    for power in range(1, degree + 1):
        result |= set([head**power * monom
                       for monom in monomials(tail, degree - power)])

    return result
def monomial_count(V, N):
    """Compute the number of monomials of degree `N` in `#V` variables.

    The number of monomials is given as `(#V + N)! / (#V! N!)`, e.g.::

        >>> from sympy import monomials, monomial_count
        >>> from sympy.abc import x, y

        >>> monomial_count(2, 2)
        6

        >>> M = monomials([x, y], 2)

        >>> sorted(M)
        [1, x, y, x**2, y**2, x*y]
        >>> len(M)
        6

    """
    return factorial(V + N) / (factorial(V) * factorial(N))
# Comparison functions implementing the classical admissible monomial
# orders.  Each takes two exponent tuples and follows the Python 2
# ``cmp`` protocol (returns a negative, zero or positive integer).
def monomial_lex_cmp(a, b):
    # Pure lexicographic order: compare exponent tuples directly.
    return cmp(a, b)

def monomial_grlex_cmp(a, b):
    # Graded lex: compare total degrees first, break ties lexicographically.
    return cmp(sum(a), sum(b)) or cmp(a, b)

def monomial_grevlex_cmp(a, b):
    # Graded reverse lex: total degree first, then the reversed tuples
    # with operands swapped.
    return cmp(sum(a), sum(b)) or cmp(tuple(reversed(b)), tuple(reversed(a)))
# Registry mapping order names to their comparison functions; consumed
# by monomial_cmp() below.
_monomial_order = {
    'lex' : monomial_lex_cmp,
    'grlex' : monomial_grlex_cmp,
    'grevlex' : monomial_grevlex_cmp,
}
def monomial_cmp(order):
    """Return a function defining an admissible order on monomials.

    Currently supported orderings are:

    1. lex     - lexicographic order
    2. grlex   - graded lexicographic order
    3. grevlex - reversed graded lexicographic order
    """
    if order in _monomial_order:
        return _monomial_order[order]
    raise ValueError("expected valid monomial order, got %s" % order)
@cythonized("a,b")
def monomial_mul(A, B):
    """Multiplication of tuples representing monomials.

    Adds the exponent tuples componentwise, e.g. multiplying
    `x**3*y**4*z` with `x*y**2`::

        >>> from sympy.polys.monomialtools import monomial_mul
        >>> monomial_mul((3, 4, 1), (1, 2, 0))
        (4, 6, 1)

    which gives `x**4*y**5*z`.
    """
    product = []

    for x, y in zip(A, B):
        product.append(x + y)

    return tuple(product)
@cythonized("a,b,c")
def monomial_div(A, B):
    """Division of tuples representing monomials.

    Returns the componentwise difference ``A - B``, or ``None`` when
    some exponent would become negative (i.e. `B` does not divide `A`)::

        >>> from sympy.polys.monomialtools import monomial_div
        >>> monomial_div((3, 4, 1), (1, 2, 0))
        (2, 2, 1)

        >>> monomial_div((3, 4, 1), (1, 2, 2)) is None
        True

    """
    quotient = []

    for x, y in zip(A, B):
        d = x - y

        if d < 0:
            # Early exit: B does not divide A.
            return None

        quotient.append(d)

    return tuple(quotient)
@cythonized("a,b")
def monomial_gcd(A, B):
    """Greatest common divisor of tuples representing monomials.

    Takes the componentwise minimum of the exponent tuples::

        >>> from sympy.polys.monomialtools import monomial_gcd
        >>> monomial_gcd((3, 4, 1), (1, 2, 0))
        (1, 2, 0)

    which gives `x*y**2`.
    """
    result = []

    for x, y in zip(A, B):
        result.append(min(x, y))

    return tuple(result)
@cythonized("a,b")
def monomial_lcm(A, B):
    """Least common multiple of tuples representing monomials.

    Takes the componentwise maximum of the exponent tuples::

        >>> from sympy.polys.monomialtools import monomial_lcm
        >>> monomial_lcm((3, 4, 1), (1, 2, 0))
        (3, 4, 1)

    which gives `x**3*y**4*z`.
    """
    result = []

    for x, y in zip(A, B):
        result.append(max(x, y))

    return tuple(result)
@cythonized("i,n")
def monomial_max(*monoms):
    """Returns maximal degree for each variable in a set of monomials.

    Consider monomials `x**3*y**4*z**5`, `y**5*z` and `x**6*y**3*z**9`.
    The maximal degree for each of the `x`, `y` and `z` variables::

        >>> from sympy.polys.monomialtools import monomial_max
        >>> monomial_max((3,4,5), (0,5,1), (6,3,9))
        (6, 5, 9)

    """
    result = list(monoms[0])

    for monom in monoms[1:]:
        for i, exp in enumerate(monom):
            if exp > result[i]:
                result[i] = exp

    return tuple(result)
@cythonized("i,n")
def monomial_min(*monoms):
    """Returns minimal degree for each variable in a set of monomials.

    Consider monomials `x**3*y**4*z**5`, `y**5*z` and `x**6*y**3*z**9`.
    The minimal degree for each of the `x`, `y` and `z` variables::

        >>> from sympy.polys.monomialtools import monomial_min
        >>> monomial_min((3,4,5), (0,5,1), (6,3,9))
        (0, 3, 1)

    """
    result = list(monoms[0])

    for monom in monoms[1:]:
        for i, exp in enumerate(monom):
            if exp < result[i]:
                result[i] = exp

    return tuple(result)
| bsd-3-clause | 540b3e935e7e57724b1a5ad52193af20 | 25.939815 | 77 | 0.550266 | 3.073957 | false | false | false | false |
mattpap/sympy-polys | sympy/functions/combinatorial/factorials.py | 2 | 12073 | from sympy.core.basic import S, C, sympify
from sympy.core.function import Function
from sympy.ntheory import sieve
from math import sqrt
###############################################################################
######################## FACTORIAL and MULTI-FACTORIAL ########################
###############################################################################
class Factorial(Function):
    """Implementation of factorial function over nonnegative integers.
    For the sake of convenience and simplicity of procedures using
    this function it is defined for negative integers and returns
    zero in this case.

    The factorial is very important in combinatorics where it gives
    the number of ways in which 'n' objects can be permuted. It also
    arises in calculus, probability, number theory etc.

    There is strict relation of factorial with gamma function. In
    fact n! = gamma(n+1) for nonnegative integers. Rewrite of this
    kind is very useful in case of combinatorial simplification.

    Computation of the factorial is done using two algorithms. For
    small arguments naive product is evaluated. However for bigger
    input algorithm Prime-Swing is used. It is the fastest algorithm
    known and computes n! via prime factorization of special class
    of numbers, called here the 'Swing Numbers'.

    >>> from sympy import Symbol, factorial
    >>> n = Symbol('n', integer=True)

    >>> factorial(-2)
    0

    >>> factorial(0)
    1

    >>> factorial(7)
    5040

    >>> factorial(n)
    n!

    >>> factorial(2*n)
    (2*n)!

    """

    nargs = 1

    # Precomputed swing numbers for n < 33; used as the base case
    # of _swing() below.
    _small_swing = [
        1,1,1,3,3,15,5,35,35,315,63,693,231,3003,429,6435,6435,109395,
        12155,230945,46189,969969,88179,2028117,676039,16900975,1300075,
        35102025,5014575,145422675,9694845,300540195,300540195
    ]

    @classmethod
    def _swing(cls, n):
        # Compute the n-th swing number from its prime factorization
        # (the Prime-Swing algorithm mentioned in the class docstring).
        if n < 33:
            return cls._small_swing[n]
        else:
            N, primes = int(sqrt(n)), []

            # Primes 3 <= p <= sqrt(n): include ``prime`` once for every
            # odd quotient n // prime**k.
            for prime in sieve.primerange(3, N+1):
                p, q = 1, n

                while True:
                    q //= prime

                    if q > 0:
                        if q & 1 == 1:
                            p *= prime
                    else:
                        break

                if p > 1:
                    primes.append(p)

            # Primes sqrt(n) < p <= n/3 appear exactly once, and only
            # when floor(n/p) is odd.
            for prime in sieve.primerange(N+1, n//3 + 1):
                if (n // prime) & 1 == 1:
                    primes.append(prime)

            # Every prime in (n/2, n] contributes exactly one factor.
            L_product = R_product = 1

            for prime in sieve.primerange(n//2 + 1, n+1):
                L_product *= prime

            for prime in primes:
                R_product *= prime

            return L_product*R_product

    @classmethod
    def _recursive(cls, n):
        # Recursion used by eval(): combines the half-size result
        # squared with the swing number of n.
        if n < 2:
            return 1
        else:
            return (cls._recursive(n//2)**2)*cls._swing(n)

    @classmethod
    def eval(cls, n):
        n = sympify(n)

        if n.is_Number:
            if n is S.Zero:
                return S.One
            elif n.is_Integer:
                if n.is_negative:
                    # By convention (see class docstring) the factorial
                    # of a negative integer is zero.
                    return S.Zero
                else:
                    n, result = n.p, 1

                    if n < 20:
                        # Small arguments: the naive product is fastest.
                        for i in range(2, n+1):
                            result *= i
                    else:
                        # Split off the power of two: ``bits`` is the
                        # population count of n, and the odd part comes
                        # from the Prime-Swing recursion.
                        N, bits = n, 0

                        while N != 0:
                            if N & 1 == 1:
                                bits += 1

                            N = N >> 1

                        result = cls._recursive(n)*2**(n-bits)

                    return C.Integer(result)

        if n.is_integer:
            if n.is_negative:
                return S.Zero
            else:
                # Symbolic nonnegative integer: fall back to gamma.
                return C.gamma(n+1)

    @classmethod # ?
    def _eval_rewrite_as_gamma(self, arg):
        # n! == gamma(n + 1) for nonnegative arguments.
        return C.gamma(1 + arg)

    def _eval_is_integer(self):
        return self.args[0].is_integer
class MultiFactorial(Function):
    """Placeholder for the multifactorial function; not yet implemented."""
    pass
# Public lowercase alias for the Factorial class.
factorial = Factorial
###############################################################################
######################## RISING and FALLING FACTORIALS ########################
###############################################################################
class RisingFactorial(Function):
    """Rising factorial (also called Pochhammer symbol) is a double valued
    function arising in concrete mathematics, hypergeometric functions
    and series expansions. It is defined by

        rf(x, k) = x * (x+1) * ... * (x + k-1)

    where 'x' can be arbitrary expression and 'k' is an integer. For
    more information check "Concrete mathematics" by Graham, pp. 66
    or visit http://mathworld.wolfram.com/RisingFactorial.html page.

    >>> from sympy import rf
    >>> from sympy.abc import x

    >>> rf(x, 0)
    1

    >>> rf(1, 5)
    120

    >>> rf(x, 5) == x*(1 + x)*(2 + x)*(3 + x)*(4 + x)
    True
    """

    nargs = 2

    @classmethod
    def eval(cls, x, k):
        x = sympify(x)
        k = sympify(k)

        if x is S.NaN:
            return S.NaN
        elif x is S.One:
            # rf(1, k) = 1*2*...*k = k!
            return factorial(k)
        elif k.is_Integer:
            if k is S.NaN:
                return S.NaN
            elif k is S.Zero:
                # Empty product.
                return S.One
            else:
                if k.is_positive:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        # Sign determined by the parity of k.
                        if k.is_odd:
                            return S.NegativeInfinity
                        else:
                            return S.Infinity
                    else:
                        # rf(x, k) = x*(x+1)*...*(x+k-1)
                        return reduce(lambda r, i: r*(x+i), xrange(0, int(k)), 1)
                else:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        return S.Infinity
                    else:
                        # Negative k: rf(x, -k) = 1/((x-1)*(x-2)*...*(x-k))
                        return 1/reduce(lambda r, i: r*(x-i), xrange(1, abs(int(k))+1), 1)

    def _eval_rewrite_as_gamma(self, x, k):
        # rf(x, k) == gamma(x + k) / gamma(x)
        return C.gamma(x + k) / C.gamma(x)
class FallingFactorial(Function):
    """Falling factorial (related to rising factorial) is a double valued
    function arising in concrete mathematics, hypergeometric functions
    and series expansions. It is defined by

        ff(x, k) = x * (x-1) * ... * (x - k+1)

    where 'x' can be arbitrary expression and 'k' is an integer. For
    more information check "Concrete mathematics" by Graham, pp. 66
    or visit http://mathworld.wolfram.com/FallingFactorial.html page.

    >>> from sympy import ff
    >>> from sympy.abc import x

    >>> ff(x, 0)
    1

    >>> ff(5, 5)
    120

    >>> ff(x, 5) == x*(x-1)*(x-2)*(x-3)*(x-4)
    True
    """

    nargs = 2

    @classmethod
    def eval(cls, x, k):
        x = sympify(x)
        k = sympify(k)

        if x is S.NaN:
            return S.NaN
        elif k.is_Integer:
            if k is S.NaN:
                return S.NaN
            elif k is S.Zero:
                # Empty product.
                return S.One
            else:
                # NOTE: a dead local assignment (``result = S.One``) was
                # removed here; every branch below returns directly.
                if k.is_positive:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        # Sign determined by the parity of k.
                        if k.is_odd:
                            return S.NegativeInfinity
                        else:
                            return S.Infinity
                    else:
                        # ff(x, k) = x*(x-1)*...*(x-k+1)
                        return reduce(lambda r, i: r*(x-i), xrange(0, int(k)), 1)
                else:
                    if x is S.Infinity:
                        return S.Infinity
                    elif x is S.NegativeInfinity:
                        return S.Infinity
                    else:
                        # Negative k: ff(x, -k) = 1/((x+1)*(x+2)*...*(x+k))
                        return 1/reduce(lambda r, i: r*(x+i), xrange(1, abs(int(k))+1), 1)

    def _eval_rewrite_as_gamma(self, x, k):
        return (-1)**k * C.gamma(-x + k) / C.gamma(-x)
# Shorthand aliases used throughout sympy and in the docstrings above.
rf = RisingFactorial
ff = FallingFactorial
###############################################################################
########################### BINOMIAL COEFFICIENTS #############################
###############################################################################
class Binomial(Function):
    """Implementation of the binomial coefficient. It can be defined
    in two ways depending on its desired interpretation:

        C(n,k) = n!/(k!(n-k)!)   or   C(n, k) = ff(n, k)/k!

    First, in a strict combinatorial sense it defines the
    number of ways we can choose 'k' elements from a set of
    'n' elements. In this case both arguments are nonnegative
    integers and binomial is computed using an efficient
    algorithm based on prime factorization.

    The other definition is generalization for arbitrary 'n',
    however 'k' must also be nonnegative. This case is very
    useful when evaluating summations.

    For the sake of convenience for negative 'k' this function
    will return zero no matter what valued is the other argument.

    >>> from sympy import Symbol, Rational, binomial
    >>> n = Symbol('n', integer=True)

    >>> binomial(15, 8)
    6435

    >>> binomial(n, -1)
    0

    >>> [ binomial(0, i) for i in range(1)]
    [1]
    >>> [ binomial(1, i) for i in range(2)]
    [1, 1]
    >>> [ binomial(2, i) for i in range(3)]
    [1, 2, 1]
    >>> [ binomial(3, i) for i in range(4)]
    [1, 3, 3, 1]
    >>> [ binomial(4, i) for i in range(5)]
    [1, 4, 6, 4, 1]

    >>> binomial(Rational(5,4), 3)
    -5/128

    >>> binomial(n, 3)
    n*(1 - n)*(2 - n)/6

    """

    nargs = 2

    @classmethod
    def eval(cls, r, k):
        r, k = map(sympify, (r, k))

        if k.is_Number:
            if k is S.Zero:
                return S.One
            elif k.is_Integer:
                if k.is_negative:
                    # By convention C(r, k) == 0 for negative k.
                    return S.Zero
                else:
                    if r.is_Integer and r.is_nonnegative:
                        r, k = int(r), int(k)

                        if k > r:
                            return S.Zero
                        elif k > r // 2:
                            # Symmetry: C(r, k) == C(r, r-k).
                            k = r - k

                        M, result = int(sqrt(r)), 1

                        for prime in sieve.primerange(2, r+1):
                            if prime > r - k:
                                # Primes in (r-k, r] divide C(r, k) once.
                                result *= prime
                            elif prime > r // 2:
                                # Primes in (r/2, r-k] never divide C(r, k).
                                continue
                            elif prime > M:
                                # For primes above sqrt(r) a single base-p
                                # carry test decides divisibility.
                                if r % prime < k % prime:
                                    result *= prime
                            else:
                                # Kummer's theorem: the exponent of ``prime``
                                # in C(r, k) is the number of carries when
                                # adding k and r-k in base ``prime``.
                                R, K = r, k
                                exp = a = 0

                                while R > 0:
                                    a = int((R % prime) < (K % prime + a))
                                    R, K = R // prime, K // prime
                                    exp = a + exp

                                if exp > 0:
                                    result *= prime**exp

                        return C.Integer(result)
                    else:
                        # Generic case: C(r, k) = (r-k+1)*...*r / k!
                        result = r - k + 1

                        for i in xrange(2, k+1):
                            result *= r-k+i
                            result /= i

                        return result

        if k.is_integer:
            if k.is_negative:
                return S.Zero
            else:
                # Symbolic k: fall back to the gamma representation.
                return C.gamma(r+1)/(C.gamma(r-k+1)*C.gamma(k+1))

    def _eval_rewrite_as_gamma(self, r, k):
        return C.gamma(r+1) / (C.gamma(r-k+1)*C.gamma(k+1))

    def _eval_is_integer(self):
        return self.args[0].is_integer and self.args[1].is_integer
# Public lowercase alias for the Binomial class.
binomial = Binomial
| bsd-3-clause | 3737c1cf6d51ac718e32b919aba3d068 | 29.032338 | 90 | 0.434275 | 4.276656 | false | false | false | false |
mattpap/sympy-polys | sympy/utilities/pkgdata.py | 12 | 1759 | """
pkgdata is a simple, extensible way for a package to acquire data file
resources.
The getResource function is equivalent to the standard idioms, such as
the following minimal implementation::
import sys, os
def getResource(identifier, pkgname=__name__):
pkgpath = os.path.dirname(sys.modules[pkgname].__file__)
path = os.path.join(pkgpath, identifier)
return file(os.path.normpath(path), mode='rb')
When a __loader__ is present on the module given by __name__, it will defer
getResource to its get_data implementation and return it as a file-like
object (such as StringIO).
"""
import sys
import os
from cStringIO import StringIO
def get_resource(identifier, pkgname=__name__):
    """
    Acquire a readable object for a given package name and identifier.
    An IOError will be raised if the resource can not be found.

    For example::

        mydata = get_resource('mypkgdata.jpg').read()

    Note that the package name must be fully qualified, if given, such
    that it would be found in sys.modules.

    In some cases, get_resource will return a real file object.  In that
    case, it may be useful to use its name attribute to get the path
    rather than use it as a file-like object.  For example, you may
    be handing data off to a C API.
    """

    mod = sys.modules[pkgname]
    fn = getattr(mod, '__file__', None)
    if fn is None:
        # Bug fix: the original raise had no ``%`` argument, so the
        # message was always the literal string "%r has no __file__!".
        raise IOError("%r has no __file__!" % pkgname)
    path = os.path.join(os.path.dirname(fn), identifier)
    loader = getattr(mod, '__loader__', None)
    if loader is not None:
        try:
            data = loader.get_data(path)
        except IOError:
            # The loader could not provide the data; fall back to
            # reading straight from the filesystem below.
            pass
        else:
            return StringIO(data)
    # ``open`` instead of the Python-2-only ``file`` builtin; behavior
    # is identical on Python 2 and also works on Python 3.
    return open(os.path.normpath(path), 'rb')
| bsd-3-clause | 3c279ccde09eceb937ad9ef4e35e140a | 31.574074 | 75 | 0.664582 | 3.952809 | false | false | false | false |
wtforms/wtforms | tests/validators/test_optional.py | 1 | 1185 | import pytest
from wtforms.validators import optional
from wtforms.validators import StopValidation
def test_input_optional_passes(dummy_form, dummy_field):
    """
    optional should pass when the field holds a value
    """
    check = optional()
    dummy_field.raw_data = ["foobar"]
    dummy_field.data = "foobar"
    check(dummy_form, dummy_field)
@pytest.mark.parametrize("data_v, raw_data_v", [("", ""), (" ", " "), ("\t", "\t")])
def test_input_optional_raises(data_v, raw_data_v, dummy_form, dummy_field):
    """
    optional should stop the validation chain if the data are not given

    errors should be erased because the value is optional

    white space should be considered as empty string too
    """
    check = optional()
    dummy_field.data = data_v
    dummy_field.raw_data = raw_data_v

    with pytest.raises(StopValidation):
        check(dummy_form, dummy_field)
    assert check.field_flags == {"optional": True}

    # Pre-existing errors must be wiped out by the optional validator.
    dummy_field.errors = ["Invalid Integer Value"]
    assert len(dummy_field.errors) == 1
    with pytest.raises(StopValidation):
        check(dummy_form, dummy_field)
    assert len(dummy_field.errors) == 0
wtforms/wtforms | tests/test_locale_babel.py | 1 | 2853 | from decimal import Decimal
from decimal import ROUND_UP
import pytest
from tests.common import DummyPostData
from wtforms import Form
from wtforms.fields import DecimalField
from wtforms.utils import unset_value
class TestLocaleDecimal:
    # Exercises DecimalField's locale-aware formatting/parsing; hi_IN is
    # listed first, so Indian digit grouping (x,xx,xxx) is the default.
    class F(Form):
        class Meta:
            locales = ["hi_IN", "en_US"]

        a = DecimalField(use_locale=True)

    def _format_test(self, expected, val, locales=unset_value):
        # Render ``val`` through the field and compare the localized string.
        meta = None
        if locales is not unset_value:
            meta = {"locales": locales}
        form = self.F(meta=meta, a=Decimal(val))
        assert form.a._value() == expected

    def test_typeerror(self):
        # ``places`` and ``rounding`` are incompatible with use_locale.
        def build(**kw):
            form = self.F()
            DecimalField(
                use_locale=True,
                name="a",
                _form=form,
                _translations=form.meta.get_translations(form),
                **kw
            )

        with pytest.raises(TypeError):
            build(places=2)

        with pytest.raises(TypeError):
            build(rounding=ROUND_UP)

    def test_formatting(self):
        val = Decimal("123456.789")
        neg = Decimal("-5.2")

        self._format_test("1,23,456.789", val)
        self._format_test("-12,52,378.2", "-1252378.2")
        self._format_test("123,456.789", val, ["en_US"])
        self._format_test("-5.2", neg, ["en_US"])
        self._format_test("123.456,789", val, ["es_ES"])
        self._format_test("123.456,789", val, ["de_DE"])
        self._format_test("-5,2", neg, ["de_DE"])
        # de_CH uses U+2019 as the grouping separator.
        self._format_test("-12’345.2", "-12345.2", ["de_CH"])

    def _parse_test(self, raw_val, expected, locales=unset_value):
        # Submit ``raw_val`` as form data and expect it to validate.
        meta = None
        if locales is not unset_value:
            meta = {"locales": locales}
        form = self.F(DummyPostData(a=raw_val), meta=meta)
        if not form.validate():
            raise AssertionError(
                "Expected value %r to parse as a decimal, instead got %r"
                % (raw_val, form.a.errors)
            )
        assert form.a.data == expected

    def _fail_parse(self, raw_val, expected_error, locales=unset_value):
        # Submit ``raw_val`` and expect validation to fail with the
        # given (localized) error message.
        meta = None
        if locales is not unset_value:
            meta = {"locales": locales}
        form = self.F(DummyPostData(a=raw_val), meta=meta)
        assert not form.validate()
        assert form.a.errors[0] == expected_error

    def test_parsing(self):
        expected = Decimal("123456.789")
        self._parse_test("1,23,456.789", expected)
        self._parse_test("1,23,456.789", expected, ["en_US"])
        self._parse_test("1.23.456,789", expected, ["de_DE"])
        self._parse_test("1’23’456.789", expected, ["de_CH"])

        self._fail_parse("1,23,456.5", "Keine g\xfcltige Dezimalzahl.", ["de_DE"])
        self._fail_parse("1.234.567,5", "Not a valid decimal value.", ["en_US"])
nteract/papermill | docs/conf.py | 1 | 5193 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# papermill documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 15 09:50:01 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

# Make the package importable so ``import papermill`` below works when
# building from the docs/ directory.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '3.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'myst_parser',
    'sphinx_copybutton',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md', '.ipynb']

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'papermill'
copyright = '2018, nteract team'
author = 'nteract team'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import papermill

# The short X.Y version.
version = '.'.join(papermill.__version__.split('.')[0:2])
# The full version, including alpha/beta/rc tags.
release = papermill.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): newer Sphinx versions warn on ``language = None``;
# consider 'en' -- confirm against the Sphinx version in use.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'UPDATE.md']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "furo"

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    "sidebar_hide_name": True,
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_logo = "_static/images/papermill.png"

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'papermilldoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'papermill.tex', 'papermill Documentation', 'nteract team', 'manual')
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, 'papermill', 'papermill Documentation', [author], 1)]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        'papermill',
        'papermill Documentation',
        author,
        'papermill',
        'One line description of project.',
        'Miscellaneous',
    )
]

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
nteract/papermill | papermill/clientwrap.py | 1 | 4348 | import sys
import asyncio
from nbclient import NotebookClient
from nbclient.exceptions import CellExecutionError
from traitlets import Bool, Instance
class PapermillNotebookClient(NotebookClient):
    """
    Notebook client that executes the code cells
    and updates outputs
    """

    # When True, stream/text outputs are echoed to self.log as they arrive.
    log_output = Bool(False).tag(config=True)
    # Optional file-like objects that receive the kernel's stdout/stderr.
    stdout_file = Instance(object, default_value=None).tag(config=True)
    stderr_file = Instance(object, default_value=None).tag(config=True)

    def __init__(self, nb_man, km=None, raise_on_iopub_timeout=True, **kw):
        """Initializes the execution manager.

        Parameters
        ----------
        nb_man : NotebookExecutionManager
            Notebook execution manager wrapper being executed.
        km : KernelManager (optional)
            Optional kernel manager. If none is provided, a kernel manager will
            be created.
        """
        super().__init__(nb_man.nb, km=km, raise_on_iopub_timeout=raise_on_iopub_timeout, **kw)
        self.nb_man = nb_man

    def execute(self, **kwargs):
        """
        Wraps the parent class process call slightly.

        Runs all cells via papermill_execute_cells() inside a managed
        kernel and returns the executed notebook node.
        """
        self.reset_execution_trackers()

        # See https://bugs.python.org/issue37373 :(
        if sys.version_info[0] == 3 and sys.version_info[1] >= 8 and sys.platform.startswith('win'):
            asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

        with self.setup_kernel(**kwargs):
            self.log.info("Executing notebook with kernel: %s" % self.kernel_name)
            self.papermill_execute_cells()
            info_msg = self.wait_for_reply(self.kc.kernel_info())
            self.nb.metadata['language_info'] = info_msg['content']['language_info']
            self.set_widgets_metadata()

        return self.nb

    def papermill_execute_cells(self):
        """
        This function replaces cell execution with its own wrapper.

        We are doing this for the following reasons:

        1. Notebooks will stop executing when they encounter a failure but not
           raise a `CellException`. This allows us to save the notebook with the
           traceback even though a `CellExecutionError` was encountered.

        2. We want to write the notebook as cells are executed. We inject our
           logic for that here.

        3. We want to include timing and execution status information with the
           metadata of each cell.
        """
        # Execute each cell and update the output in real time.
        for index, cell in enumerate(self.nb.cells):
            try:
                self.nb_man.cell_start(cell, index)
                self.execute_cell(cell, index)
            except CellExecutionError as ex:
                # Record the failure on the cell, then stop executing
                # further cells; cell_complete still runs via finally.
                self.nb_man.cell_exception(self.nb.cells[index], cell_index=index, exception=ex)
                break
            finally:
                self.nb_man.cell_complete(self.nb.cells[index], cell_index=index)

    def log_output_message(self, output):
        """
        Process a given output. May log it in the configured logger and/or write it into
        the configured stdout/stderr files.

        :param output: nbformat.notebooknode.NotebookNode
        :return:
        """
        if output.output_type == "stream":
            content = "".join(output.text)
            if output.name == "stdout":
                if self.log_output:
                    self.log.info(content)
                if self.stdout_file:
                    self.stdout_file.write(content)
                    self.stdout_file.flush()
            elif output.name == "stderr":
                if self.log_output:
                    # In case users want to redirect stderr differently, pipe to warning
                    self.log.warning(content)
                if self.stderr_file:
                    self.stderr_file.write(content)
                    self.stderr_file.flush()
        elif self.log_output and ("data" in output and "text/plain" in output.data):
            self.log.info("".join(output.data['text/plain']))

    def process_message(self, *arg, **kwargs):
        """Autosave after every kernel message and optionally log/mirror output."""
        output = super().process_message(*arg, **kwargs)
        self.nb_man.autosave_cell()
        if output and (self.log_output or self.stderr_file or self.stdout_file):
            self.log_output_message(output)
        return output
| bsd-3-clause | 0ca712c2b826b7206b6085a464fb0067 | 38.527273 | 100 | 0.606946 | 4.229572 | false | false | false | false |
datreant/datreant | src/datreant/tests/test_collections.py | 3 | 49853 | """Tests for Bundle.
"""
import numpy as np
import pytest
import datreant as dtr
def do_stuff(treant):
    """Return the ``name`` attribute of the given treant."""
    return getattr(treant, 'name')
def return_nothing(treant):
    """Access the treant's ``name`` attribute but return None."""
    _ = treant.name
class CollectionsTests(object):
    """Mixin tests for collections.

    Subclasses provide a ``collection`` fixture (the collection class
    under test) and a ``filled_collection`` fixture returning a
    collection of three members plus the members themselves.
    """

    def test_parents(self, collection, tmpdir):
        # Two treants share the 'free-associate' parent; the third lives
        # under 'characters', so exactly two distinct parents exist.
        with tmpdir.as_cwd():
            t1 = dtr.Treant('free-associate/lark')
            t2 = dtr.Treant('free-associate/hark')
            t3 = dtr.Treant('characters/linus')

            col = collection(t1, t2, t3)

            assert len(col.parents()) == 2
            assert 'free-associate' in col.parents().names
            assert 'linus' not in col.parents().names
            assert 'characters' in col.parents().names

    class TestGetitem(object):
        # Indexing semantics: fancy (integer-list), boolean and slice.
        @pytest.mark.parametrize('slx', (
            [1, 2],
            np.array([1, 2]),
        ))
        def test_fancy_index(self, filled_collection, slx):
            b, (t1, t2, t3) = filled_collection
            sl = b[slx]
            assert len(sl) == 2
            assert t2 == sl[0]
            assert t3 == sl[1]

        @pytest.mark.parametrize('slx', (
            [False, False, True],
            np.array([False, False, True]),
        ))
        def test_boolean_index(self, filled_collection, slx):
            b, (t1, t2, t3) = filled_collection
            sl = b[slx]
            assert len(sl) == 1
            assert t3 == sl[0]

        @pytest.mark.parametrize('slx', (
            slice(0, 1, None),
            slice(1, None, None),
            slice(None, None, -1),
            slice(None, None, 2),
        ))
        def test_slice_index(self, filled_collection, slx):
            # Slicing a collection must match slicing the member tuple.
            b, ts = filled_collection
            sl = b[slx]
            ref = ts[slx]
            for x, y in zip(sl, ref):
                assert x == y

        def test_getitem_IE(self, filled_collection):
            # Non-integer scalar indices are rejected.
            bundle = filled_collection[0]
            with pytest.raises(IndexError):
                bundle[4.0]

    class TestSetOperations(object):
        # Collections support -, |, & and ^ with set semantics, and raise
        # TypeError when the right operand is not a collection.
        def test_sub_single(self, filled_collection):
            b, (t1, t2, t3) = filled_collection
            b1 = b[[0, 1, 2]]
            b2 = b[1]
            b3 = b1 - b2
            assert len(b3) == 2
            assert t1 in b3
            assert t2 not in b3
            assert t3 in b3

        def test_sub_many(self, filled_collection):
            b, (t1, t2, t3) = filled_collection
            b1 = b[[0, 1]]
            b2 = b[[1, 2]]
            b3 = b1 - b2
            assert len(b3) == 1
            assert t1 in b3
            assert t2 not in b3

        def test_or(self, filled_collection):
            b, (t1, t2, t3) = filled_collection
            b1 = b[[0, 1]]
            b2 = b[[1, 2]]
            b3 = b1 | b2
            assert t1 in b3
            assert t2 in b3
            assert t3 in b3

        def test_and(self, filled_collection):
            b, (t1, t2, t3) = filled_collection
            b1 = b[[0, 1]]
            b2 = b[[1, 2]]
            b3 = b1 & b2
            assert t1 not in b3

        def test_xor(self, filled_collection):
            b, (t1, t2, t3) = filled_collection
            b1 = b[[0, 1]]
            b2 = b[[1, 2]]
            b3 = b1 ^ b2
            assert len(b3) == 2
            assert t1 in b3
            assert t2 not in b3
            assert t3 in b3

        def test_sub_TypeError(self, filled_collection):
            b = filled_collection[0]
            with pytest.raises(TypeError):
                b - ['this']

        def test_or_TypeError(self, filled_collection):
            b = filled_collection[0]
            with pytest.raises(TypeError):
                b | ['this']

        def test_and_TypeError(self, filled_collection):
            b = filled_collection[0]
            with pytest.raises(TypeError):
                b & ['this']

        def test_xor_TypeError(self, filled_collection):
            b = filled_collection[0]
            with pytest.raises(TypeError):
                b ^ ['this']

    class TestAddition(object):
        # ``+`` concatenates collections (and accepts a single member),
        # deduplicating shared members.
        def test_add_many(self, filled_collection):
            b, (t1, t2, t3) = filled_collection
            b1 = b[[0, 1]]
            b2 = b[[1, 2]]
            b3 = b1 + b2
            assert len(b3) == 3
            assert t1 in b3
            assert t2 in b3
            assert t3 in b3

        def test_add_singular(self, filled_collection):
            b, (t1, t2, t3) = filled_collection
            b1 = b[[0, 1]]
            b2 = b[2]
            b3 = b1 + b2
            assert len(b3) == 3
            assert t1 in b3
            assert t2 in b3
            assert t3 in b3

        def test_add(self, filled_collection):
            b = filled_collection[0]
            with pytest.raises(TypeError):
                b + 25
class TestView(CollectionsTests):
    """Tests for Views"""

    @pytest.fixture
    def collection(self):
        # collection class under test for the shared mixin tests
        return dtr.View

    @pytest.fixture
    def filled_collection(self, tmpdir):
        # returns (a View of [t1, t2, t3], then individual references to
        # each); a View may hold Trees, Leaves, and Treants alike
        with tmpdir.as_cwd():
            t1 = dtr.Tree('larry')
            t2 = dtr.Leaf('curly')
            t3 = dtr.Treant('moe')
            b = dtr.View(t1, t2, t3)
            return b, (t1, t2, t3)

    class TestGetitem(CollectionsTests.TestGetitem):
        """View-specific indexing on top of the shared getitem tests."""

        def test_getitem_name_string(self, filled_collection):
            """Indexing by name returns a View holding the matching member."""
            b, (t1, t2, t3) = filled_collection
            n = t1.name
            b_new = b[n]
            assert isinstance(b_new, dtr.View)
            assert b_new[0] == t1

    class TestAddition(CollectionsTests.TestAddition):
        """Adding single Trees/Leaves/Treants to a View."""

        def test_tree_addition(self, filled_collection):
            """A Tree can be appended to a View."""
            b, (t1, t2, t3) = filled_collection
            b1 = b[[1, 2]]
            b3 = b1 + t1
            assert len(b3) == 3
            assert isinstance(b3, dtr.View)
            assert t1 in b3

        def test_leaf_addition(self, filled_collection):
            """A Leaf can be appended to a View."""
            b, (t1, t2, t3) = filled_collection
            b1 = b[[0, 2]]
            b3 = b1 + t2
            assert len(b3) == 3
            assert isinstance(b3, dtr.View)
            assert t2 in b3

        def test_treant_addition(self, filled_collection):
            """A Treant can be appended to a View."""
            b, (t1, t2, t3) = filled_collection
            b1 = b[[0, 1]]
            b3 = b1 + t3
            assert len(b3) == 3
            assert isinstance(b3, dtr.View)
            # fixed: assert on t3 (the member just added); the previous
            # check `t2 in b3` passed vacuously because t2 was already in
            # b1, so a broken addition could never be caught here
            assert t3 in b3

    def test_exists(self, collection, tmpdir):
        # intentionally a no-op; presumably masks an existence test defined
        # elsewhere that does not apply to Views -- TODO confirm
        pass
class TestBundle(CollectionsTests):
    """Tests for elements of Bundle"""

    @pytest.fixture
    def collection(self):
        # collection class under test for the shared mixin tests
        return dtr.Bundle

    @pytest.fixture
    def filled_collection(self, tmpdir):
        # returns (a bundle of [t1, t2, t3], then individual references to
        # each)
        with tmpdir.as_cwd():
            t1 = dtr.Treant('larry')
            t2 = dtr.Treant('curly')
            t3 = dtr.Treant('moe')
            b = dtr.Bundle((t1, t2, t3))
            return b, (t1, t2, t3)

    @pytest.fixture
    def testtreant(self, tmpdir, request):
        # a throwaway Treant created inside the test's tmpdir
        with tmpdir.as_cwd():
            t = dtr.Treant('dummytreant')
        return t

    @pytest.fixture
    def testtreant2(self, tmpdir, request):
        # a second, distinct throwaway Treant
        with tmpdir.as_cwd():
            t = dtr.Treant('dummytreant2')
        return t
    def test_additive(self, tmpdir, testtreant, testtreant2, collection):
        """Test that addition of treants and collections give Bundles.
        """
        with tmpdir.as_cwd():
            assert isinstance(testtreant + testtreant2, dtr.Bundle)
            assert len(testtreant + testtreant2) == 2
            # an empty collection acts as the additive identity
            b = collection() + testtreant + testtreant2
            # beating a dead horse
            assert len(b) == 2
    class TestGetitem(CollectionsTests.TestGetitem):
        """Bundle-specific indexing on top of the shared getitem tests."""

        def test_getitem_name_string(self, filled_collection):
            """Indexing by member name returns a Bundle with the match."""
            b, (t1, t2, t3) = filled_collection
            n = t1.name
            b_new = b[n]
            assert isinstance(b_new, dtr.Bundle)
            assert b_new[0] == t1

        def test_getitem_string_KeyError(self, filled_collection):
            """Indexing with an unknown name raises KeyError."""
            b = filled_collection[0]
            with pytest.raises(KeyError):
                b['not there']
    def test_get_members(self, collection, tmpdir):
        """Access members with indexing and slicing"""
        with tmpdir.as_cwd():
            t1 = dtr.Treant('larry')
            t2 = dtr.Treant('curly')
            t3 = dtr.Treant('moe')
            # arbitrarily nested input is flattened on construction
            col = collection([[[t1, [t2, [t3]]]]])
            assert col[1] == t2
            t4 = dtr.treants.Treant('shemp')
            col = col + t4
            for member in (t1, t2, t3):
                assert member in col[:3]
            assert t4 not in col[:3]
            assert t4 == col[-1]
    def test_member_attributes(self, collection, tmpdir):
        """Get member names and abspaths"""
        with tmpdir.as_cwd():
            t1 = dtr.Treant('bigger')
            t2 = dtr.Treant('faster')
            t3 = dtr.Treant('stronger')
            col = collection(t1, t2, t3)
            # .names and .abspaths preserve member order
            names = [treant.name for treant in [t1, t2, t3]]
            assert col.names == names
            abspaths = [treant.abspath for treant in [t1, t2, t3]]
            assert col.abspaths == abspaths
    def test_map(self, collection, tmpdir):
        """map() applies a function over members, serially or in parallel."""
        with tmpdir.as_cwd():
            t1 = dtr.Treant('lark')
            t2 = dtr.Treant('hark')
            t3 = dtr.Treant('linus')
            col = collection(t1, t2, t3)
            comp = [cont.name for cont in col]
            assert col.map(do_stuff) == comp
            assert col.map(do_stuff, processes=2) == comp
            # a function returning None for every member yields None overall
            assert col.map(return_nothing) is None
            assert col.map(return_nothing, processes=2) is None
    class TestGet:
        """Filtering members by tags and categories via Bundle.get()."""

        @pytest.fixture
        def filled_bundle(self, tmpdir):
            # four Treants tagged and categorized by parity and value
            with tmpdir.as_cwd():
                t1 = dtr.Treant('one')
                t2 = dtr.Treant('two')
                t3 = dtr.Treant('three')
                t4 = dtr.Treant('four')
                t1.tags.add('odd', 'one')
                t2.tags.add('even', 'two')
                t3.tags.add('odd', 'three')
                t4.tags.add('even', 'four')
                t1.categories.add(is_even=False, value=1)
                t2.categories.add(is_even=True, value=2)
                t3.categories.add(is_even=False, value=3)
                t4.categories.add(is_even=True, value=4)
                return [t1, t2, t3, t4], dtr.Bundle([t1, t2, t3, t4])

        def test_get_blank(self, filled_bundle):
            """No criteria returns the whole bundle."""
            ref, b = filled_bundle
            new = b.get()
            assert new == b

        def test_get_cats(self, filled_bundle):
            """A single category criterion selects matching members."""
            ref, b = filled_bundle
            new = b.get(is_even=True)
            assert len(new) == 2
            assert ref[1] in new
            assert ref[3] in new

        def test_get_cats_double(self, filled_bundle):
            """Multiple category criteria are ANDed together."""
            ref, b = filled_bundle
            new = b.get(is_even=True, value=4)
            assert len(new) == 1
            assert ref[3] in new

        def test_get_cats_empty(self, filled_bundle):
            """Contradictory category criteria match nothing."""
            ref, b = filled_bundle
            new = b.get(is_even=True, value=1)
            assert len(new) == 0

        def test_get_cats_KeyError(self, filled_bundle):
            """Unknown category keys match nothing (no exception raised)."""
            ref, b = filled_bundle
            new = b.get(colour='yellow')
            assert len(new) == 0

        def test_get_tags(self, filled_bundle):
            """A single tag criterion selects matching members."""
            ref, b = filled_bundle
            new = b.get('odd')
            assert len(new) == 2
            assert ref[0] in new
            assert ref[2] in new

        def test_get_tags_many(self, filled_bundle):
            """Multiple tag criteria are ANDed together."""
            ref, b = filled_bundle
            new = b.get('odd', 'three')
            assert len(new) == 1
            assert ref[2] in new

        def test_get_tags_empty(self, filled_bundle):
            """An absent tag matches nothing."""
            ref, b = filled_bundle
            new = b.get('magical')
            assert len(new) == 0

        def test_get_cat_and_tag(self, filled_bundle):
            """Tag and category criteria may be combined."""
            ref, b = filled_bundle
            new = b.get('odd', value=3)
            assert len(new) == 1
            assert ref[2] in new
    class TestAggTags:
        """Test behavior of manipulating tags collectively.
        """

        def test_add_tags(self, collection, testtreant, testtreant2, tmpdir):
            """Tags added through the collection appear on every member."""
            with tmpdir.as_cwd():
                col = collection(testtreant, testtreant2)
                assert len(col.tags) == 0
                col.tags.add('broiled', 'not baked')
                assert len(col.tags) == 2
                for tag in ('broiled', 'not baked'):
                    assert tag in col.tags
        def test_tags_setting(self, collection, testtreant,
                              testtreant2, tmpdir):
            """Tags can be assigned wholesale from a list, set, or Tags."""
            with tmpdir.as_cwd():
                col = collection(testtreant, testtreant2)
                assert len(col.tags) == 0
                # add as list
                col.tags = ['broiled', 'not baked']
                assert len(col.tags) == 2
                for tag in ('broiled', 'not baked'):
                    assert tag in col.tags
                col.tags.clear()
                # add as set
                col.tags = {'broiled', 'not baked'}
                assert len(col.tags) == 2
                for tag in ('broiled', 'not baked'):
                    assert tag in col.tags
                col.tags.clear()
                # add as Tags
                t = dtr.Treant('mark twain')
                t.tags.add('literature', 'quotables')
                col.tags = t.tags
                assert len(col.tags) == 2
                for tag in ('literature', 'quotables'):
                    assert tag in col.tags
        def test_tags_all(self, collection, tmpdir):
            """tags.all holds only the tags shared by every member."""
            with tmpdir.as_cwd():
                moe = dtr.Treant('moe',
                                 tags=['smartest', 'mean', 'stooge'])
                larry = dtr.Treant('larry',
                                   tags=['weird', 'means well', 'stooge'])
                curly = dtr.Treant('curly',
                                   tags=['dumb', 'nyuk-nyuk', 'stooge'])
                col = collection(moe, larry, curly)
                # 'stooge' is the only tag common to all three
                assert len(col.tags.all) == 1
                assert 'stooge' in col.tags.all
        def test_tags_any(self, collection, testtreant, testtreant2, tmpdir):
            """tags.any holds the union of all members' tags."""
            with tmpdir.as_cwd():
                moe = dtr.Treant('moe',
                                 tags=['smartest', 'mean', 'stooge'])
                larry = dtr.Treant('larry',
                                   tags=['weird', 'means well', 'stooge'])
                curly = dtr.Treant('curly',
                                   tags=['dumb', 'nyuk-nyuk', 'stooge'])
                col = collection(moe, larry, curly)
                # 7 distinct tags across the three members
                assert len(col.tags.any) == 7
                for tag in ('smartest', 'mean', 'weird', 'means well',
                            'dumb', 'nyuk-nyuk', 'stooge'):
                    assert tag in col.tags.any
        def test_tags_set_behavior(self, collection, tmpdir):
            """AggTags/Tags implement the full set-operator protocol and
            reject non-set operands (lists) with TypeError."""
            with tmpdir.as_cwd():
                t1 = dtr.Treant('maple')
                t2 = dtr.Treant('pine')
                t3 = dtr.Treant('juniper')
                t1.tags.add({'tree', 'new jersey', 'deciduous'})
                t2.tags.add({'tree', 'new york', 'evergreen'})
                t3.tags.add({'shrub', 'new york', 'evergreen'})
                col = collection(t1, t2, t3)
                trees = dtr.Bundle('maple', 'pine')
                evergreens = dtr.Bundle('pine', 'juniper')
                tags = col.tags
                assert len(tags.any) == 6
                # test equality: __eq__ (==)
                assert t1.tags == {'tree', 'new jersey', 'deciduous'}
                assert t2.tags == {'tree', 'new york', 'evergreen'}
                assert t3.tags == {'shrub', 'new york', 'evergreen'}
                # test subset: __lt__ (<)
                assert not t1.tags < {'tree', 'new jersey', 'deciduous'}
                assert tags < {'tree', 'new jersey', 'deciduous'}
                assert t1.tags < tags.any
                assert t2.tags < tags.any
                assert t3.tags < tags.any
                # test difference: __sub__ (-)
                assert t1.tags - {'tree'} == {'new jersey', 'deciduous'}
                assert trees.tags - {'tree'} == set()
                # test right difference: __rsub__ (-)
                evergreen_ny_shrub = {'evergreen', 'new york', 'shrub'}
                dec_nj_sh = {'deciduous', 'new jersey', 'shrub'}
                assert tags.any - t1.tags == evergreen_ny_shrub
                assert tags.any - evergreens.tags - trees.tags == dec_nj_sh
                assert {'tree'} - trees.tags == set()
                # test union: __or__ (|)
                evergreen_ny_shrub = {'evergreen', 'new york', 'shrub'}
                assert evergreens.tags | t3.tags == evergreen_ny_shrub
                assert t1.tags | t2.tags | t3.tags == tags.any
                # test right union: __ror__ (|)
                assert {'shrub'} | evergreens.tags == evergreen_ny_shrub
                assert t3.tags | {'tree'} == {'tree'} | t3.tags
                # test intersection: __and__ (&)
                evergreen_ny = {'evergreen', 'new york'}
                assert evergreens.tags & t3.tags == evergreen_ny
                assert t1.tags & t2.tags & t3.tags == tags.all
                # test right intersection: __rand__ (&)
                assert evergreen_ny_shrub & evergreens.tags == evergreen_ny
                assert t3.tags & {'shrub'} == {'shrub'} & t3.tags
                # test symmetric difference: __xor__ (^)
                evergreen_ny_tree = {'evergreen', 'new york', 'tree'}
                assert trees.tags ^ evergreens.tags == evergreen_ny_tree
                assert evergreens.tags ^ t3.tags == {'shrub'}
                assert t1.tags ^ t2.tags ^ t3.tags == dec_nj_sh
                # test right symmetric difference: __rxor__ (^)
                assert {'new york'} ^ evergreens.tags == {'evergreen'}
                assert {'shrub'} ^ trees.tags == t2.tags ^ t3.tags
                # each operator must raise TypeError for list operands
                # type_error_msg = "Operands must be AggTags, Tags, or a set."
                with pytest.raises(TypeError) as e:
                    ['tree'] == trees.tags
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    trees.tags < ['tree']
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    ['tree'] - trees.tags
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    trees.tags - ['tree']
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    ['tree'] | trees.tags
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    trees.tags | ['tree']
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    ['tree'] & trees.tags
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    trees.tags & ['tree']
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    ['tree'] ^ trees.tags
                # assert e.value.message == type_error_msg
                with pytest.raises(TypeError) as e:
                    trees.tags ^ ['tree']
                # assert e.value.message == type_error_msg
        def test_tags_getitem(self, collection, testtreant,
                              testtreant2, tmpdir):
            """Indexing AggTags yields per-member boolean masks; the
            container type of the key selects the matching logic
            (str: has tag, list: ALL, tuple: ANY, set: NOT-all)."""
            with tmpdir.as_cwd():
                t1 = dtr.Treant('maple')
                t2 = dtr.Treant('pine')
                t1.tags.add({'tree', 'new jersey', 'deciduous'})
                t2.tags.add({'tree', 'new york', 'evergreen'})
                col = collection(t1, t2)
                tags = col.tags
                assert len(tags.any) == 5
                # test single tags
                assert tags['tree'] == [True, True]
                assert tags['deciduous'] == [True, False]
                assert tags['evergreen'] == [False, True]
                assert tags['new jersey'] == [True, False]
                assert tags['new york'] == [False, True]
                # a set negates: members lacking at least one given tag
                assert tags[{'tree'}] == [False, False]
                assert tags[{'deciduous'}] == [False, True]
                assert tags[{'evergreen'}] == [True, False]
                assert tags[{'new jersey'}] == [False, True]
                assert tags[{'new york'}] == [True, False]
                # test for Treants with ALL the given tags
                assert tags[['tree', 'deciduous']] == [True, False]
                assert tags[['tree', 'evergreen']] == [False, True]
                assert tags[['new jersey', 'evergreen']] == [False, False]
                # test for Treants with ANY of the given tags
                assert tags[('tree', 'deciduous')] == [True, True]
                assert tags[('deciduous', 'evergreen')] == [True, True]
                assert tags[('new york', 'evergreen')] == [False, True]
                # test for Treants without at least one of the given tags
                assert tags[{'deciduous', 'evergreen'}] == [True, True]
                assert tags[{'tree', 'deciduous'}] == [False, True]
                assert tags[{'tree', 'new york', 'evergreen'}] == [True, False]
                # complex logic tests
                # give those that are evergreen or in NY AND also not deciduous
                selection = [('evergreen', 'new york'), {'deciduous'}]
                assert tags[selection] == [False, True]
                # give those that are evergreen or in NY AND also not a tree
                selection = [('evergreen', 'new york'), {'tree'}]
                assert tags[selection] == [False, False]
                # give a tree that's in NJ OR anything that's not evergreen
                selection = (['tree', 'new jersey'], {'evergreen'})
                assert tags[selection] == [True, False]
                # cannot be a tree in NJ, AND must also be deciduous
                # I.e., give all deciduous things that aren't trees in NJ
                selection = [{'tree', 'new jersey'}, 'deciduous']
                assert tags[selection] == [False, False]
        def test_tags_fuzzy(self, collection, testtreant, testtreant2, tmpdir):
            """Fuzzy tag matching honors threshold and 'all'/'any' scope."""
            with tmpdir.as_cwd():
                t1 = dtr.Treant('maple')
                t2 = dtr.Treant('pine')
                t1.tags.add({'tree', 'new jersey', 'deciduous'})
                t2.tags.add({'tree', 'new york', 'evergreen'})
                col = collection(t1, t2)
                tags = col.tags
                assert len(tags.any) == 5
                # default scope is 'all' with threshold 80
                all_tree1 = tags.fuzzy('tree', threshold=80, scope='all')
                all_tree2 = tags.fuzzy('tree')
                assert all_tree1 == all_tree2
                assert all_tree2 == ('tree',)
                any_deciduous = tags.fuzzy('deciduous', scope='any')
                assert any_deciduous == ('deciduous',)
                # 'evergreen' is not on every member, so scope='all' is empty
                all_evergreen = tags.fuzzy('evergreen')
                assert all_evergreen == ()
                # check that fuzzy matching is independent of threshold when
                # exact tag is present in all members
                all_tree_strict = tags.fuzzy('tree', threshold=99)
                assert all_tree_strict == ('tree',)
                all_tree_tolerant = tags.fuzzy('tree', threshold=0)
                assert all_tree_tolerant == ('tree',)
                # check that fuzzy matching will give differing tags when
                # members have similar tag names ('new') and the threshold is
                # varied
                all_ny = tags.fuzzy('new york')
                assert all_ny == ()
                any_ny_strict = tags.fuzzy('new york', scope='any')
                assert any_ny_strict == ('new york',)
                any_ny_tol = tags.fuzzy('new york', threshold=50, scope='any')
                assert set(any_ny_tol) == {'new york', 'new jersey'}
                # check fuzzy matching for multiple tags (scope='all')
                new_ever = ['new', 'evergreen']
                all_mul_strict = tags.fuzzy(new_ever, threshold=80)
                assert all_mul_strict == ()
                all_mul_tol = tags.fuzzy(new_ever, threshold=30)
                assert all_mul_tol == ('tree',)
                # check fuzzy matching for multiple tags (scope='any')
                new_tree = ['new', 'tree']
                any_mul_stric = tags.fuzzy(new_tree, threshold=90, scope='any')
                assert any_mul_stric == ('tree',)
                any_mul_tol = tags.fuzzy(new_tree, threshold=80, scope='any')
                assert set(any_mul_tol) == {'new york', 'new jersey', 'tree'}
                nj_decid = ['new jersey', 'decid']
                any_mul_njdec = tags.fuzzy(nj_decid, threshold=80, scope='any')
                assert set(any_mul_njdec) == {'new jersey', 'deciduous'}
        def test_tags_filter(self, collection, testtreant,
                             testtreant2, tmpdir):
            """filter() returns the sub-Bundle matching a tag expression;
            key container type selects the logic as with __getitem__."""
            with tmpdir.as_cwd():
                maple = dtr.Treant('maple')
                pine = dtr.Treant('pine')
                maple.tags.add({'tree', 'new jersey', 'deciduous'})
                pine.tags.add({'tree', 'new york', 'evergreen'})
                col = collection(maple, pine)
                tags = col.tags
                maple_bund = dtr.Bundle(maple)
                pine_bund = dtr.Bundle(pine)
                assert len(tags.any) == 5
                # filter using single tags
                assert tags.filter('tree') == col
                assert tags.filter({'tree'}) == dtr.Bundle()
                assert tags.filter('deciduous') == maple_bund
                assert tags.filter('evergreen') == pine_bund
                assert tags.filter('new jersey') == maple_bund
                assert tags.filter('new york') == pine_bund
                # filter Treants that DON'T have a given tag
                assert tags.filter({'new york'}) == maple_bund
                assert tags.filter({'deciduous'}) == pine_bund
                # filter Treants containing all of the tags
                assert tags.filter(['deciduous', 'tree']) == maple_bund
                assert tags.filter(['evergreen', 'tree']) == pine_bund
                assert tags.filter(['deciduous', 'new york']) == dtr.Bundle()
                # filter Treants containing any of the tags tags
                assert tags.filter(('evergreen', 'tree')) == col
                assert tags.filter(('deciduous', 'new york')) == col
                assert tags.filter(('evergreen', 'new york')) == pine_bund
                # filter Treants that exclude any of the provided tags
                assert tags.filter({'deciduous', 'new york'}) == col
                assert tags.filter({'deciduous', 'new jersey'}) == pine_bund
                assert tags.filter({'evergreen', 'tree'}) == maple_bund
                # complex logic tests
                # give those that are evergreen or in NY AND also not deciduous
                selection = [('evergreen', 'new york'), {'deciduous'}]
                assert tags.filter(selection) == pine_bund
                # give those that are evergreen or in NY AND also not a tree
                selection = [('evergreen', 'new york'), {'tree'}]
                assert tags.filter(selection) == dtr.Bundle()
                # give a tree that's in NJ OR anything that's not evergreen
                selection = (['tree', 'new jersey'], {'evergreen'})
                assert tags.filter(selection) == maple_bund
                # cannot be a tree in NJ, AND must also be deciduous
                # I.e., give all deciduous things that aren't trees in NJ
                selection = [{'tree', 'new jersey'}, 'deciduous']
                assert tags.filter(selection) == dtr.Bundle()
    class TestAggCategories:
        """Test behavior of manipulating categories collectively.
        """

        def test_add_categories(self, collection, testtreant, testtreant2,
                                tmpdir):
            """Categories added through the collection reach every member;
            len(col.categories) counts only keys shared by ALL members."""
            with tmpdir.as_cwd():
                # add a couple test Treants to collection
                col = collection(testtreant, testtreant2)
                assert len(col.categories) == 0
                # add 'age' and 'bark' as categories of this collection
                col.categories.add({'age': 42}, bark='smooth')
                assert len(col.categories) == 2
                for member in col:
                    assert member.categories['age'] == 42
                    assert member.categories['bark'] == 'smooth'
                for key in ['age', 'bark']:
                    assert key in col.categories.any
                # a new member with different keys shrinks the shared set
                t1 = dtr.Treant('hickory')
                t1.categories.add(bark='shaggy', species='ovata')
                col += collection(t1)
                assert len(col.categories) == 1
                assert len(col.categories.all) == 1
                assert len(col.categories.any) == 3
                col.categories.add(location='USA')
                assert len(col.categories) == 2
                assert len(col.categories.all) == 2
                assert len(col.categories.any) == 4
                for member in col:
                    assert member.categories['location'] == 'USA'
        def test_categories_getitem(self, collection, testtreant, testtreant2,
                                    tmpdir):
            """Indexing categories gives per-member value lists (None for
            members lacking the key); list keys give a list of lists,
            set keys give a dict of lists."""
            with tmpdir.as_cwd():
                # add a couple test Treants to collection
                col = collection((testtreant, testtreant2))
                # add 'age' and 'bark' as categories of this collection
                col.categories.add({'age': 42, 'bark': 'smooth'})
                t1 = dtr.Treant('maple')
                t2 = dtr.Treant('sequoia')
                t1.categories.add({'age': 'seedling', 'bark': 'rough',
                                   'type': 'deciduous'})
                t2.categories.add({'age': 'adult', 'bark': 'rough',
                                   'type': 'evergreen', 'nickname': 'redwood'})
                col += collection((t1, t2))
                assert len(col.categories) == 2
                assert len(col.categories.any) == 4
                # test values for each category in the collection
                age_list = [42, 42, 'seedling', 'adult']
                assert age_list == col.categories['age']
                bark_list = ['smooth', 'smooth', 'rough', 'rough']
                assert bark_list == col.categories['bark']
                type_list = [None, None, 'deciduous', 'evergreen']
                assert type_list == col.categories['type']
                nick_list = [None, None, None, 'redwood']
                assert nick_list == col.categories['nickname']
                # test list of keys as input
                cat_list = [age_list, type_list]
                assert cat_list == col.categories[['age', 'type']]
                # test set of keys as input
                cat_set = {'bark': bark_list, 'nickname': nick_list}
                assert cat_set == col.categories[{'bark', 'nickname'}]
        def test_categories_setitem(self, collection, testtreant, testtreant2,
                                    tmpdir):
            """Setting a category assigns to every member; a list value
            assigns member-wise."""
            with tmpdir.as_cwd():
                # add a couple test Treants to collection
                col = collection(testtreant, testtreant2)
                # add 'age' and 'bark' as categories of this collection
                col.categories.add({'age': 42, 'bark': 'smooth'})
                t1 = dtr.Treant('maple')
                t2 = dtr.Treant('sequoia')
                t1.categories.add({'age': 'seedling', 'bark': 'rough',
                                   'type': 'deciduous'})
                t2.categories.add({'age': 'adult', 'bark': 'rough',
                                   'type': 'evergreen', 'nickname': 'redwood'})
                col += collection(t1, t2)
                # test setting a category when all members have it
                for value in col.categories['age']:
                    assert value in [42, 42, 'seedling', 'adult']
                col.categories['age'] = 'old'
                for value in col.categories['age']:
                    assert value in ['old', 'old', 'old', 'old']
                # test setting a new category (no members have it)
                assert 'location' not in col.categories.any
                col.categories['location'] = 'USA'
                for value in col.categories['location']:
                    assert value in ['USA', 'USA', 'USA', 'USA']
                # test setting a category that only some members have
                assert 'nickname' in col.categories.any
                assert 'nickname' not in col.categories.all
                col.categories['nickname'] = 'friend'
                for value in col.categories['nickname']:
                    assert value in ['friend', 'friend', 'friend', 'friend']
                # test setting values for individual members
                assert 'favorite ice cream' not in col.categories
                ice_creams = ['rocky road',
                              'americone dream',
                              'moose tracks',
                              'vanilla']
                col.categories['favorite ice cream'] = ice_creams
                for member, ice_cream in zip(col, ice_creams):
                    assert member.categories['favorite ice cream'] == ice_cream
        def test_categories_all(self, collection, testtreant, testtreant2,
                                tmpdir):
            """categories.all exposes only keys present on every member."""
            with tmpdir.as_cwd():
                # add a couple test Treants to collection
                col = collection(testtreant, testtreant2)
                # add 'age' and 'bark' as categories of this collection
                col.categories.add({'age': 42}, bark='bare')
                # add categories to 'hickory' Treant, then add to collection
                t1 = dtr.Treant('hickory')
                t1.categories.add(bark='shaggy', species='ovata')
                col += collection(t1)
                # check the contents of 'bark', ensure 'age' and 'species' are
                # not shared categories of the collection
                # NOTE(review): re-adding t1 here appears redundant; the
                # assertions below expect only 3 members, so presumably the
                # collection deduplicates -- TODO confirm intent
                col += collection(t1)
                common_categories = col.categories.all
                assert len(col.categories) == len(common_categories)
                assert 'age' not in common_categories
                assert 'species' not in common_categories
                assert common_categories['bark'] == ['bare', 'bare', 'shaggy']
                # add 'location' category to collection
                col.categories.add(location='USA')
                common_categories = col.categories.all
                # ensure all members have 'USA' for their 'location'
                assert len(col.categories) == len(common_categories)
                assert 'age' not in common_categories
                assert 'species' not in common_categories
                assert common_categories['bark'] == ['bare', 'bare', 'shaggy']
                assert common_categories['location'] == ['USA', 'USA', 'USA']
                # remove 'bark' category from collection
                col.categories.remove('bark')
                common_categories = col.categories.all
                # check that only 'location' is a shared category
                assert len(col.categories) == len(common_categories)
                assert 'age' not in common_categories
                assert 'bark' not in common_categories
                assert 'species' not in common_categories
                assert common_categories['location'] == ['USA', 'USA', 'USA']
        def test_categories_any(self, collection, testtreant, testtreant2,
                                tmpdir):
            """categories.any exposes the union of keys, with None filled in
            for members that lack a key."""
            with tmpdir.as_cwd():
                # add a couple test Treants to collection
                col = collection(testtreant, testtreant2)
                # add 'age' and 'bark' as categories of this collection
                col.categories.add({'age': 42}, bark='smooth')
                assert len(col.categories.any) == 2
                # add categories to 'hickory' Treant, then add to collection
                t1 = dtr.Treant('hickory')
                t1.categories.add(bark='shaggy', species='ovata')
                col += collection(t1)
                # check the contents of 'bark', ensure 'age' and 'species' are
                # not shared categories of the collection
                every_category = col.categories.any
                assert len(every_category) == 3
                assert every_category['age'] == [42, 42, None]
                assert every_category['bark'] == ['smooth', 'smooth', 'shaggy']
                assert every_category['species'] == [None, None, 'ovata']
                # add 'location' category to collection
                col.categories.add(location='USA')
                every_category = col.categories.any
                # ensure all members have 'USA' for their 'location'
                assert len(every_category) == 4
                assert every_category['age'] == [42, 42, None]
                assert every_category['bark'] == ['smooth', 'smooth', 'shaggy']
                assert every_category['species'] == [None, None, 'ovata']
                assert every_category['location'] == ['USA', 'USA', 'USA']
                # add 'sprout' to 'age' category of 'hickory' Treant
                t1.categories['age'] = 'sprout'
                every_category = col.categories.any
                # check 'age' is category for 'hickory' and is 'sprout'
                assert len(every_category) == 4
                assert every_category['age'] == [42, 42, 'sprout']
                assert every_category['bark'] == ['smooth', 'smooth', 'shaggy']
                assert every_category['species'] == [None, None, 'ovata']
                assert every_category['location'] == ['USA', 'USA', 'USA']
                # remove 'bark' category from collection
                col.categories.remove('bark')
                every_category = col.categories.any
                # check that 'bark' is gone everywhere
                assert len(every_category) == 3
                assert every_category['age'] == [42, 42, 'sprout']
                assert every_category['species'] == [None, None, 'ovata']
                assert every_category['location'] == ['USA', 'USA', 'USA']
                assert 'bark' not in every_category
        def test_categories_remove(self, collection, testtreant, testtreant2,
                                   tmpdir):
            """remove() deletes a category key from every member."""
            with tmpdir.as_cwd():
                t1 = dtr.Treant('maple')
                t2 = dtr.Treant('sequoia')
                col = collection(t1, t2)
                col.categories.add({'age': 'sprout'}, bark='rough')
                # add a couple test Treants to collection
                col += collection(testtreant, testtreant2)
                # no key is shared by all four members
                assert len(col.categories) == 0
                assert len(col.categories.any) == 2
                # add 'USA', ensure 'location', 'age', 'bark' is a category in
                # at least one of the members
                col.categories.add(location='USA')
                assert len(col.categories) == 1
                for key in ['location', 'age', 'bark']:
                    assert key in col.categories.any
                # ensure 'age' and 'bark' are each not categories for all
                # members in collection
                assert 'age' not in col.categories
                assert 'bark' not in col.categories
                # remove 'bark', test for any instance of 'bark' in the
                # collection
                col.categories.remove('bark')
                assert len(col.categories) == 1
                for key in ['location', 'age']:
                    assert key in col.categories.any
                assert 'bark' not in col.categories.any
                # remove 'age', test that 'age' is not a category for any
                # member in collection
                col.categories.remove('age')
                for member in col:
                    assert 'age' not in member.categories
                # test that 'age' is not a category of this collection
                assert 'age' not in col.categories.any
        def test_categories_keys(self, collection, testtreant, testtreant2,
                                 tmpdir):
            """keys(scope='all') are on every member; keys(scope='any') may
            be missing from members that never set them."""
            with tmpdir.as_cwd():
                # add a couple test Treants to collection
                col = collection(testtreant, testtreant2)
                col.categories.add({'age': 42, 'bark': 'smooth'})
                t1 = dtr.Treant('maple')
                t2 = dtr.Treant('sequoia')
                t1.categories.add({'age': 'seedling', 'bark': 'rough',
                                   'type': 'deciduous'})
                t2.categories.add({'age': 'adult', 'bark': 'rough',
                                   'type': 'evergreen', 'nickname': 'redwood'})
                col += collection(t1, t2)
                # shared keys must appear on every member
                for k in col.categories.keys(scope='all'):
                    for member in col:
                        assert k in member.categories
                # union keys: only specific members carry the partial keys
                for k in col.categories.keys(scope='any'):
                    for member in col:
                        if k == 'nickname':
                            if member.name == 'maple':
                                assert k not in member.categories
                            elif member.name == 'sequoia':
                                assert k in member.categories
                        elif k == 'type':
                            # 'type' is only on maple and sequoia
                            if (member.name != 'maple' and
                                    member.name != 'sequoia'):
                                assert k not in member.categories
                        else:
                            assert k in member.categories
        def test_categories_values(self, collection, testtreant, testtreant2,
                                   tmpdir):
            """values() aligns with keys() for both 'all' and 'any' scope."""
            with tmpdir.as_cwd():
                # add a couple test Treants to collection
                col = collection(testtreant, testtreant2)
                col.categories.add({'age': 'young', 'bark': 'smooth'})
                t1 = dtr.Treant('maple')
                t2 = dtr.Treant('sequoia')
                t1.categories.add({'age': 'seedling', 'bark': 'rough',
                                   'type': 'deciduous'})
                t2.categories.add({'age': 'adult', 'bark': 'rough',
                                   'type': 'evergreen', 'nickname': 'redwood'})
                col += collection(t1, t2)
                # values() must be ordered identically to keys()
                for scope in ('all', 'any'):
                    for i, v in enumerate(
                            col.categories.values(scope=scope)):
                        assert v == col.categories[
                            col.categories.keys(scope=scope)[i]]
        def test_categories_items(self, collection, testtreant, testtreant2,
                                  tmpdir):
            """items() pairs each key with its member-wise values, matching
            keys() ordering for both scopes."""
            with tmpdir.as_cwd():
                # add a couple test Treants to collection
                col = collection(testtreant, testtreant2)
                col.categories.add({'age': 'young', 'bark': 'smooth'})
                t1 = dtr.Treant('maple')
                t2 = dtr.Treant('sequoia')
                t1.categories.add({'age': 'seedling', 'bark': 'rough',
                                   'type': 'deciduous'})
                t2.categories.add({'age': 'adult', 'bark': 'rough',
                                   'type': 'evergreen', 'nickname': 'redwood'})
                col += collection(t1, t2)
                for scope in ('all', 'any'):
                    keys = col.categories.keys(scope=scope)
                    for i, values in enumerate(
                            col.categories.items(scope=scope)):
                        for j, v in enumerate(values):
                            # each item is a (key, value) pair per member
                            assert v[1] == col.categories[keys[i]][j]
                            assert v[0] == keys[i]
        def test_categories_groupby(self, collection, testtreant, testtreant2,
                                    tmpdir):
            """groupby() partitions members by value (single key) or value
            tuple (list of keys); members missing any key are excluded.
            Set keys are rejected with TypeError."""
            with tmpdir.as_cwd():
                t1 = dtr.Treant('maple')
                t2 = dtr.Treant('sequoia')
                t3 = dtr.Treant('elm')
                t4 = dtr.Treant('oak')
                t1.categories.add({'age': 'young', 'bark': 'smooth',
                                   'type': 'deciduous'})
                t2.categories.add({'age': 'adult', 'bark': 'fibrous',
                                   'type': 'evergreen', 'nickname': 'redwood'})
                t3.categories.add({'age': 'old', 'bark': 'mossy',
                                   'type': 'deciduous', 'health': 'poor'})
                t4.categories.add({'age': 'young', 'bark': 'mossy',
                                   'type': 'deciduous', 'health': 'good'})
                col = collection(t1, t2, t3, t4)
                # single-key grouping: value -> Bundle of members
                age_group = col.categories.groupby('age')
                assert {t1, t4} == set(age_group['young'])
                assert {t2} == set(age_group['adult'])
                assert {t3} == set(age_group['old'])
                bark_group = col.categories.groupby('bark')
                assert {t1} == set(bark_group['smooth'])
                assert {t2} == set(bark_group['fibrous'])
                assert {t3, t4} == set(bark_group['mossy'])
                type_group = col.categories.groupby('type')
                assert {t1, t3, t4} == set(type_group['deciduous'])
                assert {t2} == set(type_group['evergreen'])
                # members lacking the key do not appear in any group
                nick_group = col.categories.groupby('nickname')
                assert {t2} == set(nick_group['redwood'])
                for bundle in nick_group.values():
                    assert {t1, t3, t4}.isdisjoint(set(bundle))
                health_group = col.categories.groupby('health')
                assert {t3} == set(health_group['poor'])
                assert {t4} == set(health_group['good'])
                for bundle in health_group.values():
                    assert {t1, t2}.isdisjoint(set(bundle))
                # test list of keys as input
                age_bark = col.categories.groupby(['age', 'bark'])
                assert len(age_bark) == 4
                assert {t1} == set(age_bark[('young', 'smooth')])
                assert {t2} == set(age_bark[('adult', 'fibrous')])
                assert {t3} == set(age_bark[('old', 'mossy')])
                assert {t4} == set(age_bark[('young', 'mossy')])
                type_health = col.categories.groupby(['type', 'health'])
                assert len(type_health) == 2
                assert {t3} == set(type_health[('deciduous', 'poor')])
                assert {t4} == set(type_health[('deciduous', 'good')])
                for bundle in type_health.values():
                    assert {t1, t2}.isdisjoint(set(bundle))
                # key order determines value-tuple order
                type_health = col.categories.groupby(['health', 'type'])
                assert len(type_health) == 2
                assert {t3} == set(type_health[('poor', 'deciduous')])
                assert {t4} == set(type_health[('good', 'deciduous')])
                for bundle in type_health.values():
                    assert {t1, t2}.isdisjoint(set(bundle))
                age_nick = col.categories.groupby(['age', 'nickname'])
                assert len(age_nick) == 1
                assert {t2} == set(age_nick['adult', 'redwood'])
                for bundle in age_nick.values():
                    assert {t1, t3, t4}.isdisjoint(set(bundle))
                keys = ['age', 'bark', 'health']
                age_bark_health = col.categories.groupby(keys)
                assert len(age_bark_health) == 2
                assert {t3} == set(age_bark_health[('old', 'mossy', 'poor')])
                assert {t4} == set(age_bark_health[('young', 'mossy', 'good')])
                for bundle in age_bark_health.values():
                    assert {t1, t2}.isdisjoint(set(bundle))
                keys = ['age', 'bark', 'type', 'nickname']
                abtn = col.categories.groupby(keys)
                assert len(abtn) == 1
                assert {t2} == set(abtn[('adult', 'fibrous', 'evergreen',
                                         'redwood')])
                for bundle in abtn.values():
                    assert {t1, t3, t4}.isdisjoint(set(bundle))
                keys = ['bark', 'nickname', 'type', 'age']
                abtn2 = col.categories.groupby(keys)
                assert len(abtn2) == 1
                assert {t2} == set(abtn2[('fibrous', 'redwood', 'evergreen',
                                          'adult')])
                for bundle in abtn2.values():
                    assert {t1, t3, t4}.isdisjoint(set(bundle))
                # no member has both 'health' and 'nickname'
                keys = ['health', 'nickname']
                health_nick = col.categories.groupby(keys)
                assert len(health_nick) == 0
                for bundle in health_nick.values():
                    assert {t1, t2, t3, t4}.isdisjoint(set(bundle))
                # Test key TypeError in groupby
                with pytest.raises(TypeError) as e:
                    col.categories.groupby({'health', 'nickname'})
| bsd-3-clause | 14b6eeb79b7bc4b09c6970619a00141a | 40.440565 | 79 | 0.486952 | 3.985371 | false | true | false | false |
datreant/datreant | src/datreant/selectionparser.py | 1 | 2710 | from pyparsing import (CaselessLiteral, Word, quotedString,
removeQuotes, infix_notation, opAssoc, stringEnd,
ParseException)
__all__ = ['parse_selection']
class UnaryOperation(object):
    """Parse action base for a prefix operator: records op and its operand."""

    def __init__(self, t):
        # t[0] is the (operator, operand) pair produced by infix_notation
        operator_token, operand = t[0]
        self.op = operator_token
        self.a = operand
class BinaryOperation(object):
    """Parse action base for an infix operator chain.

    Stores the operator symbol and the list of operands from an alternating
    ``operand op operand op operand ...`` token group.
    """

    def __init__(self, t):
        tokens = t[0]
        # the operator sits at every odd index; operands at the even ones
        self.op = tokens[1]
        self.operands = tokens[0::2]
class SearchAnd(BinaryOperation):
    """AND node: renders its operands as a list (tag-intersection form)."""

    def generate_tag_expr(self):
        return [operand.generate_tag_expr() for operand in self.operands]

    def __repr__(self):
        joined = ",".join(str(operand) for operand in self.operands)
        return "AND:(%s)" % joined
class SearchOr(BinaryOperation):
    """OR node: renders its operands as a tuple (tag-union form)."""

    def generate_tag_expr(self):
        return tuple([operand.generate_tag_expr() for operand in self.operands])

    def __repr__(self):
        joined = ",".join(str(operand) for operand in self.operands)
        return "OR:(%s)" % joined
class SearchNot(UnaryOperation):
    """NOT node: wraps negated terms in sets (tag-negation form)."""

    def generate_tag_expr(self):
        inner = self.a.generate_tag_expr()
        # Distribute the negation over an AND result (list) or an OR
        # result (tuple), wrapping each element in its own set; a bare
        # term simply becomes a one-element set.
        if isinstance(inner, list):
            return [{item} for item in inner]
        if isinstance(inner, tuple):
            return tuple({item} for item in inner)
        return {inner}

    def __repr__(self):
        return "NOT:(%s)" % str(self.a)
class SearchTerm(object):
    """Leaf node holding one tag string from the parsed selection."""

    def __init__(self, toks):
        # toks[0] is the matched bare word or unquoted string
        self.term = toks[0]

    def generate_tag_expr(self):
        # coerce to a plain string for the tag-filtering machinery
        return "{}".format(self.term)

    def __repr__(self):
        return self.term
# define the grammar: case-insensitive boolean keywords plus terms
and_ = CaselessLiteral("and")
or_ = CaselessLiteral("or")
not_ = CaselessLiteral("not")
# Printable ASCII characters allowed in a bare (unquoted) tag.  Note that
# space, parentheses and backslash are deliberately absent so that grouping
# and quoting still parse; both quote characters ARE allowed inside a term.
allowed_chars = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'*+,-./:;<=>?@[]^_`{|}~'
# first remove matching strings and then parse for printable characters
searchTerm = quotedString.setParseAction(removeQuotes) | Word(allowed_chars)
searchTerm.setParseAction(SearchTerm)
# operator precedence, tightest first: NOT binds tighter than AND, which
# binds tighter than OR; each level builds the corresponding node class
searchExpr = infix_notation(searchTerm, [
    (not_, 1, opAssoc.RIGHT, SearchNot),
    (and_, 2, opAssoc.LEFT, SearchAnd),
    (or_, 2, opAssoc.LEFT, SearchOr),
])
# the whole input must be consumed; trailing junk is a parse error
Parser = searchExpr + stringEnd
def parse_selection(sel):
    """Convert a tag selection string to list/tuple/set filter form.

    ``and`` becomes a list, ``or`` a tuple and ``not`` a set, matching the
    default tag-selection conventions.  A string that cannot be parsed is
    handed back unchanged.

    Parameters
    ----------
    sel : str
        selection string

    Returns
    -------
    list/tuple/set representation used to filter tags, or ``sel`` itself
    when the string does not parse

    Example
    -------
    >>> parse_selection('food and drinks')
    ['food', 'drinks']

    >>> parse_selection('free beer')
    'free beer'
    """
    try:
        tree = Parser.parseString(sel)[0]
        return tree.generate_tag_expr()
    except ParseException:
        # not a boolean expression: treat the whole string as one tag
        return sel
| bsd-3-clause | 525cc2b957a7b51786cedbe746159061 | 25.831683 | 110 | 0.61845 | 3.687075 | false | false | false | false |
fredrik-johansson/mpmath | mpmath/tests/test_fp.py | 15 | 89997 | """
Easy-to-use test-generating code:
cases = '''
exp 2.25
log 2.25
'''
from mpmath import *
mp.dps = 20
for test in cases.splitlines():
if not test:
continue
words = test.split()
fname = words[0]
args = words[1:]
argstr = ", ".join(args)
testline = "%s(%s)" % (fname, argstr)
ans = str(eval(testline))
    print(" assert ae(fp.%s, %s)" % (testline, ans))
"""
from mpmath import fp
def ae(x, y, tol=1e-12):
    """Return True when x approximates y to within relative tolerance tol.

    Exact equality short-circuits, which also makes the zero/zero case pass
    even though the relative test would be vacuous there.
    """
    return x == y or abs(x - y) <= tol * abs(y)
def test_conj():
    """Conjugation of real/complex scalars and the conjugated dot product."""
    for value, expected in [(4, 4), (3+4j, 3-4j)]:
        assert fp.conj(value) == expected
    # fdot with conjugate=True conjugates the second vector before summing
    assert fp.fdot([1, 2], [3, 2+1j], conjugate=True) == 7-2j
def test_fp_number_parts():
    """fp versions of arg, re, im, floor and ceil on real/complex inputs."""
    # complex argument (phase angle), covering all four half-axes
    assert ae(fp.arg(3), 0.0)
    assert ae(fp.arg(-3), 3.1415926535897932385)
    assert ae(fp.arg(3j), 1.5707963267948966192)
    assert ae(fp.arg(-3j), -1.5707963267948966192)
    assert ae(fp.arg(2+3j), 0.98279372324732906799)
    assert ae(fp.arg(-1-1j), -2.3561944901923449288)
    # real/imaginary part extraction
    assert ae(fp.re(2.5), 2.5)
    assert ae(fp.re(2.5+3j), 2.5)
    assert ae(fp.im(2.5), 0.0)
    assert ae(fp.im(2.5+3j), 3.0)
    # floor/ceil act componentwise on complex inputs
    assert ae(fp.floor(2.5), 2.0)
    assert ae(fp.floor(2), 2.0)
    assert ae(fp.floor(2.0+0j), (2.0 + 0.0j))
    assert ae(fp.floor(-1.5-0.5j), (-2.0 - 1.0j))
    assert ae(fp.ceil(2.5), 3.0)
    assert ae(fp.ceil(2), 2.0)
    assert ae(fp.ceil(2.0+0j), (2.0 + 0.0j))
    assert ae(fp.ceil(-1.5-0.5j), (-1.0 + 0.0j))
def test_fp_cospi_sinpi():
    """sinpi/cospi (sin(pi*x), cos(pi*x)) at exact multiples of 1/4,
    complex arguments, tiny arguments, and negative arguments."""
    # sinpi at real quarter-integers: exact zeros/ones and +-sqrt(2)/2
    assert ae(fp.sinpi(0), 0.0)
    assert ae(fp.sinpi(0.25), 0.7071067811865475244)
    assert ae(fp.sinpi(0.5), 1.0)
    assert ae(fp.sinpi(0.75), 0.7071067811865475244)
    assert ae(fp.sinpi(1), 0.0)
    assert ae(fp.sinpi(1.25), -0.7071067811865475244)
    assert ae(fp.sinpi(1.5), -1.0)
    assert ae(fp.sinpi(1.75), -0.7071067811865475244)
    assert ae(fp.sinpi(2), 0.0)
    assert ae(fp.sinpi(2.25), 0.7071067811865475244)
    # sinpi along the line Im(z) = 3
    assert ae(fp.sinpi(0+3j), (0.0 + 6195.8238636085899556j))
    assert ae(fp.sinpi(0.25+3j), (4381.1091260582448033 + 4381.1090689950686908j))
    assert ae(fp.sinpi(0.5+3j), (6195.8239443081075259 + 0.0j))
    assert ae(fp.sinpi(0.75+3j), (4381.1091260582448033 - 4381.1090689950686908j))
    assert ae(fp.sinpi(1+3j), (0.0 - 6195.8238636085899556j))
    assert ae(fp.sinpi(1.25+3j), (-4381.1091260582448033 - 4381.1090689950686908j))
    assert ae(fp.sinpi(1.5+3j), (-6195.8239443081075259 + 0.0j))
    assert ae(fp.sinpi(1.75+3j), (-4381.1091260582448033 + 4381.1090689950686908j))
    assert ae(fp.sinpi(2+3j), (0.0 + 6195.8238636085899556j))
    assert ae(fp.sinpi(2.25+3j), (4381.1091260582448033 + 4381.1090689950686908j))
    # tiny arguments: sinpi(x) ~ pi*x, including all sign combinations
    assert ae(fp.sinpi(-0.75), -0.7071067811865475244)
    assert ae(fp.sinpi(-1e-10), -3.1415926535897933529e-10)
    assert ae(fp.sinpi(1e-10), 3.1415926535897933529e-10)
    assert ae(fp.sinpi(1e-10+1e-10j), (3.141592653589793353e-10 + 3.1415926535897933528e-10j))
    assert ae(fp.sinpi(1e-10-1e-10j), (3.141592653589793353e-10 - 3.1415926535897933528e-10j))
    assert ae(fp.sinpi(-1e-10+1e-10j), (-3.141592653589793353e-10 + 3.1415926535897933528e-10j))
    assert ae(fp.sinpi(-1e-10-1e-10j), (-3.141592653589793353e-10 - 3.1415926535897933528e-10j))
    # cospi at real quarter-integers
    assert ae(fp.cospi(0), 1.0)
    assert ae(fp.cospi(0.25), 0.7071067811865475244)
    assert ae(fp.cospi(0.5), 0.0)
    assert ae(fp.cospi(0.75), -0.7071067811865475244)
    assert ae(fp.cospi(1), -1.0)
    assert ae(fp.cospi(1.25), -0.7071067811865475244)
    assert ae(fp.cospi(1.5), 0.0)
    assert ae(fp.cospi(1.75), 0.7071067811865475244)
    assert ae(fp.cospi(2), 1.0)
    assert ae(fp.cospi(2.25), 0.7071067811865475244)
    # cospi along the line Im(z) = 3
    assert ae(fp.cospi(0+3j), (6195.8239443081075259 + 0.0j))
    assert ae(fp.cospi(0.25+3j), (4381.1091260582448033 - 4381.1090689950686908j))
    assert ae(fp.cospi(0.5+3j), (0.0 - 6195.8238636085899556j))
    assert ae(fp.cospi(0.75+3j), (-4381.1091260582448033 - 4381.1090689950686908j))
    assert ae(fp.cospi(1+3j), (-6195.8239443081075259 + 0.0j))
    assert ae(fp.cospi(1.25+3j), (-4381.1091260582448033 + 4381.1090689950686908j))
    assert ae(fp.cospi(1.5+3j), (0.0 + 6195.8238636085899556j))
    assert ae(fp.cospi(1.75+3j), (4381.1091260582448033 + 4381.1090689950686908j))
    assert ae(fp.cospi(2+3j), (6195.8239443081075259 + 0.0j))
    assert ae(fp.cospi(2.25+3j), (4381.1091260582448033 - 4381.1090689950686908j))
    # assorted negative real and complex spot checks
    assert ae(fp.cospi(-0.75), -0.7071067811865475244)
    assert ae(fp.sinpi(-0.7), -0.80901699437494750611)
    assert ae(fp.cospi(-0.7), -0.5877852522924730163)
    assert ae(fp.cospi(-3+2j), (-267.74676148374822225 + 0.0j))
    assert ae(fp.sinpi(-3+2j), (0.0 - 267.74489404101651426j))
    assert ae(fp.sinpi(-0.7+2j), (-216.6116802292079471 - 157.37650009392034693j))
    assert ae(fp.cospi(-0.7+2j), (-157.37759774921754565 + 216.61016943630197336j))
def test_fp_expj():
    """expj (exp(i*x)) and expjpi (exp(i*pi*x)) spot checks."""
    # expj always returns a complex value, even for real arguments
    assert ae(fp.expj(0), (1.0 + 0.0j))
    assert ae(fp.expj(1), (0.5403023058681397174 + 0.84147098480789650665j))
    assert ae(fp.expj(2), (-0.416146836547142387 + 0.9092974268256816954j))
    assert ae(fp.expj(0.75), (0.73168886887382088631 + 0.68163876002333416673j))
    assert ae(fp.expj(2+3j), (-0.020718731002242879378 + 0.045271253156092975488j))
    # expjpi hits exact values at integer arguments
    assert ae(fp.expjpi(0), (1.0 + 0.0j))
    assert ae(fp.expjpi(1), (-1.0 + 0.0j))
    assert ae(fp.expjpi(2), (1.0 + 0.0j))
    assert ae(fp.expjpi(0.75), (-0.7071067811865475244 + 0.7071067811865475244j))
    assert ae(fp.expjpi(2+3j), (0.000080699517570304599239 + 0.0j))
def test_fp_bernoulli():
    """Bernoulli numbers: B0, B1, even indices, and a vanishing odd index."""
    expected = {
        0: 1.0,
        1: -0.5,
        2: 0.16666666666666666667,
        10: 0.075757575757575757576,
        11: 0.0,
    }
    for n, value in expected.items():
        assert ae(fp.bernoulli(n), value)
def test_fp_gamma():
    """gamma, fac (factorial) and loggamma over real/complex arguments,
    including poles' neighborhoods and the loggamma branch structure."""
    # gamma at positive, negative-noninteger and complex points
    assert ae(fp.gamma(1), 1.0)
    assert ae(fp.gamma(1.5), 0.88622692545275801365)
    assert ae(fp.gamma(10), 362880.0)
    assert ae(fp.gamma(-0.5), -3.5449077018110320546)
    assert ae(fp.gamma(-7.1), 0.0016478244570263333622)
    assert ae(fp.gamma(12.3), 83385367.899970000963)
    assert ae(fp.gamma(2+0j), (1.0 + 0.0j))
    assert ae(fp.gamma(-2.5+0j), (-0.94530872048294188123 + 0.0j))
    assert ae(fp.gamma(3+4j), (0.0052255384713692141947 - 0.17254707929430018772j))
    assert ae(fp.gamma(-3-4j), (0.00001460997305874775607 - 0.000020760733311509070396j))
    # fac(n) = gamma(n+1)
    assert ae(fp.fac(0), 1.0)
    assert ae(fp.fac(1), 1.0)
    assert ae(fp.fac(20), 2432902008176640000.0)
    assert ae(fp.fac(-3.5), -0.94530872048294188123)
    assert ae(fp.fac(2+3j), (-0.44011340763700171113 - 0.06363724312631702183j))
    # loggamma: real line, large arguments, complex plane
    assert ae(fp.loggamma(1.0), 0.0)
    assert ae(fp.loggamma(2.0), 0.0)
    assert ae(fp.loggamma(3.0), 0.69314718055994530942)
    assert ae(fp.loggamma(7.25), 7.0521854507385394449)
    assert ae(fp.loggamma(1000.0), 5905.2204232091812118)
    assert ae(fp.loggamma(1e50), 1.1412925464970229298e+52)
    assert ae(fp.loggamma(1e25+1e25j), (5.6125802751733671621e+26 + 5.7696599078528568383e+26j))
    assert ae(fp.loggamma(3+4j), (-1.7566267846037841105 + 4.7426644380346579282j))
    # negative real axis: imaginary part steps by -pi between the poles
    assert ae(fp.loggamma(-0.5), (1.2655121234846453965 - 3.1415926535897932385j))
    assert ae(fp.loggamma(-1.25), (1.3664317612369762346 - 6.2831853071795864769j))
    assert ae(fp.loggamma(-2.75), (0.0044878975359557733115 - 9.4247779607693797154j))
    assert ae(fp.loggamma(-3.5), (-1.3090066849930420464 - 12.566370614359172954j))
    assert ae(fp.loggamma(-4.5), (-2.8130840817693161197 - 15.707963267948966192j))
    assert ae(fp.loggamma(-2+3j), (-6.776523813485657093 - 4.568791367260286402j))
    assert ae(fp.loggamma(-1000.3), (-5912.8440347785205041 - 3144.7342462433830317j))
    assert ae(fp.loggamma(-100-100j), (-632.35117666833135562 - 158.37641469650352462j))
    # tiny arguments near the pole at zero, approached from four directions
    assert ae(fp.loggamma(1e-10), 23.025850929882735237)
    assert ae(fp.loggamma(-1e-10), (23.02585092999817837 - 3.1415926535897932385j))
    assert ae(fp.loggamma(1e-10j), (23.025850929940456804 - 1.5707963268526181857j))
    assert ae(fp.loggamma(1e-10j-1e-10), (22.679277339718205716 - 2.3561944902500664954j))
def test_fp_psi():
    """psi (polygamma of order m) and harmonic numbers, real and complex."""
    # digamma (order 0) over a wide real range and one complex point
    assert ae(fp.psi(0, 3.7), 1.1671535393615114409)
    assert ae(fp.psi(0, 0.5), -1.9635100260214234794)
    assert ae(fp.psi(0, 1), -0.57721566490153286061)
    assert ae(fp.psi(0, -2.5), 1.1031566406452431872)
    assert ae(fp.psi(0, 12.9), 2.5179671503279156347)
    assert ae(fp.psi(0, 100), 4.6001618527380874002)
    assert ae(fp.psi(0, 2500.3), 7.8239660143238547877)
    assert ae(fp.psi(0, 1e40), 92.103403719761827391)
    assert ae(fp.psi(0, 1e200), 460.51701859880913677)
    assert ae(fp.psi(0, 3.7+0j), (1.1671535393615114409 + 0.0j))
    # higher polygamma orders
    assert ae(fp.psi(1, 3), 0.39493406684822643647)
    assert ae(fp.psi(3, 2+3j), (-0.05383196209159972116 + 0.0076890935247364805218j))
    assert ae(fp.psi(4, -0.5+1j), (1.2719531355492328195 - 18.211833410936276774j))
    # harmonic numbers, including the analytic continuation
    assert ae(fp.harmonic(0), 0.0)
    assert ae(fp.harmonic(1), 1.0)
    assert ae(fp.harmonic(2), 1.5)
    assert ae(fp.harmonic(100), 5.1873775176396202608)
    assert ae(fp.harmonic(-2.5), 1.2803723055467760478)
    assert ae(fp.harmonic(2+3j), (1.9390425294578375875 + 0.87336044981834544043j))
    assert ae(fp.harmonic(-5-4j), (2.3725754822349437733 - 2.4160904444801621j))
def test_fp_zeta():
    """zeta, altzeta, Hurwitz zeta, primezeta and riemannr spot checks,
    including trivial zeros and points near the pole at s = 1."""
    # Riemann zeta on the real line, including trivial zeros at -2, -4, ...
    assert ae(fp.zeta(1e100), 1.0)
    assert ae(fp.zeta(3), 1.2020569031595942854)
    assert ae(fp.zeta(2+0j), (1.6449340668482264365 + 0.0j))
    assert ae(fp.zeta(0.93), -13.713619351638164784)
    assert ae(fp.zeta(1.74), 1.9796863545771774095)
    assert ae(fp.zeta(0.0), -0.5)
    assert ae(fp.zeta(-1.0), -0.083333333333333333333)
    assert ae(fp.zeta(-2.0), 0.0)
    assert ae(fp.zeta(-3.0), 0.0083333333333333333333)
    assert ae(fp.zeta(-500.0), 0.0)
    assert ae(fp.zeta(-7.4), 0.0036537321227995882447)
    # values just above 1: probe the asymptotic regime toward zeta -> 1
    assert ae(fp.zeta(2.1), 1.5602165335033620158)
    assert ae(fp.zeta(26.9), 1.0000000079854809935)
    assert ae(fp.zeta(26), 1.0000000149015548284)
    assert ae(fp.zeta(27), 1.0000000074507117898)
    assert ae(fp.zeta(28), 1.0000000037253340248)
    assert ae(fp.zeta(27.1), 1.000000006951755045)
    assert ae(fp.zeta(32.7), 1.0000000001433243232)
    assert ae(fp.zeta(100), 1.0)
    # alternating (Dirichlet eta) zeta
    assert ae(fp.altzeta(3.5), 0.92755357777394803511)
    assert ae(fp.altzeta(1), 0.69314718055994530942)
    assert ae(fp.altzeta(2), 0.82246703342411321824)
    assert ae(fp.altzeta(0), 0.5)
    # Hurwitz zeta with a second (shift) argument
    assert ae(fp.zeta(-2+3j, 1), (0.13297115587929864827 + 0.12305330040458776494j))
    assert ae(fp.zeta(-2+3j, 5), (18.384866151867576927 - 11.377015110597711009j))
    # very close to the simple pole at s = 1
    assert ae(fp.zeta(1.0000000001), 9999999173.1735741337)
    assert ae(fp.zeta(0.9999999999), -9999999172.0191428039)
    assert ae(fp.zeta(1+0.000000001j), (0.57721566490153286061 - 999999999.99999993765j))
    # prime zeta and the Riemann R prime-counting approximation
    assert ae(fp.primezeta(2.5+4j), (-0.16922458243438033385 - 0.010847965298387727811j))
    assert ae(fp.primezeta(4), 0.076993139764246844943)
    assert ae(fp.riemannr(3.7), 2.3034079839110855717)
    assert ae(fp.riemannr(8), 3.9011860449341499474)
    assert ae(fp.riemannr(3+4j), (2.2369653314259991796 + 1.6339943856990281694j))
def test_fp_hyp2f1():
    """Gauss hypergeometric 2F1 outside the unit disk and at complex z."""
    cases = [
        ((1, (3, 2), 3.25, 5.0),
         (-0.46600275923108143059 - 0.74393667908854842325j)),
        ((1+1j, (3, 2), 3.25, 5.0),
         (-5.9208875603806515987 - 2.3813557707889590686j)),
        ((1+1j, (3, 2), 3.25, 2+3j),
         (0.17174552030925080445 + 0.19589781970539389999j)),
    ]
    for args, expected in cases:
        assert ae(fp.hyp2f1(*args), expected)
def test_fp_erf():
    """erf and erfc over real, huge, and complex arguments, including the
    odd symmetry erf(-x) = -erf(x) and saturation toward +-1 / 0 / 2."""
    # type/limit sanity checks
    assert fp.erf(2) == fp.erf(2.0) == fp.erf(2.0+0.0j)
    assert fp.erf(fp.inf) == 1.0
    assert fp.erf(fp.ninf) == -1.0
    # erf on the real line, always paired with its negated argument
    assert ae(fp.erf(0), 0.0)
    assert ae(fp.erf(-0), -0.0)
    assert ae(fp.erf(0.3), 0.32862675945912741619)
    assert ae(fp.erf(-0.3), -0.32862675945912741619)
    assert ae(fp.erf(0.9), 0.79690821242283213966)
    assert ae(fp.erf(-0.9), -0.79690821242283213966)
    assert ae(fp.erf(1.0), 0.84270079294971486934)
    assert ae(fp.erf(-1.0), -0.84270079294971486934)
    assert ae(fp.erf(1.1), 0.88020506957408172966)
    assert ae(fp.erf(-1.1), -0.88020506957408172966)
    assert ae(fp.erf(8.5), 1.0)
    assert ae(fp.erf(-8.5), -1.0)
    assert ae(fp.erf(9.1), 1.0)
    assert ae(fp.erf(-9.1), -1.0)
    assert ae(fp.erf(20.0), 1.0)
    assert ae(fp.erf(-20.0), -1.0)
    assert ae(fp.erf(10000.0), 1.0)
    assert ae(fp.erf(-10000.0), -1.0)
    assert ae(fp.erf(1e+50), 1.0)
    assert ae(fp.erf(-1e+50), -1.0)
    # complex arguments (erf grows rapidly off the real axis)
    assert ae(fp.erf(1j), 1.650425758797542876j)
    assert ae(fp.erf(-1j), -1.650425758797542876j)
    assert ae(fp.erf((2+3j)), (-20.829461427614568389 + 8.6873182714701631444j))
    assert ae(fp.erf(-(2+3j)), -(-20.829461427614568389 + 8.6873182714701631444j))
    assert ae(fp.erf((8+9j)), (-1072004.2525062051158 + 364149.91954310255423j))
    assert ae(fp.erf(-(8+9j)), -(-1072004.2525062051158 + 364149.91954310255423j))
    # erfc limits and exact values at zero
    assert fp.erfc(fp.inf) == 0.0
    assert fp.erfc(fp.ninf) == 2.0
    assert fp.erfc(0) == 1
    assert fp.erfc(-0.0) == 1
    assert fp.erfc(0+0j) == 1
    # erfc on the real line; note erfc(-x) = 2 - erfc(x)
    assert ae(fp.erfc(0.3), 0.67137324054087258381)
    assert ae(fp.erfc(-0.3), 1.3286267594591274162)
    assert ae(fp.erfc(0.9), 0.20309178757716786034)
    assert ae(fp.erfc(-0.9), 1.7969082124228321397)
    assert ae(fp.erfc(1.0), 0.15729920705028513066)
    assert ae(fp.erfc(-1.0), 1.8427007929497148693)
    assert ae(fp.erfc(1.1), 0.11979493042591827034)
    assert ae(fp.erfc(-1.1), 1.8802050695740817297)
    # large arguments: erfc underflows gracefully instead of cancelling
    assert ae(fp.erfc(8.5), 2.7623240713337714461e-33)
    assert ae(fp.erfc(-8.5), 2.0)
    assert ae(fp.erfc(9.1), 6.6969004279886077452e-38)
    assert ae(fp.erfc(-9.1), 2.0)
    assert ae(fp.erfc(20.0), 5.3958656116079009289e-176)
    assert ae(fp.erfc(-20.0), 2.0)
    assert ae(fp.erfc(10000.0), 0.0)
    assert ae(fp.erfc(-10000.0), 2.0)
    assert ae(fp.erfc(1e+50), 0.0)
    assert ae(fp.erfc(-1e+50), 2.0)
    # complex erfc (looser tolerance where cancellation occurs)
    assert ae(fp.erfc(1j), (1.0 - 1.650425758797542876j))
    assert ae(fp.erfc(-1j), (1.0 + 1.650425758797542876j))
    assert ae(fp.erfc((2+3j)), (21.829461427614568389 - 8.6873182714701631444j), 1e-13)
    assert ae(fp.erfc(-(2+3j)), (-19.829461427614568389 + 8.6873182714701631444j), 1e-13)
    assert ae(fp.erfc((8+9j)), (1072005.2525062051158 - 364149.91954310255423j))
    assert ae(fp.erfc(-(8+9j)), (-1072003.2525062051158 + 364149.91954310255423j))
    assert ae(fp.erfc(20+0j), (5.3958656116079009289e-176 + 0.0j))
def test_fp_lambertw():
    """Lambert W: principal branch (k=0), branch k=-1, and branch k=2,
    each exercised over real, complex, tiny and huge arguments."""
    # principal branch (k = 0)
    assert ae(fp.lambertw(0.0), 0.0)
    assert ae(fp.lambertw(1.0), 0.567143290409783873)
    assert ae(fp.lambertw(7.5), 1.5662309537823875394)
    assert ae(fp.lambertw(-0.25), -0.35740295618138890307)
    assert ae(fp.lambertw(-10.0), (1.3699809685212708156 + 2.140194527074713196j))
    assert ae(fp.lambertw(0+0j), (0.0 + 0.0j))
    assert ae(fp.lambertw(4+0j), (1.2021678731970429392 + 0.0j))
    assert ae(fp.lambertw(1000.5), 5.2500227450408980127)
    assert ae(fp.lambertw(1e100), 224.84310644511850156)
    assert ae(fp.lambertw(-1000.0), (5.1501630246362515223 + 2.6641981432905204596j))
    assert ae(fp.lambertw(1e-10), 9.9999999990000003645e-11)
    assert ae(fp.lambertw(1e-10j), (1.0000000000000000728e-20 + 1.0000000000000000364e-10j))
    assert ae(fp.lambertw(3+4j), (1.2815618061237758782 + 0.53309522202097107131j))
    assert ae(fp.lambertw(-3-4j), (1.0750730665692549276 - 1.3251023817343588823j))
    assert ae(fp.lambertw(10000+1000j), (7.2361526563371602186 + 0.087567810943839352034j))
    # branch k = -1 (W -> -inf as z -> 0 on this branch)
    assert ae(fp.lambertw(0.0, -1), -fp.inf)
    assert ae(fp.lambertw(1.0, -1), (-1.5339133197935745079 - 4.3751851530618983855j))
    assert ae(fp.lambertw(7.5, -1), (0.44125668415098614999 - 4.8039842008452390179j))
    assert ae(fp.lambertw(-0.25, -1), -2.1532923641103496492)
    assert ae(fp.lambertw(-10.0, -1), (1.3699809685212708156 - 2.140194527074713196j))
    assert ae(fp.lambertw(0+0j, -1), -fp.inf)
    assert ae(fp.lambertw(4+0j, -1), (-0.15730793189620765317 - 4.6787800704666656212j))
    assert ae(fp.lambertw(1000.5, -1), (4.9153765415404024736 - 5.4465682700815159569j))
    assert ae(fp.lambertw(1e100, -1), (224.84272130101601052 - 6.2553713838167244141j))
    assert ae(fp.lambertw(-1000.0, -1), (5.1501630246362515223 - 2.6641981432905204596j))
    assert ae(fp.lambertw(1e-10, -1), (-26.303186778379041521 - 3.2650939117038283975j))
    assert ae(fp.lambertw(1e-10j, -1), (-26.297238779529035028 - 1.6328071613455765135j))
    assert ae(fp.lambertw(3+4j, -1), (0.25856740686699741676 - 3.8521166861614355895j))
    assert ae(fp.lambertw(-3-4j, -1), (-0.32028750204310768396 - 6.8801677192091972343j))
    assert ae(fp.lambertw(10000+1000j, -1), (7.0255308742285435567 - 5.5177506835734067601j))
    # branch k = 2
    assert ae(fp.lambertw(0.0, 2), -fp.inf)
    assert ae(fp.lambertw(1.0, 2), (-2.4015851048680028842 + 10.776299516115070898j))
    assert ae(fp.lambertw(7.5, 2), (-0.38003357962843791529 + 10.960916473368746184j))
    assert ae(fp.lambertw(-0.25, 2), (-4.0558735269061511898 + 13.852334658567271386j))
    assert ae(fp.lambertw(-10.0, 2), (-0.34479123764318858696 + 14.112740596763592363j))
    assert ae(fp.lambertw(0+0j, 2), -fp.inf)
    assert ae(fp.lambertw(4+0j, 2), (-1.0070343323804262788 + 10.903476551861683082j))
    assert ae(fp.lambertw(1000.5, 2), (4.4076185165459395295 + 11.365524591091402177j))
    assert ae(fp.lambertw(1e100, 2), (224.84156762724875878 + 12.510785262632255672j))
    assert ae(fp.lambertw(-1000.0, 2), (4.1984245610246530756 + 14.420478573754313845j))
    assert ae(fp.lambertw(1e-10, 2), (-26.362258095445866488 + 9.7800247407031482519j))
    assert ae(fp.lambertw(1e-10j, 2), (-26.384250801683084252 + 11.403535950607739763j))
    assert ae(fp.lambertw(3+4j, 2), (-0.86554679943333993562 + 11.849956798331992027j))
    assert ae(fp.lambertw(-3-4j, 2), (-0.55792273874679112639 + 8.7173627024159324811j))
    assert ae(fp.lambertw(10000+1000j, 2), (6.6223802254585662734 + 11.61348646825020766j))
def test_fp_stress_ei_e1():
# Can be tightened on recent Pythons with more accurate math/cmath
ATOL = 1e-13
PTOL = 1e-12
v = fp.e1(1.1641532182693481445e-10)
assert ae(v, 22.296641293693077672, tol=ATOL)
assert type(v) is float
v = fp.e1(0.25)
assert ae(v, 1.0442826344437381945, tol=ATOL)
assert type(v) is float
v = fp.e1(1.0)
assert ae(v, 0.21938393439552027368, tol=ATOL)
assert type(v) is float
v = fp.e1(2.0)
assert ae(v, 0.048900510708061119567, tol=ATOL)
assert type(v) is float
v = fp.e1(5.0)
assert ae(v, 0.0011482955912753257973, tol=ATOL)
assert type(v) is float
v = fp.e1(20.0)
assert ae(v, 9.8355252906498816904e-11, tol=ATOL)
assert type(v) is float
v = fp.e1(30.0)
assert ae(v, 3.0215520106888125448e-15, tol=ATOL)
assert type(v) is float
v = fp.e1(40.0)
assert ae(v, 1.0367732614516569722e-19, tol=ATOL)
assert type(v) is float
v = fp.e1(50.0)
assert ae(v, 3.7832640295504590187e-24, tol=ATOL)
assert type(v) is float
v = fp.e1(80.0)
assert ae(v, 2.2285432586884729112e-37, tol=ATOL)
assert type(v) is float
v = fp.e1((1.1641532182693481445e-10 + 0.0j))
assert ae(v, (22.296641293693077672 + 0.0j), tol=ATOL)
assert ae(v.real, 22.296641293693077672, tol=PTOL)
assert v.imag == 0
v = fp.e1((0.25 + 0.0j))
assert ae(v, (1.0442826344437381945 + 0.0j), tol=ATOL)
assert ae(v.real, 1.0442826344437381945, tol=PTOL)
assert v.imag == 0
v = fp.e1((1.0 + 0.0j))
assert ae(v, (0.21938393439552027368 + 0.0j), tol=ATOL)
assert ae(v.real, 0.21938393439552027368, tol=PTOL)
assert v.imag == 0
v = fp.e1((2.0 + 0.0j))
assert ae(v, (0.048900510708061119567 + 0.0j), tol=ATOL)
assert ae(v.real, 0.048900510708061119567, tol=PTOL)
assert v.imag == 0
v = fp.e1((5.0 + 0.0j))
assert ae(v, (0.0011482955912753257973 + 0.0j), tol=ATOL)
assert ae(v.real, 0.0011482955912753257973, tol=PTOL)
assert v.imag == 0
v = fp.e1((20.0 + 0.0j))
assert ae(v, (9.8355252906498816904e-11 + 0.0j), tol=ATOL)
assert ae(v.real, 9.8355252906498816904e-11, tol=PTOL)
assert v.imag == 0
v = fp.e1((30.0 + 0.0j))
assert ae(v, (3.0215520106888125448e-15 + 0.0j), tol=ATOL)
assert ae(v.real, 3.0215520106888125448e-15, tol=PTOL)
assert v.imag == 0
v = fp.e1((40.0 + 0.0j))
assert ae(v, (1.0367732614516569722e-19 + 0.0j), tol=ATOL)
assert ae(v.real, 1.0367732614516569722e-19, tol=PTOL)
assert v.imag == 0
v = fp.e1((50.0 + 0.0j))
assert ae(v, (3.7832640295504590187e-24 + 0.0j), tol=ATOL)
assert ae(v.real, 3.7832640295504590187e-24, tol=PTOL)
assert v.imag == 0
v = fp.e1((80.0 + 0.0j))
assert ae(v, (2.2285432586884729112e-37 + 0.0j), tol=ATOL)
assert ae(v.real, 2.2285432586884729112e-37, tol=PTOL)
assert v.imag == 0
v = fp.e1((4.6566128730773925781e-10 + 1.1641532182693481445e-10j))
assert ae(v, (20.880034622014215597 - 0.24497866301044883237j), tol=ATOL)
assert ae(v.real, 20.880034622014215597, tol=PTOL)
assert ae(v.imag, -0.24497866301044883237, tol=PTOL)
v = fp.e1((1.0 + 0.25j))
assert ae(v, (0.19731063945004229095 - 0.087366045774299963672j), tol=ATOL)
assert ae(v.real, 0.19731063945004229095, tol=PTOL)
assert ae(v.imag, -0.087366045774299963672, tol=PTOL)
v = fp.e1((4.0 + 1.0j))
assert ae(v, (0.0013106173980145506944 - 0.0034542480199350626699j), tol=ATOL)
assert ae(v.real, 0.0013106173980145506944, tol=PTOL)
assert ae(v.imag, -0.0034542480199350626699, tol=PTOL)
v = fp.e1((8.0 + 2.0j))
assert ae(v, (-0.000022278049065270225945 - 0.000029191940456521555288j), tol=ATOL)
assert ae(v.real, -0.000022278049065270225945, tol=PTOL)
assert ae(v.imag, -0.000029191940456521555288, tol=PTOL)
v = fp.e1((20.0 + 5.0j))
assert ae(v, (4.7711374515765346894e-11 + 8.2902652405126947359e-11j), tol=ATOL)
assert ae(v.real, 4.7711374515765346894e-11, tol=PTOL)
assert ae(v.imag, 8.2902652405126947359e-11, tol=PTOL)
v = fp.e1((80.0 + 20.0j))
assert ae(v, (3.8353473865788235787e-38 - 2.129247592349605139e-37j), tol=ATOL)
assert ae(v.real, 3.8353473865788235787e-38, tol=PTOL)
assert ae(v.imag, -2.129247592349605139e-37, tol=PTOL)
v = fp.e1((120.0 + 30.0j))
assert ae(v, (2.3836002337480334716e-55 + 5.6704043587126198306e-55j), tol=ATOL)
assert ae(v.real, 2.3836002337480334716e-55, tol=PTOL)
assert ae(v.imag, 5.6704043587126198306e-55, tol=PTOL)
v = fp.e1((160.0 + 40.0j))
assert ae(v, (-1.6238022898654510661e-72 - 1.104172355572287367e-72j), tol=ATOL)
assert ae(v.real, -1.6238022898654510661e-72, tol=PTOL)
assert ae(v.imag, -1.104172355572287367e-72, tol=PTOL)
v = fp.e1((200.0 + 50.0j))
assert ae(v, (6.6800061461666228487e-90 + 1.4473816083541016115e-91j), tol=ATOL)
assert ae(v.real, 6.6800061461666228487e-90, tol=PTOL)
assert ae(v.imag, 1.4473816083541016115e-91, tol=PTOL)
v = fp.e1((320.0 + 80.0j))
assert ae(v, (4.2737871527778786157e-143 + 3.1789935525785660314e-142j), tol=ATOL)
assert ae(v.real, 4.2737871527778786157e-143, tol=PTOL)
assert ae(v.imag, 3.1789935525785660314e-142, tol=PTOL)
v = fp.e1((1.1641532182693481445e-10 + 1.1641532182693481445e-10j))
assert ae(v, (21.950067703413105017 - 0.7853981632810329878j), tol=ATOL)
assert ae(v.real, 21.950067703413105017, tol=PTOL)
assert ae(v.imag, -0.7853981632810329878, tol=PTOL)
v = fp.e1((0.25 + 0.25j))
assert ae(v, (0.71092525792923287894 - 0.56491812441304194711j), tol=ATOL)
assert ae(v.real, 0.71092525792923287894, tol=PTOL)
assert ae(v.imag, -0.56491812441304194711, tol=PTOL)
v = fp.e1((1.0 + 1.0j))
assert ae(v, (0.00028162445198141832551 - 0.17932453503935894015j), tol=ATOL)
assert ae(v.real, 0.00028162445198141832551, tol=PTOL)
assert ae(v.imag, -0.17932453503935894015, tol=PTOL)
v = fp.e1((2.0 + 2.0j))
assert ae(v, (-0.033767089606562004246 - 0.018599414169750541925j), tol=ATOL)
assert ae(v.real, -0.033767089606562004246, tol=PTOL)
assert ae(v.imag, -0.018599414169750541925, tol=PTOL)
v = fp.e1((5.0 + 5.0j))
assert ae(v, (0.0007266506660356393891 + 0.00047102780163522245054j), tol=ATOL)
assert ae(v.real, 0.0007266506660356393891, tol=PTOL)
assert ae(v.imag, 0.00047102780163522245054, tol=PTOL)
v = fp.e1((20.0 + 20.0j))
assert ae(v, (-2.3824537449367396579e-11 - 6.6969873156525615158e-11j), tol=ATOL)
assert ae(v.real, -2.3824537449367396579e-11, tol=PTOL)
assert ae(v.imag, -6.6969873156525615158e-11, tol=PTOL)
v = fp.e1((30.0 + 30.0j))
assert ae(v, (1.7316045841744061617e-15 + 1.3065678019487308689e-15j), tol=ATOL)
assert ae(v.real, 1.7316045841744061617e-15, tol=PTOL)
assert ae(v.imag, 1.3065678019487308689e-15, tol=PTOL)
v = fp.e1((40.0 + 40.0j))
assert ae(v, (-7.4001043002899232182e-20 - 4.991847855336816304e-21j), tol=ATOL)
assert ae(v.real, -7.4001043002899232182e-20, tol=PTOL)
assert ae(v.imag, -4.991847855336816304e-21, tol=PTOL)
v = fp.e1((50.0 + 50.0j))
assert ae(v, (2.3566128324644641219e-24 - 1.3188326726201614778e-24j), tol=ATOL)
assert ae(v.real, 2.3566128324644641219e-24, tol=PTOL)
assert ae(v.imag, -1.3188326726201614778e-24, tol=PTOL)
v = fp.e1((80.0 + 80.0j))
assert ae(v, (9.8279750572186526673e-38 + 1.243952841288868831e-37j), tol=ATOL)
assert ae(v.real, 9.8279750572186526673e-38, tol=PTOL)
assert ae(v.imag, 1.243952841288868831e-37, tol=PTOL)
v = fp.e1((1.1641532182693481445e-10 + 4.6566128730773925781e-10j))
assert ae(v, (20.880034621664969632 - 1.3258176632023711778j), tol=ATOL)
assert ae(v.real, 20.880034621664969632, tol=PTOL)
assert ae(v.imag, -1.3258176632023711778, tol=PTOL)
v = fp.e1((0.25 + 1.0j))
assert ae(v, (-0.16868306393667788761 - 0.4858011885947426971j), tol=ATOL)
assert ae(v.real, -0.16868306393667788761, tol=PTOL)
assert ae(v.imag, -0.4858011885947426971, tol=PTOL)
v = fp.e1((1.0 + 4.0j))
assert ae(v, (0.03373591813926547318 + 0.073523452241083821877j), tol=ATOL)
assert ae(v.real, 0.03373591813926547318, tol=PTOL)
assert ae(v.imag, 0.073523452241083821877, tol=PTOL)
v = fp.e1((2.0 + 8.0j))
assert ae(v, (-0.015392833434733785143 - 0.0031747121557605415914j), tol=ATOL)
assert ae(v.real, -0.015392833434733785143, tol=PTOL)
assert ae(v.imag, -0.0031747121557605415914, tol=PTOL)
v = fp.e1((5.0 + 20.0j))
assert ae(v, (-0.00024419662286542966525 - 0.00021008322966152755674j), tol=ATOL)
assert ae(v.real, -0.00024419662286542966525, tol=PTOL)
assert ae(v.imag, -0.00021008322966152755674, tol=PTOL)
v = fp.e1((20.0 + 80.0j))
assert ae(v, (2.3255552781051330088e-11 + 8.9463918891349438007e-12j), tol=ATOL)
assert ae(v.real, 2.3255552781051330088e-11, tol=PTOL)
assert ae(v.imag, 8.9463918891349438007e-12, tol=PTOL)
v = fp.e1((30.0 + 120.0j))
assert ae(v, (-2.7068919097124652332e-16 - 7.0477762411705130239e-16j), tol=ATOL)
assert ae(v.real, -2.7068919097124652332e-16, tol=PTOL)
assert ae(v.imag, -7.0477762411705130239e-16, tol=PTOL)
v = fp.e1((40.0 + 160.0j))
assert ae(v, (-1.1695597827678024687e-20 + 2.2907401455645736661e-20j), tol=ATOL)
assert ae(v.real, -1.1695597827678024687e-20, tol=PTOL)
assert ae(v.imag, 2.2907401455645736661e-20, tol=PTOL)
v = fp.e1((50.0 + 200.0j))
assert ae(v, (9.0323746914410162531e-25 - 2.3950601790033530935e-25j), tol=ATOL)
assert ae(v.real, 9.0323746914410162531e-25, tol=PTOL)
assert ae(v.imag, -2.3950601790033530935e-25, tol=PTOL)
v = fp.e1((80.0 + 320.0j))
assert ae(v, (3.4819106748728063576e-38 - 4.215653005615772724e-38j), tol=ATOL)
assert ae(v.real, 3.4819106748728063576e-38, tol=PTOL)
assert ae(v.imag, -4.215653005615772724e-38, tol=PTOL)
v = fp.e1((0.0 + 1.1641532182693481445e-10j))
assert ae(v, (22.29664129357666235 - 1.5707963266784812974j), tol=ATOL)
assert ae(v.real, 22.29664129357666235, tol=PTOL)
assert ae(v.imag, -1.5707963266784812974, tol=PTOL)
v = fp.e1((0.0 + 0.25j))
assert ae(v, (0.82466306258094565309 - 1.3216627564751394551j), tol=ATOL)
assert ae(v.real, 0.82466306258094565309, tol=PTOL)
assert ae(v.imag, -1.3216627564751394551, tol=PTOL)
v = fp.e1((0.0 + 1.0j))
assert ae(v, (-0.33740392290096813466 - 0.62471325642771360429j), tol=ATOL)
assert ae(v.real, -0.33740392290096813466, tol=PTOL)
assert ae(v.imag, -0.62471325642771360429, tol=PTOL)
v = fp.e1((0.0 + 2.0j))
assert ae(v, (-0.4229808287748649957 + 0.034616650007798229345j), tol=ATOL)
assert ae(v.real, -0.4229808287748649957, tol=PTOL)
assert ae(v.imag, 0.034616650007798229345, tol=PTOL)
v = fp.e1((0.0 + 5.0j))
assert ae(v, (0.19002974965664387862 - 0.020865081850222481957j), tol=ATOL)
assert ae(v.real, 0.19002974965664387862, tol=PTOL)
assert ae(v.imag, -0.020865081850222481957, tol=PTOL)
v = fp.e1((0.0 + 20.0j))
assert ae(v, (-0.04441982084535331654 - 0.022554625751456779068j), tol=ATOL)
assert ae(v.real, -0.04441982084535331654, tol=PTOL)
assert ae(v.imag, -0.022554625751456779068, tol=PTOL)
v = fp.e1((0.0 + 30.0j))
assert ae(v, (0.033032417282071143779 - 0.0040397867645455082476j), tol=ATOL)
assert ae(v.real, 0.033032417282071143779, tol=PTOL)
assert ae(v.imag, -0.0040397867645455082476, tol=PTOL)
v = fp.e1((0.0 + 40.0j))
assert ae(v, (-0.019020007896208766962 + 0.016188792559887887544j), tol=ATOL)
assert ae(v.real, -0.019020007896208766962, tol=PTOL)
assert ae(v.imag, 0.016188792559887887544, tol=PTOL)
v = fp.e1((0.0 + 50.0j))
assert ae(v, (0.0056283863241163054402 - 0.019179254308960724503j), tol=ATOL)
assert ae(v.real, 0.0056283863241163054402, tol=PTOL)
assert ae(v.imag, -0.019179254308960724503, tol=PTOL)
v = fp.e1((0.0 + 80.0j))
assert ae(v, (0.012402501155070958192 + 0.0015345601175906961199j), tol=ATOL)
assert ae(v.real, 0.012402501155070958192, tol=PTOL)
assert ae(v.imag, 0.0015345601175906961199, tol=PTOL)
v = fp.e1((-1.1641532182693481445e-10 + 4.6566128730773925781e-10j))
assert ae(v, (20.880034621432138988 - 1.8157749894560994861j), tol=ATOL)
assert ae(v.real, 20.880034621432138988, tol=PTOL)
assert ae(v.imag, -1.8157749894560994861, tol=PTOL)
v = fp.e1((-0.25 + 1.0j))
assert ae(v, (-0.59066621214766308594 - 0.74474454765205036972j), tol=ATOL)
assert ae(v.real, -0.59066621214766308594, tol=PTOL)
assert ae(v.imag, -0.74474454765205036972, tol=PTOL)
v = fp.e1((-1.0 + 4.0j))
assert ae(v, (0.49739047283060471093 + 0.41543605404038863174j), tol=ATOL)
assert ae(v.real, 0.49739047283060471093, tol=PTOL)
assert ae(v.imag, 0.41543605404038863174, tol=PTOL)
v = fp.e1((-2.0 + 8.0j))
assert ae(v, (-0.8705211147733730969 + 0.24099328498605539667j), tol=ATOL)
assert ae(v.real, -0.8705211147733730969, tol=PTOL)
assert ae(v.imag, 0.24099328498605539667, tol=PTOL)
v = fp.e1((-5.0 + 20.0j))
assert ae(v, (-7.0789514293925893007 - 1.6102177171960790536j), tol=ATOL)
assert ae(v.real, -7.0789514293925893007, tol=PTOL)
assert ae(v.imag, -1.6102177171960790536, tol=PTOL)
v = fp.e1((-20.0 + 80.0j))
assert ae(v, (5855431.4907298084434 - 720920.93315409165707j), tol=ATOL)
assert ae(v.real, 5855431.4907298084434, tol=PTOL)
assert ae(v.imag, -720920.93315409165707, tol=PTOL)
v = fp.e1((-30.0 + 120.0j))
assert ae(v, (-65402491644.703470747 - 56697658399.657460294j), tol=ATOL)
assert ae(v.real, -65402491644.703470747, tol=PTOL)
assert ae(v.imag, -56697658399.657460294, tol=PTOL)
v = fp.e1((-40.0 + 160.0j))
assert ae(v, (25504929379604.776769 + 1429035198630573.2463j), tol=ATOL)
assert ae(v.real, 25504929379604.776769, tol=PTOL)
assert ae(v.imag, 1429035198630573.2463, tol=PTOL)
v = fp.e1((-50.0 + 200.0j))
assert ae(v, (18437746526988116954.0 - 17146362239046152345.0j), tol=ATOL)
assert ae(v.real, 18437746526988116954.0, tol=PTOL)
assert ae(v.imag, -17146362239046152345.0, tol=PTOL)
v = fp.e1((-80.0 + 320.0j))
assert ae(v, (3.3464697299634526706e+31 - 1.6473152633843023919e+32j), tol=ATOL)
assert ae(v.real, 3.3464697299634526706e+31, tol=PTOL)
assert ae(v.imag, -1.6473152633843023919e+32, tol=PTOL)
v = fp.e1((-4.6566128730773925781e-10 + 1.1641532182693481445e-10j))
assert ae(v, (20.880034621082893023 - 2.8966139903465137624j), tol=ATOL)
assert ae(v.real, 20.880034621082893023, tol=PTOL)
assert ae(v.imag, -2.8966139903465137624, tol=PTOL)
v = fp.e1((-1.0 + 0.25j))
assert ae(v, (-1.8942716983721074932 - 2.4689102827070540799j), tol=ATOL)
assert ae(v.real, -1.8942716983721074932, tol=PTOL)
assert ae(v.imag, -2.4689102827070540799, tol=PTOL)
v = fp.e1((-4.0 + 1.0j))
assert ae(v, (-14.806699492675420438 + 9.1384225230837893776j), tol=ATOL)
assert ae(v.real, -14.806699492675420438, tol=PTOL)
assert ae(v.imag, 9.1384225230837893776, tol=PTOL)
v = fp.e1((-8.0 + 2.0j))
assert ae(v, (54.633252667426386294 + 413.20318163814670688j), tol=ATOL)
assert ae(v.real, 54.633252667426386294, tol=PTOL)
assert ae(v.imag, 413.20318163814670688, tol=PTOL)
v = fp.e1((-20.0 + 5.0j))
assert ae(v, (-711836.97165402624643 - 24745250.939695900956j), tol=ATOL)
assert ae(v.real, -711836.97165402624643, tol=PTOL)
assert ae(v.imag, -24745250.939695900956, tol=PTOL)
v = fp.e1((-80.0 + 20.0j))
assert ae(v, (-4.2139911108612653091e+32 + 5.3367124741918251637e+32j), tol=ATOL)
assert ae(v.real, -4.2139911108612653091e+32, tol=PTOL)
assert ae(v.imag, 5.3367124741918251637e+32, tol=PTOL)
v = fp.e1((-120.0 + 30.0j))
assert ae(v, (9.7760616203707508892e+48 - 1.058257682317195792e+50j), tol=ATOL)
assert ae(v.real, 9.7760616203707508892e+48, tol=PTOL)
assert ae(v.imag, -1.058257682317195792e+50, tol=PTOL)
v = fp.e1((-160.0 + 40.0j))
assert ae(v, (8.7065541466623638861e+66 + 1.6577106725141739889e+67j), tol=ATOL)
assert ae(v.real, 8.7065541466623638861e+66, tol=PTOL)
assert ae(v.imag, 1.6577106725141739889e+67, tol=PTOL)
v = fp.e1((-200.0 + 50.0j))
assert ae(v, (-3.070744996327018106e+84 - 1.7243244846769415903e+84j), tol=ATOL)
assert ae(v.real, -3.070744996327018106e+84, tol=PTOL)
assert ae(v.imag, -1.7243244846769415903e+84, tol=PTOL)
v = fp.e1((-320.0 + 80.0j))
assert ae(v, (9.9960598637998647276e+135 - 2.6855081527595608863e+136j), tol=ATOL)
assert ae(v.real, 9.9960598637998647276e+135, tol=PTOL)
assert ae(v.imag, -2.6855081527595608863e+136, tol=PTOL)
v = fp.e1(-1.1641532182693481445e-10)
assert ae(v, (22.296641293460247028 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, 22.296641293460247028, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1(-0.25)
assert ae(v, (0.54254326466191372953 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, 0.54254326466191372953, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1(-1.0)
assert ae(v, (-1.8951178163559367555 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -1.8951178163559367555, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1(-2.0)
assert ae(v, (-4.9542343560018901634 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -4.9542343560018901634, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1(-5.0)
assert ae(v, (-40.185275355803177455 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -40.185275355803177455, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1(-20.0)
assert ae(v, (-25615652.66405658882 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -25615652.66405658882, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1(-30.0)
assert ae(v, (-368973209407.27419706 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -368973209407.27419706, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1(-40.0)
assert ae(v, (-6039718263611241.5784 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -6039718263611241.5784, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1(-50.0)
assert ae(v, (-1.0585636897131690963e+20 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -1.0585636897131690963e+20, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1(-80.0)
assert ae(v, (-7.0146000049047999696e+32 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -7.0146000049047999696e+32, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1((-1.1641532182693481445e-10 + 0.0j))
assert ae(v, (22.296641293460247028 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, 22.296641293460247028, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1((-0.25 + 0.0j))
assert ae(v, (0.54254326466191372953 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, 0.54254326466191372953, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1((-1.0 + 0.0j))
assert ae(v, (-1.8951178163559367555 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -1.8951178163559367555, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1((-2.0 + 0.0j))
assert ae(v, (-4.9542343560018901634 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -4.9542343560018901634, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1((-5.0 + 0.0j))
assert ae(v, (-40.185275355803177455 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -40.185275355803177455, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1((-20.0 + 0.0j))
assert ae(v, (-25615652.66405658882 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -25615652.66405658882, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1((-30.0 + 0.0j))
assert ae(v, (-368973209407.27419706 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -368973209407.27419706, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1((-40.0 + 0.0j))
assert ae(v, (-6039718263611241.5784 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -6039718263611241.5784, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1((-50.0 + 0.0j))
assert ae(v, (-1.0585636897131690963e+20 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -1.0585636897131690963e+20, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1((-80.0 + 0.0j))
assert ae(v, (-7.0146000049047999696e+32 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -7.0146000049047999696e+32, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.e1((-4.6566128730773925781e-10 - 1.1641532182693481445e-10j))
assert ae(v, (20.880034621082893023 + 2.8966139903465137624j), tol=ATOL)
assert ae(v.real, 20.880034621082893023, tol=PTOL)
assert ae(v.imag, 2.8966139903465137624, tol=PTOL)
v = fp.e1((-1.0 - 0.25j))
assert ae(v, (-1.8942716983721074932 + 2.4689102827070540799j), tol=ATOL)
assert ae(v.real, -1.8942716983721074932, tol=PTOL)
assert ae(v.imag, 2.4689102827070540799, tol=PTOL)
v = fp.e1((-4.0 - 1.0j))
assert ae(v, (-14.806699492675420438 - 9.1384225230837893776j), tol=ATOL)
assert ae(v.real, -14.806699492675420438, tol=PTOL)
assert ae(v.imag, -9.1384225230837893776, tol=PTOL)
v = fp.e1((-8.0 - 2.0j))
assert ae(v, (54.633252667426386294 - 413.20318163814670688j), tol=ATOL)
assert ae(v.real, 54.633252667426386294, tol=PTOL)
assert ae(v.imag, -413.20318163814670688, tol=PTOL)
v = fp.e1((-20.0 - 5.0j))
assert ae(v, (-711836.97165402624643 + 24745250.939695900956j), tol=ATOL)
assert ae(v.real, -711836.97165402624643, tol=PTOL)
assert ae(v.imag, 24745250.939695900956, tol=PTOL)
v = fp.e1((-80.0 - 20.0j))
assert ae(v, (-4.2139911108612653091e+32 - 5.3367124741918251637e+32j), tol=ATOL)
assert ae(v.real, -4.2139911108612653091e+32, tol=PTOL)
assert ae(v.imag, -5.3367124741918251637e+32, tol=PTOL)
v = fp.e1((-120.0 - 30.0j))
assert ae(v, (9.7760616203707508892e+48 + 1.058257682317195792e+50j), tol=ATOL)
assert ae(v.real, 9.7760616203707508892e+48, tol=PTOL)
assert ae(v.imag, 1.058257682317195792e+50, tol=PTOL)
v = fp.e1((-160.0 - 40.0j))
assert ae(v, (8.7065541466623638861e+66 - 1.6577106725141739889e+67j), tol=ATOL)
assert ae(v.real, 8.7065541466623638861e+66, tol=PTOL)
assert ae(v.imag, -1.6577106725141739889e+67, tol=PTOL)
v = fp.e1((-200.0 - 50.0j))
assert ae(v, (-3.070744996327018106e+84 + 1.7243244846769415903e+84j), tol=ATOL)
assert ae(v.real, -3.070744996327018106e+84, tol=PTOL)
assert ae(v.imag, 1.7243244846769415903e+84, tol=PTOL)
v = fp.e1((-320.0 - 80.0j))
assert ae(v, (9.9960598637998647276e+135 + 2.6855081527595608863e+136j), tol=ATOL)
assert ae(v.real, 9.9960598637998647276e+135, tol=PTOL)
assert ae(v.imag, 2.6855081527595608863e+136, tol=PTOL)
v = fp.e1((-1.1641532182693481445e-10 - 1.1641532182693481445e-10j))
assert ae(v, (21.950067703180274374 + 2.356194490075929607j), tol=ATOL)
assert ae(v.real, 21.950067703180274374, tol=PTOL)
assert ae(v.imag, 2.356194490075929607, tol=PTOL)
v = fp.e1((-0.25 - 0.25j))
assert ae(v, (0.21441047326710323254 + 2.0732153554307936389j), tol=ATOL)
assert ae(v.real, 0.21441047326710323254, tol=PTOL)
assert ae(v.imag, 2.0732153554307936389, tol=PTOL)
v = fp.e1((-1.0 - 1.0j))
assert ae(v, (-1.7646259855638540684 + 0.7538228020792708192j), tol=ATOL)
assert ae(v.real, -1.7646259855638540684, tol=PTOL)
assert ae(v.imag, 0.7538228020792708192, tol=PTOL)
v = fp.e1((-2.0 - 2.0j))
assert ae(v, (-1.8920781621855474089 - 2.1753697842428647236j), tol=ATOL)
assert ae(v.real, -1.8920781621855474089, tol=PTOL)
assert ae(v.imag, -2.1753697842428647236, tol=PTOL)
v = fp.e1((-5.0 - 5.0j))
assert ae(v, (13.470936071475245856 + 18.464085049321024206j), tol=ATOL)
assert ae(v.real, 13.470936071475245856, tol=PTOL)
assert ae(v.imag, 18.464085049321024206, tol=PTOL)
v = fp.e1((-20.0 - 20.0j))
assert ae(v, (-16589317.398788971896 - 5831702.3296441771206j), tol=ATOL)
assert ae(v.real, -16589317.398788971896, tol=PTOL)
assert ae(v.imag, -5831702.3296441771206, tol=PTOL)
v = fp.e1((-30.0 - 30.0j))
assert ae(v, (154596484273.69322527 + 204179357837.41389696j), tol=ATOL)
assert ae(v.real, 154596484273.69322527, tol=PTOL)
assert ae(v.imag, 204179357837.41389696, tol=PTOL)
v = fp.e1((-40.0 - 40.0j))
assert ae(v, (-287512180321448.45408 - 4203502407932314.974j), tol=ATOL)
assert ae(v.real, -287512180321448.45408, tol=PTOL)
assert ae(v.imag, -4203502407932314.974, tol=PTOL)
v = fp.e1((-50.0 - 50.0j))
assert ae(v, (-36128528616649268826.0 + 64648801861338741963.0j), tol=ATOL)
assert ae(v.real, -36128528616649268826.0, tol=PTOL)
assert ae(v.imag, 64648801861338741963.0, tol=PTOL)
v = fp.e1((-80.0 - 80.0j))
assert ae(v, (3.8674816337930010217e+32 + 3.0540709639658071041e+32j), tol=ATOL)
assert ae(v.real, 3.8674816337930010217e+32, tol=PTOL)
assert ae(v.imag, 3.0540709639658071041e+32, tol=PTOL)
v = fp.e1((-1.1641532182693481445e-10 - 4.6566128730773925781e-10j))
assert ae(v, (20.880034621432138988 + 1.8157749894560994861j), tol=ATOL)
assert ae(v.real, 20.880034621432138988, tol=PTOL)
assert ae(v.imag, 1.8157749894560994861, tol=PTOL)
v = fp.e1((-0.25 - 1.0j))
assert ae(v, (-0.59066621214766308594 + 0.74474454765205036972j), tol=ATOL)
assert ae(v.real, -0.59066621214766308594, tol=PTOL)
assert ae(v.imag, 0.74474454765205036972, tol=PTOL)
v = fp.e1((-1.0 - 4.0j))
assert ae(v, (0.49739047283060471093 - 0.41543605404038863174j), tol=ATOL)
assert ae(v.real, 0.49739047283060471093, tol=PTOL)
assert ae(v.imag, -0.41543605404038863174, tol=PTOL)
v = fp.e1((-2.0 - 8.0j))
assert ae(v, (-0.8705211147733730969 - 0.24099328498605539667j), tol=ATOL)
assert ae(v.real, -0.8705211147733730969, tol=PTOL)
assert ae(v.imag, -0.24099328498605539667, tol=PTOL)
v = fp.e1((-5.0 - 20.0j))
assert ae(v, (-7.0789514293925893007 + 1.6102177171960790536j), tol=ATOL)
assert ae(v.real, -7.0789514293925893007, tol=PTOL)
assert ae(v.imag, 1.6102177171960790536, tol=PTOL)
v = fp.e1((-20.0 - 80.0j))
assert ae(v, (5855431.4907298084434 + 720920.93315409165707j), tol=ATOL)
assert ae(v.real, 5855431.4907298084434, tol=PTOL)
assert ae(v.imag, 720920.93315409165707, tol=PTOL)
v = fp.e1((-30.0 - 120.0j))
assert ae(v, (-65402491644.703470747 + 56697658399.657460294j), tol=ATOL)
assert ae(v.real, -65402491644.703470747, tol=PTOL)
assert ae(v.imag, 56697658399.657460294, tol=PTOL)
v = fp.e1((-40.0 - 160.0j))
assert ae(v, (25504929379604.776769 - 1429035198630573.2463j), tol=ATOL)
assert ae(v.real, 25504929379604.776769, tol=PTOL)
assert ae(v.imag, -1429035198630573.2463, tol=PTOL)
v = fp.e1((-50.0 - 200.0j))
assert ae(v, (18437746526988116954.0 + 17146362239046152345.0j), tol=ATOL)
assert ae(v.real, 18437746526988116954.0, tol=PTOL)
assert ae(v.imag, 17146362239046152345.0, tol=PTOL)
v = fp.e1((-80.0 - 320.0j))
assert ae(v, (3.3464697299634526706e+31 + 1.6473152633843023919e+32j), tol=ATOL)
assert ae(v.real, 3.3464697299634526706e+31, tol=PTOL)
assert ae(v.imag, 1.6473152633843023919e+32, tol=PTOL)
v = fp.e1((0.0 - 1.1641532182693481445e-10j))
assert ae(v, (22.29664129357666235 + 1.5707963266784812974j), tol=ATOL)
assert ae(v.real, 22.29664129357666235, tol=PTOL)
assert ae(v.imag, 1.5707963266784812974, tol=PTOL)
v = fp.e1((0.0 - 0.25j))
assert ae(v, (0.82466306258094565309 + 1.3216627564751394551j), tol=ATOL)
assert ae(v.real, 0.82466306258094565309, tol=PTOL)
assert ae(v.imag, 1.3216627564751394551, tol=PTOL)
v = fp.e1((0.0 - 1.0j))
assert ae(v, (-0.33740392290096813466 + 0.62471325642771360429j), tol=ATOL)
assert ae(v.real, -0.33740392290096813466, tol=PTOL)
assert ae(v.imag, 0.62471325642771360429, tol=PTOL)
v = fp.e1((0.0 - 2.0j))
assert ae(v, (-0.4229808287748649957 - 0.034616650007798229345j), tol=ATOL)
assert ae(v.real, -0.4229808287748649957, tol=PTOL)
assert ae(v.imag, -0.034616650007798229345, tol=PTOL)
v = fp.e1((0.0 - 5.0j))
assert ae(v, (0.19002974965664387862 + 0.020865081850222481957j), tol=ATOL)
assert ae(v.real, 0.19002974965664387862, tol=PTOL)
assert ae(v.imag, 0.020865081850222481957, tol=PTOL)
v = fp.e1((0.0 - 20.0j))
assert ae(v, (-0.04441982084535331654 + 0.022554625751456779068j), tol=ATOL)
assert ae(v.real, -0.04441982084535331654, tol=PTOL)
assert ae(v.imag, 0.022554625751456779068, tol=PTOL)
v = fp.e1((0.0 - 30.0j))
assert ae(v, (0.033032417282071143779 + 0.0040397867645455082476j), tol=ATOL)
assert ae(v.real, 0.033032417282071143779, tol=PTOL)
assert ae(v.imag, 0.0040397867645455082476, tol=PTOL)
v = fp.e1((0.0 - 40.0j))
assert ae(v, (-0.019020007896208766962 - 0.016188792559887887544j), tol=ATOL)
assert ae(v.real, -0.019020007896208766962, tol=PTOL)
assert ae(v.imag, -0.016188792559887887544, tol=PTOL)
v = fp.e1((0.0 - 50.0j))
assert ae(v, (0.0056283863241163054402 + 0.019179254308960724503j), tol=ATOL)
assert ae(v.real, 0.0056283863241163054402, tol=PTOL)
assert ae(v.imag, 0.019179254308960724503, tol=PTOL)
v = fp.e1((0.0 - 80.0j))
assert ae(v, (0.012402501155070958192 - 0.0015345601175906961199j), tol=ATOL)
assert ae(v.real, 0.012402501155070958192, tol=PTOL)
assert ae(v.imag, -0.0015345601175906961199, tol=PTOL)
v = fp.e1((1.1641532182693481445e-10 - 4.6566128730773925781e-10j))
assert ae(v, (20.880034621664969632 + 1.3258176632023711778j), tol=ATOL)
assert ae(v.real, 20.880034621664969632, tol=PTOL)
assert ae(v.imag, 1.3258176632023711778, tol=PTOL)
v = fp.e1((0.25 - 1.0j))
assert ae(v, (-0.16868306393667788761 + 0.4858011885947426971j), tol=ATOL)
assert ae(v.real, -0.16868306393667788761, tol=PTOL)
assert ae(v.imag, 0.4858011885947426971, tol=PTOL)
v = fp.e1((1.0 - 4.0j))
assert ae(v, (0.03373591813926547318 - 0.073523452241083821877j), tol=ATOL)
assert ae(v.real, 0.03373591813926547318, tol=PTOL)
assert ae(v.imag, -0.073523452241083821877, tol=PTOL)
v = fp.e1((2.0 - 8.0j))
assert ae(v, (-0.015392833434733785143 + 0.0031747121557605415914j), tol=ATOL)
assert ae(v.real, -0.015392833434733785143, tol=PTOL)
assert ae(v.imag, 0.0031747121557605415914, tol=PTOL)
v = fp.e1((5.0 - 20.0j))
assert ae(v, (-0.00024419662286542966525 + 0.00021008322966152755674j), tol=ATOL)
assert ae(v.real, -0.00024419662286542966525, tol=PTOL)
assert ae(v.imag, 0.00021008322966152755674, tol=PTOL)
v = fp.e1((20.0 - 80.0j))
assert ae(v, (2.3255552781051330088e-11 - 8.9463918891349438007e-12j), tol=ATOL)
assert ae(v.real, 2.3255552781051330088e-11, tol=PTOL)
assert ae(v.imag, -8.9463918891349438007e-12, tol=PTOL)
v = fp.e1((30.0 - 120.0j))
assert ae(v, (-2.7068919097124652332e-16 + 7.0477762411705130239e-16j), tol=ATOL)
assert ae(v.real, -2.7068919097124652332e-16, tol=PTOL)
assert ae(v.imag, 7.0477762411705130239e-16, tol=PTOL)
v = fp.e1((40.0 - 160.0j))
assert ae(v, (-1.1695597827678024687e-20 - 2.2907401455645736661e-20j), tol=ATOL)
assert ae(v.real, -1.1695597827678024687e-20, tol=PTOL)
assert ae(v.imag, -2.2907401455645736661e-20, tol=PTOL)
v = fp.e1((50.0 - 200.0j))
assert ae(v, (9.0323746914410162531e-25 + 2.3950601790033530935e-25j), tol=ATOL)
assert ae(v.real, 9.0323746914410162531e-25, tol=PTOL)
assert ae(v.imag, 2.3950601790033530935e-25, tol=PTOL)
v = fp.e1((80.0 - 320.0j))
assert ae(v, (3.4819106748728063576e-38 + 4.215653005615772724e-38j), tol=ATOL)
assert ae(v.real, 3.4819106748728063576e-38, tol=PTOL)
assert ae(v.imag, 4.215653005615772724e-38, tol=PTOL)
v = fp.e1((1.1641532182693481445e-10 - 1.1641532182693481445e-10j))
assert ae(v, (21.950067703413105017 + 0.7853981632810329878j), tol=ATOL)
assert ae(v.real, 21.950067703413105017, tol=PTOL)
assert ae(v.imag, 0.7853981632810329878, tol=PTOL)
v = fp.e1((0.25 - 0.25j))
assert ae(v, (0.71092525792923287894 + 0.56491812441304194711j), tol=ATOL)
assert ae(v.real, 0.71092525792923287894, tol=PTOL)
assert ae(v.imag, 0.56491812441304194711, tol=PTOL)
v = fp.e1((1.0 - 1.0j))
assert ae(v, (0.00028162445198141832551 + 0.17932453503935894015j), tol=ATOL)
assert ae(v.real, 0.00028162445198141832551, tol=PTOL)
assert ae(v.imag, 0.17932453503935894015, tol=PTOL)
v = fp.e1((2.0 - 2.0j))
assert ae(v, (-0.033767089606562004246 + 0.018599414169750541925j), tol=ATOL)
assert ae(v.real, -0.033767089606562004246, tol=PTOL)
assert ae(v.imag, 0.018599414169750541925, tol=PTOL)
v = fp.e1((5.0 - 5.0j))
assert ae(v, (0.0007266506660356393891 - 0.00047102780163522245054j), tol=ATOL)
assert ae(v.real, 0.0007266506660356393891, tol=PTOL)
assert ae(v.imag, -0.00047102780163522245054, tol=PTOL)
v = fp.e1((20.0 - 20.0j))
assert ae(v, (-2.3824537449367396579e-11 + 6.6969873156525615158e-11j), tol=ATOL)
assert ae(v.real, -2.3824537449367396579e-11, tol=PTOL)
assert ae(v.imag, 6.6969873156525615158e-11, tol=PTOL)
v = fp.e1((30.0 - 30.0j))
assert ae(v, (1.7316045841744061617e-15 - 1.3065678019487308689e-15j), tol=ATOL)
assert ae(v.real, 1.7316045841744061617e-15, tol=PTOL)
assert ae(v.imag, -1.3065678019487308689e-15, tol=PTOL)
v = fp.e1((40.0 - 40.0j))
assert ae(v, (-7.4001043002899232182e-20 + 4.991847855336816304e-21j), tol=ATOL)
assert ae(v.real, -7.4001043002899232182e-20, tol=PTOL)
assert ae(v.imag, 4.991847855336816304e-21, tol=PTOL)
v = fp.e1((50.0 - 50.0j))
assert ae(v, (2.3566128324644641219e-24 + 1.3188326726201614778e-24j), tol=ATOL)
assert ae(v.real, 2.3566128324644641219e-24, tol=PTOL)
assert ae(v.imag, 1.3188326726201614778e-24, tol=PTOL)
v = fp.e1((80.0 - 80.0j))
assert ae(v, (9.8279750572186526673e-38 - 1.243952841288868831e-37j), tol=ATOL)
assert ae(v.real, 9.8279750572186526673e-38, tol=PTOL)
assert ae(v.imag, -1.243952841288868831e-37, tol=PTOL)
v = fp.e1((4.6566128730773925781e-10 - 1.1641532182693481445e-10j))
assert ae(v, (20.880034622014215597 + 0.24497866301044883237j), tol=ATOL)
assert ae(v.real, 20.880034622014215597, tol=PTOL)
assert ae(v.imag, 0.24497866301044883237, tol=PTOL)
v = fp.e1((1.0 - 0.25j))
assert ae(v, (0.19731063945004229095 + 0.087366045774299963672j), tol=ATOL)
assert ae(v.real, 0.19731063945004229095, tol=PTOL)
assert ae(v.imag, 0.087366045774299963672, tol=PTOL)
v = fp.e1((4.0 - 1.0j))
assert ae(v, (0.0013106173980145506944 + 0.0034542480199350626699j), tol=ATOL)
assert ae(v.real, 0.0013106173980145506944, tol=PTOL)
assert ae(v.imag, 0.0034542480199350626699, tol=PTOL)
v = fp.e1((8.0 - 2.0j))
assert ae(v, (-0.000022278049065270225945 + 0.000029191940456521555288j), tol=ATOL)
assert ae(v.real, -0.000022278049065270225945, tol=PTOL)
assert ae(v.imag, 0.000029191940456521555288, tol=PTOL)
v = fp.e1((20.0 - 5.0j))
assert ae(v, (4.7711374515765346894e-11 - 8.2902652405126947359e-11j), tol=ATOL)
assert ae(v.real, 4.7711374515765346894e-11, tol=PTOL)
assert ae(v.imag, -8.2902652405126947359e-11, tol=PTOL)
v = fp.e1((80.0 - 20.0j))
assert ae(v, (3.8353473865788235787e-38 + 2.129247592349605139e-37j), tol=ATOL)
assert ae(v.real, 3.8353473865788235787e-38, tol=PTOL)
assert ae(v.imag, 2.129247592349605139e-37, tol=PTOL)
v = fp.e1((120.0 - 30.0j))
assert ae(v, (2.3836002337480334716e-55 - 5.6704043587126198306e-55j), tol=ATOL)
assert ae(v.real, 2.3836002337480334716e-55, tol=PTOL)
assert ae(v.imag, -5.6704043587126198306e-55, tol=PTOL)
v = fp.e1((160.0 - 40.0j))
assert ae(v, (-1.6238022898654510661e-72 + 1.104172355572287367e-72j), tol=ATOL)
assert ae(v.real, -1.6238022898654510661e-72, tol=PTOL)
assert ae(v.imag, 1.104172355572287367e-72, tol=PTOL)
v = fp.e1((200.0 - 50.0j))
assert ae(v, (6.6800061461666228487e-90 - 1.4473816083541016115e-91j), tol=ATOL)
assert ae(v.real, 6.6800061461666228487e-90, tol=PTOL)
assert ae(v.imag, -1.4473816083541016115e-91, tol=PTOL)
v = fp.e1((320.0 - 80.0j))
assert ae(v, (4.2737871527778786157e-143 - 3.1789935525785660314e-142j), tol=ATOL)
assert ae(v.real, 4.2737871527778786157e-143, tol=PTOL)
assert ae(v.imag, -3.1789935525785660314e-142, tol=PTOL)
v = fp.ei(1.1641532182693481445e-10)
assert ae(v, -22.296641293460247028, tol=ATOL)
assert type(v) is float
v = fp.ei(0.25)
assert ae(v, -0.54254326466191372953, tol=ATOL)
assert type(v) is float
v = fp.ei(1.0)
assert ae(v, 1.8951178163559367555, tol=ATOL)
assert type(v) is float
v = fp.ei(2.0)
assert ae(v, 4.9542343560018901634, tol=ATOL)
assert type(v) is float
v = fp.ei(5.0)
assert ae(v, 40.185275355803177455, tol=ATOL)
assert type(v) is float
v = fp.ei(20.0)
assert ae(v, 25615652.66405658882, tol=ATOL)
assert type(v) is float
v = fp.ei(30.0)
assert ae(v, 368973209407.27419706, tol=ATOL)
assert type(v) is float
v = fp.ei(40.0)
assert ae(v, 6039718263611241.5784, tol=ATOL)
assert type(v) is float
v = fp.ei(50.0)
assert ae(v, 1.0585636897131690963e+20, tol=ATOL)
assert type(v) is float
v = fp.ei(80.0)
assert ae(v, 7.0146000049047999696e+32, tol=ATOL)
assert type(v) is float
v = fp.ei((1.1641532182693481445e-10 + 0.0j))
assert ae(v, (-22.296641293460247028 + 0.0j), tol=ATOL)
assert ae(v.real, -22.296641293460247028, tol=PTOL)
assert v.imag == 0
v = fp.ei((0.25 + 0.0j))
assert ae(v, (-0.54254326466191372953 + 0.0j), tol=ATOL)
assert ae(v.real, -0.54254326466191372953, tol=PTOL)
assert v.imag == 0
v = fp.ei((1.0 + 0.0j))
assert ae(v, (1.8951178163559367555 + 0.0j), tol=ATOL)
assert ae(v.real, 1.8951178163559367555, tol=PTOL)
assert v.imag == 0
v = fp.ei((2.0 + 0.0j))
assert ae(v, (4.9542343560018901634 + 0.0j), tol=ATOL)
assert ae(v.real, 4.9542343560018901634, tol=PTOL)
assert v.imag == 0
v = fp.ei((5.0 + 0.0j))
assert ae(v, (40.185275355803177455 + 0.0j), tol=ATOL)
assert ae(v.real, 40.185275355803177455, tol=PTOL)
assert v.imag == 0
v = fp.ei((20.0 + 0.0j))
assert ae(v, (25615652.66405658882 + 0.0j), tol=ATOL)
assert ae(v.real, 25615652.66405658882, tol=PTOL)
assert v.imag == 0
v = fp.ei((30.0 + 0.0j))
assert ae(v, (368973209407.27419706 + 0.0j), tol=ATOL)
assert ae(v.real, 368973209407.27419706, tol=PTOL)
assert v.imag == 0
v = fp.ei((40.0 + 0.0j))
assert ae(v, (6039718263611241.5784 + 0.0j), tol=ATOL)
assert ae(v.real, 6039718263611241.5784, tol=PTOL)
assert v.imag == 0
v = fp.ei((50.0 + 0.0j))
assert ae(v, (1.0585636897131690963e+20 + 0.0j), tol=ATOL)
assert ae(v.real, 1.0585636897131690963e+20, tol=PTOL)
assert v.imag == 0
v = fp.ei((80.0 + 0.0j))
assert ae(v, (7.0146000049047999696e+32 + 0.0j), tol=ATOL)
assert ae(v.real, 7.0146000049047999696e+32, tol=PTOL)
assert v.imag == 0
v = fp.ei((4.6566128730773925781e-10 + 1.1641532182693481445e-10j))
assert ae(v, (-20.880034621082893023 + 0.24497866324327947603j), tol=ATOL)
assert ae(v.real, -20.880034621082893023, tol=PTOL)
assert ae(v.imag, 0.24497866324327947603, tol=PTOL)
v = fp.ei((1.0 + 0.25j))
assert ae(v, (1.8942716983721074932 + 0.67268237088273915854j), tol=ATOL)
assert ae(v.real, 1.8942716983721074932, tol=PTOL)
assert ae(v.imag, 0.67268237088273915854, tol=PTOL)
v = fp.ei((4.0 + 1.0j))
assert ae(v, (14.806699492675420438 + 12.280015176673582616j), tol=ATOL)
assert ae(v.real, 14.806699492675420438, tol=PTOL)
assert ae(v.imag, 12.280015176673582616, tol=PTOL)
v = fp.ei((8.0 + 2.0j))
assert ae(v, (-54.633252667426386294 + 416.34477429173650012j), tol=ATOL)
assert ae(v.real, -54.633252667426386294, tol=PTOL)
assert ae(v.imag, 416.34477429173650012, tol=PTOL)
v = fp.ei((20.0 + 5.0j))
assert ae(v, (711836.97165402624643 - 24745247.798103247366j), tol=ATOL)
assert ae(v.real, 711836.97165402624643, tol=PTOL)
assert ae(v.imag, -24745247.798103247366, tol=PTOL)
v = fp.ei((80.0 + 20.0j))
assert ae(v, (4.2139911108612653091e+32 + 5.3367124741918251637e+32j), tol=ATOL)
assert ae(v.real, 4.2139911108612653091e+32, tol=PTOL)
assert ae(v.imag, 5.3367124741918251637e+32, tol=PTOL)
v = fp.ei((120.0 + 30.0j))
assert ae(v, (-9.7760616203707508892e+48 - 1.058257682317195792e+50j), tol=ATOL)
assert ae(v.real, -9.7760616203707508892e+48, tol=PTOL)
assert ae(v.imag, -1.058257682317195792e+50, tol=PTOL)
v = fp.ei((160.0 + 40.0j))
assert ae(v, (-8.7065541466623638861e+66 + 1.6577106725141739889e+67j), tol=ATOL)
assert ae(v.real, -8.7065541466623638861e+66, tol=PTOL)
assert ae(v.imag, 1.6577106725141739889e+67, tol=PTOL)
v = fp.ei((200.0 + 50.0j))
assert ae(v, (3.070744996327018106e+84 - 1.7243244846769415903e+84j), tol=ATOL)
assert ae(v.real, 3.070744996327018106e+84, tol=PTOL)
assert ae(v.imag, -1.7243244846769415903e+84, tol=PTOL)
v = fp.ei((320.0 + 80.0j))
assert ae(v, (-9.9960598637998647276e+135 - 2.6855081527595608863e+136j), tol=ATOL)
assert ae(v.real, -9.9960598637998647276e+135, tol=PTOL)
assert ae(v.imag, -2.6855081527595608863e+136, tol=PTOL)
v = fp.ei((1.1641532182693481445e-10 + 1.1641532182693481445e-10j))
assert ae(v, (-21.950067703180274374 + 0.78539816351386363145j), tol=ATOL)
assert ae(v.real, -21.950067703180274374, tol=PTOL)
assert ae(v.imag, 0.78539816351386363145, tol=PTOL)
v = fp.ei((0.25 + 0.25j))
assert ae(v, (-0.21441047326710323254 + 1.0683772981589995996j), tol=ATOL)
assert ae(v.real, -0.21441047326710323254, tol=PTOL)
assert ae(v.imag, 1.0683772981589995996, tol=PTOL)
v = fp.ei((1.0 + 1.0j))
assert ae(v, (1.7646259855638540684 + 2.3877698515105224193j), tol=ATOL)
assert ae(v.real, 1.7646259855638540684, tol=PTOL)
assert ae(v.imag, 2.3877698515105224193, tol=PTOL)
v = fp.ei((2.0 + 2.0j))
assert ae(v, (1.8920781621855474089 + 5.3169624378326579621j), tol=ATOL)
assert ae(v.real, 1.8920781621855474089, tol=PTOL)
assert ae(v.imag, 5.3169624378326579621, tol=PTOL)
v = fp.ei((5.0 + 5.0j))
assert ae(v, (-13.470936071475245856 - 15.322492395731230968j), tol=ATOL)
assert ae(v.real, -13.470936071475245856, tol=PTOL)
assert ae(v.imag, -15.322492395731230968, tol=PTOL)
v = fp.ei((20.0 + 20.0j))
assert ae(v, (16589317.398788971896 + 5831705.4712368307104j), tol=ATOL)
assert ae(v.real, 16589317.398788971896, tol=PTOL)
assert ae(v.imag, 5831705.4712368307104, tol=PTOL)
v = fp.ei((30.0 + 30.0j))
assert ae(v, (-154596484273.69322527 - 204179357834.2723043j), tol=ATOL)
assert ae(v.real, -154596484273.69322527, tol=PTOL)
assert ae(v.imag, -204179357834.2723043, tol=PTOL)
v = fp.ei((40.0 + 40.0j))
assert ae(v, (287512180321448.45408 + 4203502407932318.1156j), tol=ATOL)
assert ae(v.real, 287512180321448.45408, tol=PTOL)
assert ae(v.imag, 4203502407932318.1156, tol=PTOL)
v = fp.ei((50.0 + 50.0j))
assert ae(v, (36128528616649268826.0 - 64648801861338741960.0j), tol=ATOL)
assert ae(v.real, 36128528616649268826.0, tol=PTOL)
assert ae(v.imag, -64648801861338741960.0, tol=PTOL)
v = fp.ei((80.0 + 80.0j))
assert ae(v, (-3.8674816337930010217e+32 - 3.0540709639658071041e+32j), tol=ATOL)
assert ae(v.real, -3.8674816337930010217e+32, tol=PTOL)
assert ae(v.imag, -3.0540709639658071041e+32, tol=PTOL)
v = fp.ei((1.1641532182693481445e-10 + 4.6566128730773925781e-10j))
assert ae(v, (-20.880034621432138988 + 1.3258176641336937524j), tol=ATOL)
assert ae(v.real, -20.880034621432138988, tol=PTOL)
assert ae(v.imag, 1.3258176641336937524, tol=PTOL)
v = fp.ei((0.25 + 1.0j))
assert ae(v, (0.59066621214766308594 + 2.3968481059377428687j), tol=ATOL)
assert ae(v.real, 0.59066621214766308594, tol=PTOL)
assert ae(v.imag, 2.3968481059377428687, tol=PTOL)
v = fp.ei((1.0 + 4.0j))
assert ae(v, (-0.49739047283060471093 + 3.5570287076301818702j), tol=ATOL)
assert ae(v.real, -0.49739047283060471093, tol=PTOL)
assert ae(v.imag, 3.5570287076301818702, tol=PTOL)
v = fp.ei((2.0 + 8.0j))
assert ae(v, (0.8705211147733730969 + 3.3825859385758486351j), tol=ATOL)
assert ae(v.real, 0.8705211147733730969, tol=PTOL)
assert ae(v.imag, 3.3825859385758486351, tol=PTOL)
v = fp.ei((5.0 + 20.0j))
assert ae(v, (7.0789514293925893007 + 1.5313749363937141849j), tol=ATOL)
assert ae(v.real, 7.0789514293925893007, tol=PTOL)
assert ae(v.imag, 1.5313749363937141849, tol=PTOL)
v = fp.ei((20.0 + 80.0j))
assert ae(v, (-5855431.4907298084434 - 720917.79156143806727j), tol=ATOL)
assert ae(v.real, -5855431.4907298084434, tol=PTOL)
assert ae(v.imag, -720917.79156143806727, tol=PTOL)
v = fp.ei((30.0 + 120.0j))
assert ae(v, (65402491644.703470747 - 56697658396.51586764j), tol=ATOL)
assert ae(v.real, 65402491644.703470747, tol=PTOL)
assert ae(v.imag, -56697658396.51586764, tol=PTOL)
v = fp.ei((40.0 + 160.0j))
assert ae(v, (-25504929379604.776769 + 1429035198630576.3879j), tol=ATOL)
assert ae(v.real, -25504929379604.776769, tol=PTOL)
assert ae(v.imag, 1429035198630576.3879, tol=PTOL)
v = fp.ei((50.0 + 200.0j))
assert ae(v, (-18437746526988116954.0 - 17146362239046152342.0j), tol=ATOL)
assert ae(v.real, -18437746526988116954.0, tol=PTOL)
assert ae(v.imag, -17146362239046152342.0, tol=PTOL)
v = fp.ei((80.0 + 320.0j))
assert ae(v, (-3.3464697299634526706e+31 - 1.6473152633843023919e+32j), tol=ATOL)
assert ae(v.real, -3.3464697299634526706e+31, tol=PTOL)
assert ae(v.imag, -1.6473152633843023919e+32, tol=PTOL)
v = fp.ei((0.0 + 1.1641532182693481445e-10j))
assert ae(v, (-22.29664129357666235 + 1.5707963269113119411j), tol=ATOL)
assert ae(v.real, -22.29664129357666235, tol=PTOL)
assert ae(v.imag, 1.5707963269113119411, tol=PTOL)
v = fp.ei((0.0 + 0.25j))
assert ae(v, (-0.82466306258094565309 + 1.8199298971146537833j), tol=ATOL)
assert ae(v.real, -0.82466306258094565309, tol=PTOL)
assert ae(v.imag, 1.8199298971146537833, tol=PTOL)
v = fp.ei((0.0 + 1.0j))
assert ae(v, (0.33740392290096813466 + 2.5168793971620796342j), tol=ATOL)
assert ae(v.real, 0.33740392290096813466, tol=PTOL)
assert ae(v.imag, 2.5168793971620796342, tol=PTOL)
v = fp.ei((0.0 + 2.0j))
assert ae(v, (0.4229808287748649957 + 3.1762093035975914678j), tol=ATOL)
assert ae(v.real, 0.4229808287748649957, tol=PTOL)
assert ae(v.imag, 3.1762093035975914678, tol=PTOL)
v = fp.ei((0.0 + 5.0j))
assert ae(v, (-0.19002974965664387862 + 3.1207275717395707565j), tol=ATOL)
assert ae(v.real, -0.19002974965664387862, tol=PTOL)
assert ae(v.imag, 3.1207275717395707565, tol=PTOL)
v = fp.ei((0.0 + 20.0j))
assert ae(v, (0.04441982084535331654 + 3.1190380278383364594j), tol=ATOL)
assert ae(v.real, 0.04441982084535331654, tol=PTOL)
assert ae(v.imag, 3.1190380278383364594, tol=PTOL)
v = fp.ei((0.0 + 30.0j))
assert ae(v, (-0.033032417282071143779 + 3.1375528668252477302j), tol=ATOL)
assert ae(v.real, -0.033032417282071143779, tol=PTOL)
assert ae(v.imag, 3.1375528668252477302, tol=PTOL)
v = fp.ei((0.0 + 40.0j))
assert ae(v, (0.019020007896208766962 + 3.157781446149681126j), tol=ATOL)
assert ae(v.real, 0.019020007896208766962, tol=PTOL)
assert ae(v.imag, 3.157781446149681126, tol=PTOL)
v = fp.ei((0.0 + 50.0j))
assert ae(v, (-0.0056283863241163054402 + 3.122413399280832514j), tol=ATOL)
assert ae(v.real, -0.0056283863241163054402, tol=PTOL)
assert ae(v.imag, 3.122413399280832514, tol=PTOL)
v = fp.ei((0.0 + 80.0j))
assert ae(v, (-0.012402501155070958192 + 3.1431272137073839346j), tol=ATOL)
assert ae(v.real, -0.012402501155070958192, tol=PTOL)
assert ae(v.imag, 3.1431272137073839346, tol=PTOL)
v = fp.ei((-1.1641532182693481445e-10 + 4.6566128730773925781e-10j))
assert ae(v, (-20.880034621664969632 + 1.8157749903874220607j), tol=ATOL)
assert ae(v.real, -20.880034621664969632, tol=PTOL)
assert ae(v.imag, 1.8157749903874220607, tol=PTOL)
v = fp.ei((-0.25 + 1.0j))
assert ae(v, (0.16868306393667788761 + 2.6557914649950505414j), tol=ATOL)
assert ae(v.real, 0.16868306393667788761, tol=PTOL)
assert ae(v.imag, 2.6557914649950505414, tol=PTOL)
v = fp.ei((-1.0 + 4.0j))
assert ae(v, (-0.03373591813926547318 + 3.2151161058308770603j), tol=ATOL)
assert ae(v.real, -0.03373591813926547318, tol=PTOL)
assert ae(v.imag, 3.2151161058308770603, tol=PTOL)
v = fp.ei((-2.0 + 8.0j))
assert ae(v, (0.015392833434733785143 + 3.1384179414340326969j), tol=ATOL)
assert ae(v.real, 0.015392833434733785143, tol=PTOL)
assert ae(v.imag, 3.1384179414340326969, tol=PTOL)
v = fp.ei((-5.0 + 20.0j))
assert ae(v, (0.00024419662286542966525 + 3.1413825703601317109j), tol=ATOL)
assert ae(v.real, 0.00024419662286542966525, tol=PTOL)
assert ae(v.imag, 3.1413825703601317109, tol=PTOL)
v = fp.ei((-20.0 + 80.0j))
assert ae(v, (-2.3255552781051330088e-11 + 3.1415926535987396304j), tol=ATOL)
assert ae(v.real, -2.3255552781051330088e-11, tol=PTOL)
assert ae(v.imag, 3.1415926535987396304, tol=PTOL)
v = fp.ei((-30.0 + 120.0j))
assert ae(v, (2.7068919097124652332e-16 + 3.1415926535897925337j), tol=ATOL)
assert ae(v.real, 2.7068919097124652332e-16, tol=PTOL)
assert ae(v.imag, 3.1415926535897925337, tol=PTOL)
v = fp.ei((-40.0 + 160.0j))
assert ae(v, (1.1695597827678024687e-20 + 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, 1.1695597827678024687e-20, tol=PTOL)
assert ae(v.imag, 3.1415926535897932385, tol=PTOL)
v = fp.ei((-50.0 + 200.0j))
assert ae(v, (-9.0323746914410162531e-25 + 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -9.0323746914410162531e-25, tol=PTOL)
assert ae(v.imag, 3.1415926535897932385, tol=PTOL)
v = fp.ei((-80.0 + 320.0j))
assert ae(v, (-3.4819106748728063576e-38 + 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -3.4819106748728063576e-38, tol=PTOL)
assert ae(v.imag, 3.1415926535897932385, tol=PTOL)
v = fp.ei((-4.6566128730773925781e-10 + 1.1641532182693481445e-10j))
assert ae(v, (-20.880034622014215597 + 2.8966139905793444061j), tol=ATOL)
assert ae(v.real, -20.880034622014215597, tol=PTOL)
assert ae(v.imag, 2.8966139905793444061, tol=PTOL)
v = fp.ei((-1.0 + 0.25j))
assert ae(v, (-0.19731063945004229095 + 3.0542266078154932748j), tol=ATOL)
assert ae(v.real, -0.19731063945004229095, tol=PTOL)
assert ae(v.imag, 3.0542266078154932748, tol=PTOL)
v = fp.ei((-4.0 + 1.0j))
assert ae(v, (-0.0013106173980145506944 + 3.1381384055698581758j), tol=ATOL)
assert ae(v.real, -0.0013106173980145506944, tol=PTOL)
assert ae(v.imag, 3.1381384055698581758, tol=PTOL)
v = fp.ei((-8.0 + 2.0j))
assert ae(v, (0.000022278049065270225945 + 3.1415634616493367169j), tol=ATOL)
assert ae(v.real, 0.000022278049065270225945, tol=PTOL)
assert ae(v.imag, 3.1415634616493367169, tol=PTOL)
v = fp.ei((-20.0 + 5.0j))
assert ae(v, (-4.7711374515765346894e-11 + 3.1415926536726958909j), tol=ATOL)
assert ae(v.real, -4.7711374515765346894e-11, tol=PTOL)
assert ae(v.imag, 3.1415926536726958909, tol=PTOL)
v = fp.ei((-80.0 + 20.0j))
assert ae(v, (-3.8353473865788235787e-38 + 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -3.8353473865788235787e-38, tol=PTOL)
assert ae(v.imag, 3.1415926535897932385, tol=PTOL)
v = fp.ei((-120.0 + 30.0j))
assert ae(v, (-2.3836002337480334716e-55 + 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -2.3836002337480334716e-55, tol=PTOL)
assert ae(v.imag, 3.1415926535897932385, tol=PTOL)
v = fp.ei((-160.0 + 40.0j))
assert ae(v, (1.6238022898654510661e-72 + 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, 1.6238022898654510661e-72, tol=PTOL)
assert ae(v.imag, 3.1415926535897932385, tol=PTOL)
v = fp.ei((-200.0 + 50.0j))
assert ae(v, (-6.6800061461666228487e-90 + 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -6.6800061461666228487e-90, tol=PTOL)
assert ae(v.imag, 3.1415926535897932385, tol=PTOL)
v = fp.ei((-320.0 + 80.0j))
assert ae(v, (-4.2737871527778786157e-143 + 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -4.2737871527778786157e-143, tol=PTOL)
assert ae(v.imag, 3.1415926535897932385, tol=PTOL)
v = fp.ei(-1.1641532182693481445e-10)
assert ae(v, -22.296641293693077672, tol=ATOL)
assert type(v) is float
v = fp.ei(-0.25)
assert ae(v, -1.0442826344437381945, tol=ATOL)
assert type(v) is float
v = fp.ei(-1.0)
assert ae(v, -0.21938393439552027368, tol=ATOL)
assert type(v) is float
v = fp.ei(-2.0)
assert ae(v, -0.048900510708061119567, tol=ATOL)
assert type(v) is float
v = fp.ei(-5.0)
assert ae(v, -0.0011482955912753257973, tol=ATOL)
assert type(v) is float
v = fp.ei(-20.0)
assert ae(v, -9.8355252906498816904e-11, tol=ATOL)
assert type(v) is float
v = fp.ei(-30.0)
assert ae(v, -3.0215520106888125448e-15, tol=ATOL)
assert type(v) is float
v = fp.ei(-40.0)
assert ae(v, -1.0367732614516569722e-19, tol=ATOL)
assert type(v) is float
v = fp.ei(-50.0)
assert ae(v, -3.7832640295504590187e-24, tol=ATOL)
assert type(v) is float
v = fp.ei(-80.0)
assert ae(v, -2.2285432586884729112e-37, tol=ATOL)
assert type(v) is float
v = fp.ei((-1.1641532182693481445e-10 + 0.0j))
assert ae(v, (-22.296641293693077672 + 0.0j), tol=ATOL)
assert ae(v.real, -22.296641293693077672, tol=PTOL)
assert v.imag == 0
v = fp.ei((-0.25 + 0.0j))
assert ae(v, (-1.0442826344437381945 + 0.0j), tol=ATOL)
assert ae(v.real, -1.0442826344437381945, tol=PTOL)
assert v.imag == 0
v = fp.ei((-1.0 + 0.0j))
assert ae(v, (-0.21938393439552027368 + 0.0j), tol=ATOL)
assert ae(v.real, -0.21938393439552027368, tol=PTOL)
assert v.imag == 0
v = fp.ei((-2.0 + 0.0j))
assert ae(v, (-0.048900510708061119567 + 0.0j), tol=ATOL)
assert ae(v.real, -0.048900510708061119567, tol=PTOL)
assert v.imag == 0
v = fp.ei((-5.0 + 0.0j))
assert ae(v, (-0.0011482955912753257973 + 0.0j), tol=ATOL)
assert ae(v.real, -0.0011482955912753257973, tol=PTOL)
assert v.imag == 0
v = fp.ei((-20.0 + 0.0j))
assert ae(v, (-9.8355252906498816904e-11 + 0.0j), tol=ATOL)
assert ae(v.real, -9.8355252906498816904e-11, tol=PTOL)
assert v.imag == 0
v = fp.ei((-30.0 + 0.0j))
assert ae(v, (-3.0215520106888125448e-15 + 0.0j), tol=ATOL)
assert ae(v.real, -3.0215520106888125448e-15, tol=PTOL)
assert v.imag == 0
v = fp.ei((-40.0 + 0.0j))
assert ae(v, (-1.0367732614516569722e-19 + 0.0j), tol=ATOL)
assert ae(v.real, -1.0367732614516569722e-19, tol=PTOL)
assert v.imag == 0
v = fp.ei((-50.0 + 0.0j))
assert ae(v, (-3.7832640295504590187e-24 + 0.0j), tol=ATOL)
assert ae(v.real, -3.7832640295504590187e-24, tol=PTOL)
assert v.imag == 0
v = fp.ei((-80.0 + 0.0j))
assert ae(v, (-2.2285432586884729112e-37 + 0.0j), tol=ATOL)
assert ae(v.real, -2.2285432586884729112e-37, tol=PTOL)
assert v.imag == 0
v = fp.ei((-4.6566128730773925781e-10 - 1.1641532182693481445e-10j))
assert ae(v, (-20.880034622014215597 - 2.8966139905793444061j), tol=ATOL)
assert ae(v.real, -20.880034622014215597, tol=PTOL)
assert ae(v.imag, -2.8966139905793444061, tol=PTOL)
v = fp.ei((-1.0 - 0.25j))
assert ae(v, (-0.19731063945004229095 - 3.0542266078154932748j), tol=ATOL)
assert ae(v.real, -0.19731063945004229095, tol=PTOL)
assert ae(v.imag, -3.0542266078154932748, tol=PTOL)
v = fp.ei((-4.0 - 1.0j))
assert ae(v, (-0.0013106173980145506944 - 3.1381384055698581758j), tol=ATOL)
assert ae(v.real, -0.0013106173980145506944, tol=PTOL)
assert ae(v.imag, -3.1381384055698581758, tol=PTOL)
v = fp.ei((-8.0 - 2.0j))
assert ae(v, (0.000022278049065270225945 - 3.1415634616493367169j), tol=ATOL)
assert ae(v.real, 0.000022278049065270225945, tol=PTOL)
assert ae(v.imag, -3.1415634616493367169, tol=PTOL)
v = fp.ei((-20.0 - 5.0j))
assert ae(v, (-4.7711374515765346894e-11 - 3.1415926536726958909j), tol=ATOL)
assert ae(v.real, -4.7711374515765346894e-11, tol=PTOL)
assert ae(v.imag, -3.1415926536726958909, tol=PTOL)
v = fp.ei((-80.0 - 20.0j))
assert ae(v, (-3.8353473865788235787e-38 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -3.8353473865788235787e-38, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.ei((-120.0 - 30.0j))
assert ae(v, (-2.3836002337480334716e-55 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -2.3836002337480334716e-55, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.ei((-160.0 - 40.0j))
assert ae(v, (1.6238022898654510661e-72 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, 1.6238022898654510661e-72, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.ei((-200.0 - 50.0j))
assert ae(v, (-6.6800061461666228487e-90 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -6.6800061461666228487e-90, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.ei((-320.0 - 80.0j))
assert ae(v, (-4.2737871527778786157e-143 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -4.2737871527778786157e-143, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.ei((-1.1641532182693481445e-10 - 1.1641532182693481445e-10j))
assert ae(v, (-21.950067703413105017 - 2.3561944903087602507j), tol=ATOL)
assert ae(v.real, -21.950067703413105017, tol=PTOL)
assert ae(v.imag, -2.3561944903087602507, tol=PTOL)
v = fp.ei((-0.25 - 0.25j))
assert ae(v, (-0.71092525792923287894 - 2.5766745291767512913j), tol=ATOL)
assert ae(v.real, -0.71092525792923287894, tol=PTOL)
assert ae(v.imag, -2.5766745291767512913, tol=PTOL)
v = fp.ei((-1.0 - 1.0j))
assert ae(v, (-0.00028162445198141832551 - 2.9622681185504342983j), tol=ATOL)
assert ae(v.real, -0.00028162445198141832551, tol=PTOL)
assert ae(v.imag, -2.9622681185504342983, tol=PTOL)
v = fp.ei((-2.0 - 2.0j))
assert ae(v, (0.033767089606562004246 - 3.1229932394200426965j), tol=ATOL)
assert ae(v.real, 0.033767089606562004246, tol=PTOL)
assert ae(v.imag, -3.1229932394200426965, tol=PTOL)
v = fp.ei((-5.0 - 5.0j))
assert ae(v, (-0.0007266506660356393891 - 3.1420636813914284609j), tol=ATOL)
assert ae(v.real, -0.0007266506660356393891, tol=PTOL)
assert ae(v.imag, -3.1420636813914284609, tol=PTOL)
v = fp.ei((-20.0 - 20.0j))
assert ae(v, (2.3824537449367396579e-11 - 3.1415926535228233653j), tol=ATOL)
assert ae(v.real, 2.3824537449367396579e-11, tol=PTOL)
assert ae(v.imag, -3.1415926535228233653, tol=PTOL)
v = fp.ei((-30.0 - 30.0j))
assert ae(v, (-1.7316045841744061617e-15 - 3.141592653589794545j), tol=ATOL)
assert ae(v.real, -1.7316045841744061617e-15, tol=PTOL)
assert ae(v.imag, -3.141592653589794545, tol=PTOL)
v = fp.ei((-40.0 - 40.0j))
assert ae(v, (7.4001043002899232182e-20 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, 7.4001043002899232182e-20, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.ei((-50.0 - 50.0j))
assert ae(v, (-2.3566128324644641219e-24 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -2.3566128324644641219e-24, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.ei((-80.0 - 80.0j))
assert ae(v, (-9.8279750572186526673e-38 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -9.8279750572186526673e-38, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.ei((-1.1641532182693481445e-10 - 4.6566128730773925781e-10j))
assert ae(v, (-20.880034621664969632 - 1.8157749903874220607j), tol=ATOL)
assert ae(v.real, -20.880034621664969632, tol=PTOL)
assert ae(v.imag, -1.8157749903874220607, tol=PTOL)
v = fp.ei((-0.25 - 1.0j))
assert ae(v, (0.16868306393667788761 - 2.6557914649950505414j), tol=ATOL)
assert ae(v.real, 0.16868306393667788761, tol=PTOL)
assert ae(v.imag, -2.6557914649950505414, tol=PTOL)
v = fp.ei((-1.0 - 4.0j))
assert ae(v, (-0.03373591813926547318 - 3.2151161058308770603j), tol=ATOL)
assert ae(v.real, -0.03373591813926547318, tol=PTOL)
assert ae(v.imag, -3.2151161058308770603, tol=PTOL)
v = fp.ei((-2.0 - 8.0j))
assert ae(v, (0.015392833434733785143 - 3.1384179414340326969j), tol=ATOL)
assert ae(v.real, 0.015392833434733785143, tol=PTOL)
assert ae(v.imag, -3.1384179414340326969, tol=PTOL)
v = fp.ei((-5.0 - 20.0j))
assert ae(v, (0.00024419662286542966525 - 3.1413825703601317109j), tol=ATOL)
assert ae(v.real, 0.00024419662286542966525, tol=PTOL)
assert ae(v.imag, -3.1413825703601317109, tol=PTOL)
v = fp.ei((-20.0 - 80.0j))
assert ae(v, (-2.3255552781051330088e-11 - 3.1415926535987396304j), tol=ATOL)
assert ae(v.real, -2.3255552781051330088e-11, tol=PTOL)
assert ae(v.imag, -3.1415926535987396304, tol=PTOL)
v = fp.ei((-30.0 - 120.0j))
assert ae(v, (2.7068919097124652332e-16 - 3.1415926535897925337j), tol=ATOL)
assert ae(v.real, 2.7068919097124652332e-16, tol=PTOL)
assert ae(v.imag, -3.1415926535897925337, tol=PTOL)
v = fp.ei((-40.0 - 160.0j))
assert ae(v, (1.1695597827678024687e-20 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, 1.1695597827678024687e-20, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.ei((-50.0 - 200.0j))
assert ae(v, (-9.0323746914410162531e-25 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -9.0323746914410162531e-25, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.ei((-80.0 - 320.0j))
assert ae(v, (-3.4819106748728063576e-38 - 3.1415926535897932385j), tol=ATOL)
assert ae(v.real, -3.4819106748728063576e-38, tol=PTOL)
assert ae(v.imag, -3.1415926535897932385, tol=PTOL)
v = fp.ei((0.0 - 1.1641532182693481445e-10j))
assert ae(v, (-22.29664129357666235 - 1.5707963269113119411j), tol=ATOL)
assert ae(v.real, -22.29664129357666235, tol=PTOL)
assert ae(v.imag, -1.5707963269113119411, tol=PTOL)
v = fp.ei((0.0 - 0.25j))
assert ae(v, (-0.82466306258094565309 - 1.8199298971146537833j), tol=ATOL)
assert ae(v.real, -0.82466306258094565309, tol=PTOL)
assert ae(v.imag, -1.8199298971146537833, tol=PTOL)
v = fp.ei((0.0 - 1.0j))
assert ae(v, (0.33740392290096813466 - 2.5168793971620796342j), tol=ATOL)
assert ae(v.real, 0.33740392290096813466, tol=PTOL)
assert ae(v.imag, -2.5168793971620796342, tol=PTOL)
v = fp.ei((0.0 - 2.0j))
assert ae(v, (0.4229808287748649957 - 3.1762093035975914678j), tol=ATOL)
assert ae(v.real, 0.4229808287748649957, tol=PTOL)
assert ae(v.imag, -3.1762093035975914678, tol=PTOL)
v = fp.ei((0.0 - 5.0j))
assert ae(v, (-0.19002974965664387862 - 3.1207275717395707565j), tol=ATOL)
assert ae(v.real, -0.19002974965664387862, tol=PTOL)
assert ae(v.imag, -3.1207275717395707565, tol=PTOL)
v = fp.ei((0.0 - 20.0j))
assert ae(v, (0.04441982084535331654 - 3.1190380278383364594j), tol=ATOL)
assert ae(v.real, 0.04441982084535331654, tol=PTOL)
assert ae(v.imag, -3.1190380278383364594, tol=PTOL)
v = fp.ei((0.0 - 30.0j))
assert ae(v, (-0.033032417282071143779 - 3.1375528668252477302j), tol=ATOL)
assert ae(v.real, -0.033032417282071143779, tol=PTOL)
assert ae(v.imag, -3.1375528668252477302, tol=PTOL)
v = fp.ei((0.0 - 40.0j))
assert ae(v, (0.019020007896208766962 - 3.157781446149681126j), tol=ATOL)
assert ae(v.real, 0.019020007896208766962, tol=PTOL)
assert ae(v.imag, -3.157781446149681126, tol=PTOL)
v = fp.ei((0.0 - 50.0j))
assert ae(v, (-0.0056283863241163054402 - 3.122413399280832514j), tol=ATOL)
assert ae(v.real, -0.0056283863241163054402, tol=PTOL)
assert ae(v.imag, -3.122413399280832514, tol=PTOL)
v = fp.ei((0.0 - 80.0j))
assert ae(v, (-0.012402501155070958192 - 3.1431272137073839346j), tol=ATOL)
assert ae(v.real, -0.012402501155070958192, tol=PTOL)
assert ae(v.imag, -3.1431272137073839346, tol=PTOL)
v = fp.ei((1.1641532182693481445e-10 - 4.6566128730773925781e-10j))
assert ae(v, (-20.880034621432138988 - 1.3258176641336937524j), tol=ATOL)
assert ae(v.real, -20.880034621432138988, tol=PTOL)
assert ae(v.imag, -1.3258176641336937524, tol=PTOL)
v = fp.ei((0.25 - 1.0j))
assert ae(v, (0.59066621214766308594 - 2.3968481059377428687j), tol=ATOL)
assert ae(v.real, 0.59066621214766308594, tol=PTOL)
assert ae(v.imag, -2.3968481059377428687, tol=PTOL)
v = fp.ei((1.0 - 4.0j))
assert ae(v, (-0.49739047283060471093 - 3.5570287076301818702j), tol=ATOL)
assert ae(v.real, -0.49739047283060471093, tol=PTOL)
assert ae(v.imag, -3.5570287076301818702, tol=PTOL)
v = fp.ei((2.0 - 8.0j))
assert ae(v, (0.8705211147733730969 - 3.3825859385758486351j), tol=ATOL)
assert ae(v.real, 0.8705211147733730969, tol=PTOL)
assert ae(v.imag, -3.3825859385758486351, tol=PTOL)
v = fp.ei((5.0 - 20.0j))
assert ae(v, (7.0789514293925893007 - 1.5313749363937141849j), tol=ATOL)
assert ae(v.real, 7.0789514293925893007, tol=PTOL)
assert ae(v.imag, -1.5313749363937141849, tol=PTOL)
v = fp.ei((20.0 - 80.0j))
assert ae(v, (-5855431.4907298084434 + 720917.79156143806727j), tol=ATOL)
assert ae(v.real, -5855431.4907298084434, tol=PTOL)
assert ae(v.imag, 720917.79156143806727, tol=PTOL)
v = fp.ei((30.0 - 120.0j))
assert ae(v, (65402491644.703470747 + 56697658396.51586764j), tol=ATOL)
assert ae(v.real, 65402491644.703470747, tol=PTOL)
assert ae(v.imag, 56697658396.51586764, tol=PTOL)
v = fp.ei((40.0 - 160.0j))
assert ae(v, (-25504929379604.776769 - 1429035198630576.3879j), tol=ATOL)
assert ae(v.real, -25504929379604.776769, tol=PTOL)
assert ae(v.imag, -1429035198630576.3879, tol=PTOL)
v = fp.ei((50.0 - 200.0j))
assert ae(v, (-18437746526988116954.0 + 17146362239046152342.0j), tol=ATOL)
assert ae(v.real, -18437746526988116954.0, tol=PTOL)
assert ae(v.imag, 17146362239046152342.0, tol=PTOL)
v = fp.ei((80.0 - 320.0j))
assert ae(v, (-3.3464697299634526706e+31 + 1.6473152633843023919e+32j), tol=ATOL)
assert ae(v.real, -3.3464697299634526706e+31, tol=PTOL)
assert ae(v.imag, 1.6473152633843023919e+32, tol=PTOL)
v = fp.ei((1.1641532182693481445e-10 - 1.1641532182693481445e-10j))
assert ae(v, (-21.950067703180274374 - 0.78539816351386363145j), tol=ATOL)
assert ae(v.real, -21.950067703180274374, tol=PTOL)
assert ae(v.imag, -0.78539816351386363145, tol=PTOL)
v = fp.ei((0.25 - 0.25j))
assert ae(v, (-0.21441047326710323254 - 1.0683772981589995996j), tol=ATOL)
assert ae(v.real, -0.21441047326710323254, tol=PTOL)
assert ae(v.imag, -1.0683772981589995996, tol=PTOL)
v = fp.ei((1.0 - 1.0j))
assert ae(v, (1.7646259855638540684 - 2.3877698515105224193j), tol=ATOL)
assert ae(v.real, 1.7646259855638540684, tol=PTOL)
assert ae(v.imag, -2.3877698515105224193, tol=PTOL)
v = fp.ei((2.0 - 2.0j))
assert ae(v, (1.8920781621855474089 - 5.3169624378326579621j), tol=ATOL)
assert ae(v.real, 1.8920781621855474089, tol=PTOL)
assert ae(v.imag, -5.3169624378326579621, tol=PTOL)
v = fp.ei((5.0 - 5.0j))
assert ae(v, (-13.470936071475245856 + 15.322492395731230968j), tol=ATOL)
assert ae(v.real, -13.470936071475245856, tol=PTOL)
assert ae(v.imag, 15.322492395731230968, tol=PTOL)
v = fp.ei((20.0 - 20.0j))
assert ae(v, (16589317.398788971896 - 5831705.4712368307104j), tol=ATOL)
assert ae(v.real, 16589317.398788971896, tol=PTOL)
assert ae(v.imag, -5831705.4712368307104, tol=PTOL)
v = fp.ei((30.0 - 30.0j))
assert ae(v, (-154596484273.69322527 + 204179357834.2723043j), tol=ATOL)
assert ae(v.real, -154596484273.69322527, tol=PTOL)
assert ae(v.imag, 204179357834.2723043, tol=PTOL)
v = fp.ei((40.0 - 40.0j))
assert ae(v, (287512180321448.45408 - 4203502407932318.1156j), tol=ATOL)
assert ae(v.real, 287512180321448.45408, tol=PTOL)
assert ae(v.imag, -4203502407932318.1156, tol=PTOL)
v = fp.ei((50.0 - 50.0j))
assert ae(v, (36128528616649268826.0 + 64648801861338741960.0j), tol=ATOL)
assert ae(v.real, 36128528616649268826.0, tol=PTOL)
assert ae(v.imag, 64648801861338741960.0, tol=PTOL)
v = fp.ei((80.0 - 80.0j))
assert ae(v, (-3.8674816337930010217e+32 + 3.0540709639658071041e+32j), tol=ATOL)
assert ae(v.real, -3.8674816337930010217e+32, tol=PTOL)
assert ae(v.imag, 3.0540709639658071041e+32, tol=PTOL)
v = fp.ei((4.6566128730773925781e-10 - 1.1641532182693481445e-10j))
assert ae(v, (-20.880034621082893023 - 0.24497866324327947603j), tol=ATOL)
assert ae(v.real, -20.880034621082893023, tol=PTOL)
assert ae(v.imag, -0.24497866324327947603, tol=PTOL)
v = fp.ei((1.0 - 0.25j))
assert ae(v, (1.8942716983721074932 - 0.67268237088273915854j), tol=ATOL)
assert ae(v.real, 1.8942716983721074932, tol=PTOL)
assert ae(v.imag, -0.67268237088273915854, tol=PTOL)
v = fp.ei((4.0 - 1.0j))
assert ae(v, (14.806699492675420438 - 12.280015176673582616j), tol=ATOL)
assert ae(v.real, 14.806699492675420438, tol=PTOL)
assert ae(v.imag, -12.280015176673582616, tol=PTOL)
v = fp.ei((8.0 - 2.0j))
assert ae(v, (-54.633252667426386294 - 416.34477429173650012j), tol=ATOL)
assert ae(v.real, -54.633252667426386294, tol=PTOL)
assert ae(v.imag, -416.34477429173650012, tol=PTOL)
v = fp.ei((20.0 - 5.0j))
assert ae(v, (711836.97165402624643 + 24745247.798103247366j), tol=ATOL)
assert ae(v.real, 711836.97165402624643, tol=PTOL)
assert ae(v.imag, 24745247.798103247366, tol=PTOL)
v = fp.ei((80.0 - 20.0j))
assert ae(v, (4.2139911108612653091e+32 - 5.3367124741918251637e+32j), tol=ATOL)
assert ae(v.real, 4.2139911108612653091e+32, tol=PTOL)
assert ae(v.imag, -5.3367124741918251637e+32, tol=PTOL)
v = fp.ei((120.0 - 30.0j))
assert ae(v, (-9.7760616203707508892e+48 + 1.058257682317195792e+50j), tol=ATOL)
assert ae(v.real, -9.7760616203707508892e+48, tol=PTOL)
assert ae(v.imag, 1.058257682317195792e+50, tol=PTOL)
v = fp.ei((160.0 - 40.0j))
assert ae(v, (-8.7065541466623638861e+66 - 1.6577106725141739889e+67j), tol=ATOL)
assert ae(v.real, -8.7065541466623638861e+66, tol=PTOL)
assert ae(v.imag, -1.6577106725141739889e+67, tol=PTOL)
v = fp.ei((200.0 - 50.0j))
assert ae(v, (3.070744996327018106e+84 + 1.7243244846769415903e+84j), tol=ATOL)
assert ae(v.real, 3.070744996327018106e+84, tol=PTOL)
assert ae(v.imag, 1.7243244846769415903e+84, tol=PTOL)
v = fp.ei((320.0 - 80.0j))
assert ae(v, (-9.9960598637998647276e+135 + 2.6855081527595608863e+136j), tol=ATOL)
assert ae(v.real, -9.9960598637998647276e+135, tol=PTOL)
assert ae(v.imag, 2.6855081527595608863e+136, tol=PTOL)
| bsd-3-clause | ef051903d7fa366579d2c478cb86b8b5 | 52.858169 | 101 | 0.681312 | 2.159444 | false | false | false | false |
fredrik-johansson/mpmath | mpmath/matrices/linalg.py | 1 | 26958 | """
Linear algebra
--------------
Linear equations
................
Basic linear algebra is implemented; you can for example solve the linear
equation system::
x + 2*y = -10
3*x + 4*y = 10
using ``lu_solve``::
>>> from mpmath import *
>>> mp.pretty = False
>>> A = matrix([[1, 2], [3, 4]])
>>> b = matrix([-10, 10])
>>> x = lu_solve(A, b)
>>> x
matrix(
[['30.0'],
['-20.0']])
If you don't trust the result, use ``residual`` to calculate the residual ||A*x-b||::
>>> residual(A, x, b)
matrix(
[['3.46944695195361e-18'],
['3.46944695195361e-18']])
>>> str(eps)
'2.22044604925031e-16'
As you can see, the solution is quite accurate. The error is caused by the
inaccuracy of the internal floating point arithmetic. Though, it's even smaller
than the current machine epsilon, which basically means you can trust the
result.
If you need more speed, use NumPy, or ``fp.lu_solve`` for a floating-point computation.
>>> fp.lu_solve(A, b) # doctest: +ELLIPSIS
matrix(...)
``lu_solve`` accepts overdetermined systems. It is usually not possible to solve
such systems, so the residual is minimized instead. Internally this is done
using Cholesky decomposition to compute a least squares approximation. This means
that ``lu_solve`` will square the errors. If you can't afford this, use
``qr_solve`` instead. It is twice as slow but more accurate, and it calculates
the residual automatically.
Matrix factorization
....................
The function ``lu`` computes an explicit LU factorization of a matrix::
>>> P, L, U = lu(matrix([[0,2,3],[4,5,6],[7,8,9]]))
>>> print(P)
[0.0 0.0 1.0]
[1.0 0.0 0.0]
[0.0 1.0 0.0]
>>> print(L)
[ 1.0 0.0 0.0]
[ 0.0 1.0 0.0]
[0.571428571428571 0.214285714285714 1.0]
>>> print(U)
[7.0 8.0 9.0]
[0.0 2.0 3.0]
[0.0 0.0 0.214285714285714]
>>> print(P.T*L*U)
[0.0 2.0 3.0]
[4.0 5.0 6.0]
[7.0 8.0 9.0]
Interval matrices
-----------------
Matrices may contain interval elements. This allows one to perform
basic linear algebra operations such as matrix multiplication
and equation solving with rigorous error bounds::
>>> a = iv.matrix([['0.1','0.3','1.0'],
... ['7.1','5.5','4.8'],
... ['3.2','4.4','5.6']])
>>>
>>> b = iv.matrix(['4','0.6','0.5'])
>>> c = iv.lu_solve(a, b)
>>> print(c)
[ [5.2582327113062568605927528666, 5.25823271130625686059275702219]]
[[-13.1550493962678375411635581388, -13.1550493962678375411635540152]]
[ [7.42069154774972557628979076189, 7.42069154774972557628979190734]]
>>> print(a*c)
[ [3.99999999999999999999999844904, 4.00000000000000000000000155096]]
[[0.599999999999999999999968898009, 0.600000000000000000000031763736]]
[[0.499999999999999999999979320485, 0.500000000000000000000020679515]]
"""
# TODO:
# *implement high-level qr()
# *test unitvector
# *iterative solving
from copy import copy
from ..libmp.backend import xrange
class LinearAlgebraMethods(object):
def LU_decomp(ctx, A, overwrite=False, use_cache=True):
    """
    LU-factorization of a n*n matrix using the Gauss algorithm.
    Returns L and U in one matrix and the pivot indices.

    Use overwrite to specify whether A will be overwritten with L and U.

    :param A: square matrix to factorize
    :param overwrite: if True, A itself is overwritten with the packed L/U
                      factors; if False, A is copied first
    :param use_cache: if True and A already carries a cached decomposition
                      (A._LU), return that instead of recomputing
    :returns: tuple (LU, p) where LU packs the unit lower triangle of L
              (implicit 1s on the diagonal) and the upper triangle of U,
              and p lists the pivot row chosen at each elimination step
    :raises ValueError: if A is not square
    :raises ZeroDivisionError: if A is numerically singular
    """
    if not A.rows == A.cols:
        raise ValueError('need n*n matrix')
    # get from cache if possible
    if use_cache and isinstance(A, ctx.matrix) and A._LU:
        return A._LU
    if not overwrite:
        # keep a reference to the caller's matrix so the decomposition
        # can be cached on it at the end
        orig = A
        A = A.copy()
    tol = ctx.absmin(ctx.mnorm(A,1) * ctx.eps) # each pivot element has to be bigger
    n = A.rows
    p = [None]*(n - 1)
    for j in xrange(n - 1):
        # pivoting, choose max(abs(reciprocal row sum)*abs(pivot element))
        # (scaled partial pivoting: the candidate pivot is weighted by the
        # reciprocal of its row's 1-norm restricted to the active columns)
        biggest = 0
        for k in xrange(j, n):
            s = ctx.fsum([ctx.absmin(A[k,l]) for l in xrange(j, n)])
            if ctx.absmin(s) <= tol:
                raise ZeroDivisionError('matrix is numerically singular')
            current = 1/s * ctx.absmin(A[k,j])
            if current > biggest: # TODO: what if equal?
                biggest = current
                p[j] = k
        # swap rows according to p
        ctx.swap_row(A, j, p[j])
        if ctx.absmin(A[j,j]) <= tol:
            raise ZeroDivisionError('matrix is numerically singular')
        # calculate elimination factors and add rows
        for i in xrange(j + 1, n):
            # the multiplier is stored in the (now zeroed) lower triangle
            A[i,j] /= A[j,j]
            for k in xrange(j + 1, n):
                A[i,k] -= A[i,j]*A[j,k]
    if ctx.absmin(A[n - 1,n - 1]) <= tol:
        raise ZeroDivisionError('matrix is numerically singular')
    # cache decomposition
    if not overwrite and isinstance(orig, ctx.matrix):
        orig._LU = (A, p)
    return A, p
def L_solve(ctx, L, b, p=None):
    """
    Solve the lower part of a LU factorized matrix for y.

    Forward substitution on L*y = b, where L is the unit lower triangular
    factor packed by LU_decomp (its diagonal is implicitly 1). If the pivot
    index list p is given, b is permuted by the recorded row swaps first.
    """
    if L.rows != L.cols:
        raise RuntimeError("need n*n matrix")
    n = L.rows
    if len(b) != n:
        raise ValueError("Value should be equal to n")
    # work on a copy so the caller's right-hand side is untouched
    y = copy(b)
    if p:
        # replay the row swaps performed during LU decomposition
        for k in xrange(0, len(p)):
            ctx.swap_row(y, k, p[k])
    # forward substitution; row 0 needs no elimination
    for row in xrange(1, n):
        for col in xrange(row):
            y[row] -= L[row,col] * y[col]
    return y
def U_solve(ctx, U, y):
    """
    Solve the upper part of a LU factorized matrix for x.

    Backward substitution on U*x = y, where U is the upper triangular
    factor packed by LU_decomp.
    """
    if U.rows != U.cols:
        raise RuntimeError("need n*n matrix")
    n = U.rows
    if len(y) != n:
        raise ValueError("Value should be equal to n")
    # work on a copy so the caller's vector is untouched
    x = copy(y)
    # walk the rows bottom-up, eliminating the already-solved unknowns
    for row in xrange(n - 1, -1, -1):
        for col in xrange(row + 1, n):
            x[row] -= U[row,col] * x[col]
        x[row] /= U[row,row]
    return x
def lu_solve(ctx, A, b, **kwargs):
    """
    Ax = b => x

    Solve a determined or overdetermined linear equations system.
    Fast LU decomposition is used, which is less accurate than QR decomposition
    (especially for overdetermined systems), but it's twice as efficient.
    Use qr_solve if you want more precision or have to solve a very ill-
    conditioned system.

    If you specify real=True, it does not check for overdetermined complex
    systems.

    :param A: coefficient matrix (anything ctx.matrix accepts)
    :param b: right-hand side vector/matrix
    :returns: solution vector x (least-squares solution if overdetermined)
    :raises ValueError: if the system is underdetermined (A.rows < A.cols)
    """
    prec = ctx.prec
    try:
        # work at slightly higher precision to absorb round-off
        ctx.prec += 10
        # do not overwrite A nor b
        A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy()
        if A.rows < A.cols:
            raise ValueError('cannot solve underdetermined system')
        if A.rows > A.cols:
            # use least-squares method if overdetermined
            # (this increases errors): solve the normal equations A^H A x = A^H b
            AH = A.H
            A = AH * A
            b = AH * b
            # any() short-circuits on the first complex entry instead of
            # scanning the whole matrix like sum() did
            if (kwargs.get('real', False) or
                not any(type(i) is ctx.mpc for i in A)):
                # TODO: necessary to check also b?
                x = ctx.cholesky_solve(A, b)
            else:
                x = ctx.lu_solve(A, b)
        else:
            # LU factorization
            A, p = ctx.LU_decomp(A)
            b = ctx.L_solve(A, b, p)
            x = ctx.U_solve(A, b)
    finally:
        # restore the caller's working precision even on error
        ctx.prec = prec
    return x
def improve_solution(ctx, A, x, b, maxsteps=1):
    """
    Improve a solution to a linear equation system iteratively.

    This re-uses the LU decomposition and is thus cheap.
    Usually 3 up to 4 iterations are giving the maximal improvement.
    """
    if A.rows != A.cols:
        raise RuntimeError("need n*n matrix") # TODO: really?
    for _ in xrange(maxsteps):
        defect = ctx.residual(A, x, b)
        # stop early once the residual is already negligible
        if ctx.norm(defect, 2) < 10*ctx.eps:
            break
        # this uses cached LU decomposition and is thus cheap
        correction = ctx.lu_solve(A, -defect)
        x += correction
    return x
def lu(ctx, A):
    """
    A -> P, L, U

    LU factorisation of a square matrix A. L is the lower, U the upper part.
    P is the permutation matrix indicating the row swaps.

    P*A = L*U

    If you need efficiency, use the low-level method LU_decomp instead, it's
    much more memory efficient.
    """
    # run the packed low-level factorization; LU holds both triangles
    LU, pivots = ctx.LU_decomp(A)
    n = LU.rows
    L = ctx.matrix(n)
    U = ctx.matrix(n)
    for i in xrange(n):
        # diagonal: L carries implicit ones, U takes the stored value
        L[i,i] = 1
        U[i,i] = LU[i,i]
        # strictly lower triangle belongs to L
        for j in xrange(i):
            L[i,j] = LU[i,j]
        # strictly upper triangle belongs to U
        for j in xrange(i + 1, n):
            U[i,j] = LU[i,j]
    # calculate permutation matrix from the recorded row swaps
    P = ctx.eye(n)
    for k in xrange(len(pivots)):
        ctx.swap_row(P, k, pivots[k])
    return P, L, U
def unitvector(ctx, n, i):
    """
    Return the i-th n-dimensional unit vector.

    :param n: dimension of the vector
    :param i: 1-based position of the single one-entry, 1 <= i <= n
    :returns: list of length n with ctx.one at position i-1 and ctx.zero
              elsewhere
    :raises ValueError: if i is not in [1, n]
    """
    # validate explicitly instead of with assert, which is silently
    # stripped when Python runs with optimizations enabled (-O)
    if not 0 < i <= n:
        raise ValueError('this unit vector does not exist')
    return [ctx.zero]*(i-1) + [ctx.one] + [ctx.zero]*(n-i)
def inverse(ctx, A, **kwargs):
    """
    Calculate the inverse of a matrix.

    If you want to solve an equation system Ax = b, it's recommended to use
    solve(A, b) instead, it's about 3 times more efficient.
    """
    prec = ctx.prec
    try:
        # slightly raised precision to absorb round-off
        ctx.prec += 10
        # do not overwrite A
        A = ctx.matrix(A, **kwargs).copy()
        n = A.rows
        # factorize once, then solve A*x = e_i for every unit vector e_i;
        # each solution is one column of the inverse
        A, p = ctx.LU_decomp(A)
        cols = []
        for i in xrange(1, n + 1):
            e = ctx.unitvector(n, i)
            y = ctx.L_solve(A, e, p)
            cols.append(ctx.U_solve(A, y))
        # cols holds the inverse column-wise; transpose into row order
        inv = [[cols[j][i] for j in xrange(n)] for i in xrange(n)]
        result = ctx.matrix(inv, **kwargs)
    finally:
        # restore the caller's working precision even on error
        ctx.prec = prec
    return result
def householder(ctx, A):
    """
    (A|b) -> H, p, x, res

    (A|b) is the coefficient matrix with left hand side of an optionally
    overdetermined linear equation system.
    H and p contain all information about the transformation matrices.
    x is the solution, res the residual.

    Note: A is modified in place; on return it holds the Householder
    vectors (lower part) and the transformed system (upper part).

    :param A: augmented m x n matrix (A|b), an ctx.matrix instance
    :returns: tuple (A, p, x, r) — transformed matrix, the diagonal entries
              of R, the solution vector, and the residual entries
    :raises TypeError: if A is not a ctx.matrix
    :raises RuntimeError: if the system is underdetermined (m < n - 1)
    :raises ValueError: if a column is numerically zero (singular matrix)
    """
    if not isinstance(A, ctx.matrix):
        raise TypeError("A should be a type of ctx.matrix")
    m = A.rows
    n = A.cols
    if m < n - 1:
        raise RuntimeError("Columns should not be less than rows")
    # calculate Householder matrix
    p = []
    for j in xrange(0, n - 1):
        # squared 2-norm of the active part of column j
        s = ctx.fsum(abs(A[i,j])**2 for i in xrange(j, m))
        if not abs(s) > ctx.eps:
            raise ValueError('matrix is numerically singular')
        # choose the sign opposite to A[j,j] to avoid cancellation
        p.append(-ctx.sign(ctx.re(A[j,j])) * ctx.sqrt(s))
        kappa = ctx.one / (s - p[j] * A[j,j])
        A[j,j] -= p[j]
        # apply the reflector to the remaining columns (including rhs)
        for k in xrange(j+1, n):
            y = ctx.fsum(ctx.conj(A[i,j]) * A[i,k] for i in xrange(j, m)) * kappa
            for i in xrange(j, m):
                A[i,k] -= A[i,j] * y
    # solve Rx = c1 (back substitution; R's diagonal is stored in p)
    x = [A[i,n - 1] for i in xrange(n - 1)]
    for i in xrange(n - 2, -1, -1):
        x[i] -= ctx.fsum(A[i,j] * x[j] for j in xrange(i + 1, n - 1))
        x[i] /= p[i]
    # calculate residual
    if not m == n - 1:
        r = [A[m-1-i, n-1] for i in xrange(m - n + 1)]
    else:
        # determined system, residual should be 0
        r = [0]*m # maybe a bad idea, changing r[i] will change all elements
    return A, p, x, r
#def qr(ctx, A):
# """
# A -> Q, R
#
# QR factorisation of a square matrix A using Householder decomposition.
# Q is orthogonal, this leads to very few numerical errors.
#
# A = Q*R
# """
# H, p, x, res = householder(A)
# TODO: implement this
def residual(ctx, A, x, b, **kwargs):
    """
    Calculate the residual of a solution to a linear equation system.

    r = A*x - b for A*x = b
    """
    oldprec = ctx.prec
    try:
        # double the working precision so the subtraction does not lose digits
        ctx.prec *= 2
        A = ctx.matrix(A, **kwargs)
        x = ctx.matrix(x, **kwargs)
        b = ctx.matrix(b, **kwargs)
        return A*x - b
    finally:
        # always restore the caller's precision
        ctx.prec = oldprec
def qr_solve(ctx, A, b, norm=None, **kwargs):
    """
    Ax = b => x, ||Ax - b||
    Solve a determined or overdetermined linear equations system and
    calculate the norm of the residual (error).
    QR decomposition using Householder factorization is applied, which gives very
    accurate results even for ill-conditioned matrices. qr_solve is twice as
    efficient.

    ``norm`` may be a callable used to measure the residual; it defaults
    to ctx.norm.
    """
    if norm is None:
        norm = ctx.norm
    prec = ctx.prec
    try:
        # Guard digits for the factorization.
        ctx.prec += 10
        # do not overwrite A nor b
        A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy()
        if A.rows < A.cols:
            raise ValueError('cannot solve underdetermined system')
        H, p, x, r = ctx.householder(ctx.extend(A, b))
        # Use the caller-supplied norm.  Previously ctx.norm was hard-coded
        # here and below, silently ignoring the ``norm`` argument.
        res = norm(r)
        # calculate residual "manually" for determined systems
        if res == 0:
            res = norm(ctx.residual(A, x, b))
        return ctx.matrix(x, **kwargs), res
    finally:
        ctx.prec = prec
def cholesky(ctx, A, tol=None):
    r"""
    Cholesky decomposition of a symmetric positive-definite matrix `A`.
    Returns a lower triangular matrix `L` such that `A = L \times L^T`.
    More generally, for a complex Hermitian positive-definite matrix,
    a Cholesky decomposition satisfying `A = L \times L^H` is returned.
    The Cholesky decomposition can be used to solve linear equation
    systems twice as efficiently as LU decomposition, or to
    test whether `A` is positive-definite.
    The optional parameter ``tol`` determines the tolerance for
    verifying positive-definiteness.
    **Examples**
    Cholesky decomposition of a positive-definite symmetric matrix::
    >>> from mpmath import *
    >>> mp.dps = 25; mp.pretty = True
    >>> A = eye(3) + hilbert(3)
    >>> nprint(A)
    [     2.0      0.5 0.333333]
    [     0.5  1.33333     0.25]
    [0.333333     0.25      1.2]
    >>> L = cholesky(A)
    >>> nprint(L)
    [ 1.41421      0.0      0.0]
    [0.353553  1.09924      0.0]
    [0.235702  0.15162  1.05899]
    >>> chop(A - L*L.T)
    [0.0  0.0  0.0]
    [0.0  0.0  0.0]
    [0.0  0.0  0.0]
    Cholesky decomposition of a Hermitian matrix::
    >>> A = eye(3) + matrix([[0,0.25j,-0.5j],[-0.25j,0,0],[0.5j,0,0]])
    >>> L = cholesky(A)
    >>> nprint(L)
    [          1.0                0.0                0.0]
    [(0.0 - 0.25j)  (0.968246 + 0.0j)                0.0]
    [ (0.0 + 0.5j)  (0.129099 + 0.0j)  (0.856349 + 0.0j)]
    >>> chop(A - L*L.H)
    [0.0  0.0  0.0]
    [0.0  0.0  0.0]
    [0.0  0.0  0.0]
    Attempted Cholesky decomposition of a matrix that is not positive
    definite::
    >>> A = -eye(3) + hilbert(3)
    >>> L = cholesky(A)
    Traceback (most recent call last):
      ...
    ValueError: matrix is not positive-definite
    **References**
    1. [Wikipedia]_ http://en.wikipedia.org/wiki/Cholesky_decomposition
    """
    if not isinstance(A, ctx.matrix):
        raise RuntimeError("A should be a type of ctx.matrix")
    if not A.rows == A.cols:
        raise ValueError('need n*n matrix')
    if tol is None:
        tol = +ctx.eps
    n = A.rows
    L = ctx.matrix(n)
    for j in xrange(n):
        # The diagonal of a Hermitian matrix must be real (to tolerance).
        c = ctx.re(A[j,j])
        if abs(c-A[j,j]) > tol:
            raise ValueError('matrix is not Hermitian')
        # s = A[j,j] - sum of |L[j,k]|^2 over the row so far; this is the
        # squared diagonal entry and must be positive for an SPD matrix.
        s = c - ctx.fsum((L[j,k] for k in xrange(j)),
            absolute=True, squared=True)
        if s < tol:
            raise ValueError('matrix is not positive-definite')
        L[j,j] = ctx.sqrt(s)
        # Fill the rest of column j: L[i,j] = (A[i,j] - <row_i, conj(row_j)>) / L[j,j]
        for i in xrange(j, n):
            it1 = (L[i,k] for k in xrange(j))
            it2 = (L[j,k] for k in xrange(j))
            t = ctx.fdot(it1, it2, conjugate=True)
            L[i,j] = (A[i,j] - t) / L[j,j]
    return L
def cholesky_solve(ctx, A, b, **kwargs):
    """
    Ax = b => x
    Solve a symmetric positive-definite linear equation system.
    This is twice as efficient as lu_solve.
    Typical use cases:
    * A.T*A
    * Hessian matrix
    * differential equations
    """
    prec = ctx.prec
    try:
        # Guard digits for the factorization and substitutions.
        ctx.prec += 10
        # do not overwrite A nor b
        A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy()
        if A.rows != A.cols:
            raise ValueError('can only solve determined system')
        # Cholesky factorization
        L = ctx.cholesky(A)
        # solve
        n = L.rows
        if len(b) != n:
            raise ValueError("Value should be equal to n")
        # Forward substitution: solve L*y = b, overwriting b with y.
        for i in xrange(n):
            b[i] -= ctx.fsum(L[i,j] * b[j] for j in xrange(i))
            b[i] /= L[i,i]
        # Back substitution: solve L.T*x = y.
        # NOTE(review): L.T (not L.H) is used here, which matches the real
        # symmetric case documented above -- confirm whether complex
        # Hermitian input is intended to be supported by this solver.
        x = ctx.U_solve(L.T, b)
        return x
    finally:
        ctx.prec = prec
def det(ctx, A):
    """
    Calculate the determinant of a matrix.
    """
    saved_prec = ctx.prec
    try:
        # Work on a copy so the caller's matrix is untouched.
        M = ctx.matrix(A).copy()
        # The determinant is the signed product of the diagonal of the LU
        # factorization; a ZeroDivisionError there means the matrix is
        # singular, i.e. det = 0.
        try:
            R, pivots = ctx.LU_decomp(M)
        except ZeroDivisionError:
            return 0
        result = 1
        # Each row swap recorded in the pivot list flips the sign.
        for position, pivot in enumerate(pivots):
            if position != pivot:
                result = -result
        for k in xrange(M.rows):
            result *= R[k,k]
        return result
    finally:
        ctx.prec = saved_prec
def cond(ctx, A, norm=None):
    """
    Calculate the condition number of a matrix using a specified matrix norm.
    The condition number estimates the sensitivity of a matrix to errors.
    Example: small input errors for ill-conditioned coefficient matrices
    alter the solution of the system dramatically.
    For ill-conditioned matrices it's recommended to use qr_solve() instead
    of lu_solve(). This does not help with input errors however, it just avoids
    to add additional errors.
    Definition: cond(A) = ||A|| * ||A**-1||
    """
    if norm is None:
        # Default norm: ctx.mnorm with p = 1.
        def norm(M):
            return ctx.mnorm(M, 1)
    return norm(A) * norm(ctx.inverse(A))
def lu_solve_mat(ctx, a, b):
    """Solve a * x = b, where a and b are matrices, one column at a time."""
    solution = ctx.matrix(a.rows, b.cols)
    for col in range(b.cols):
        # Solve for the col-th column of x against the matching column of b.
        column = ctx.lu_solve(a, b.column(col))
        for row, value in enumerate(column):
            solution[row, col] = value
    return solution
def qr(ctx, A, mode = 'full', edps = 10):
    """
    Compute a QR factorization $A = QR$ where
    A is an m x n matrix of real or complex numbers where m >= n
    mode has following meanings:
    (1) mode = 'raw' returns two matrixes (A, tau) in the
    internal format used by LAPACK
    (2) mode = 'skinny' returns the leading n columns of Q
    and n rows of R
    (3) Any other value returns the leading m columns of Q
    and m rows of R
    edps is the increase in mp precision used for calculations
    **Examples**
    >>> from mpmath import *
    >>> mp.dps = 15
    >>> mp.pretty = True
    >>> A = matrix([[1, 2], [3, 4], [1, 1]])
    >>> Q, R = qr(A)
    >>> Q
    [-0.301511344577764   0.861640436855329   0.408248290463863]
    [-0.904534033733291  -0.123091490979333  -0.408248290463863]
    [-0.301511344577764  -0.492365963917331   0.816496580927726]
    >>> R
    [-3.3166247903554  -4.52267016866645]
    [             0.0  0.738548945875996]
    [             0.0                0.0]
    >>> Q * R
    [1.0  2.0]
    [3.0  4.0]
    [1.0  1.0]
    >>> chop(Q.T * Q)
    [1.0  0.0  0.0]
    [0.0  1.0  0.0]
    [0.0  0.0  1.0]
    >>> B = matrix([[1+0j, 2-3j], [3+j, 4+5j]])
    >>> Q, R = qr(B)
    >>> nprint(Q)
    [     (-0.301511 + 0.0j)   (0.0695795 - 0.95092j)]
    [(-0.904534 - 0.301511j)  (-0.115966 + 0.278318j)]
    >>> nprint(R)
    [(-3.31662 + 0.0j)  (-5.72872 - 2.41209j)]
    [              0.0     (3.91965 + 0.0j)]
    >>> Q * R
    [(1.0 + 0.0j)  (2.0 - 3.0j)]
    [(3.0 + 1.0j)  (4.0 + 5.0j)]
    >>> chop(Q.T * Q.conjugate())
    [1.0  0.0]
    [0.0  1.0]
    """
    # check values before continuing
    assert isinstance(A, ctx.matrix)
    m = A.rows
    n = A.cols
    assert n >= 0
    assert m >= n
    assert edps >= 0
    # check for complex data type
    cmplx = any(type(x) is ctx.mpc for x in A)
    # temporarily increase the precision and initialize
    with ctx.extradps(edps):
        # tau[j] holds the scalar factor of the j-th elementary reflector,
        # as in the LAPACK storage convention mentioned in the docstring.
        tau = ctx.matrix(n,1)
        A = A.copy()
        # ---------------
        # FACTOR MATRIX A
        # ---------------
        if cmplx:
            one = ctx.mpc('1.0', '0.0')
            zero = ctx.mpc('0.0', '0.0')
            rzero = ctx.mpf('0.0')
            # main loop to factor A (complex)
            for j in xrange(0, n):
                alpha = A[j,j]
                alphr = ctx.re(alpha)
                alphi = ctx.im(alpha)
                # Norm of the subdiagonal part of column j.
                if (m-j) >= 2:
                    xnorm = ctx.fsum( A[i,j]*ctx.conj(A[i,j]) for i in xrange(j+1, m) )
                    xnorm = ctx.re( ctx.sqrt(xnorm) )
                else:
                    xnorm = rzero
                if (xnorm == rzero) and (alphi == rzero):
                    # Column already in triangular form; no reflection needed.
                    tau[j] = zero
                    continue
                # beta has the opposite sign of re(alpha) to avoid cancellation.
                if alphr < rzero:
                    beta = ctx.sqrt(alphr**2 + alphi**2 + xnorm**2)
                else:
                    beta = -ctx.sqrt(alphr**2 + alphi**2 + xnorm**2)
                tau[j] = ctx.mpc( (beta - alphr) / beta, -alphi / beta )
                t = -ctx.conj(tau[j])
                za = one / (alpha - beta)
                for i in xrange(j+1, m):
                    A[i,j] *= za
                A[j,j] = one
                # Apply the reflector to the trailing columns.
                for k in xrange(j+1, n):
                    y = ctx.fsum(A[i,j] * ctx.conj(A[i,k]) for i in xrange(j, m))
                    temp = t * ctx.conj(y)
                    for i in xrange(j, m):
                        A[i,k] += A[i,j] * temp
                # Store the diagonal entry of R.
                A[j,j] = ctx.mpc(beta, '0.0')
        else:
            one = ctx.mpf('1.0')
            zero = ctx.mpf('0.0')
            # main loop to factor A (real)
            for j in xrange(0, n):
                alpha = A[j,j]
                if (m-j) > 2:
                    xnorm = ctx.fsum( (A[i,j])**2 for i in xrange(j+1, m) )
                    xnorm = ctx.sqrt(xnorm)
                elif (m-j) == 2:
                    xnorm = abs( A[m-1,j] )
                else:
                    xnorm = zero
                if xnorm == zero:
                    # Column already in triangular form; no reflection needed.
                    tau[j] = zero
                    continue
                if alpha < zero:
                    beta = ctx.sqrt(alpha**2 + xnorm**2)
                else:
                    beta = -ctx.sqrt(alpha**2 + xnorm**2)
                tau[j] = (beta - alpha) / beta
                t = -tau[j]
                da = one / (alpha - beta)
                for i in xrange(j+1, m):
                    A[i,j] *= da
                A[j,j] = one
                # Apply the reflector to the trailing columns.
                for k in xrange(j+1, n):
                    y = ctx.fsum( A[i,j] * A[i,k] for i in xrange(j, m) )
                    temp = t * y
                    for i in xrange(j,m):
                        A[i,k] += A[i,j] * temp
                A[j,j] = beta
        # return factorization in same internal format as LAPACK
        if (mode == 'raw') or (mode == 'RAW'):
            return A, tau
        # ----------------------------------
        # FORM Q USING BACKWARD ACCUMULATION
        # ----------------------------------
        # form R before the values are overwritten
        R = A.copy()
        for j in xrange(0, n):
            for i in xrange(j+1, m):
                R[i,j] = zero
        # set the value of p (number of columns of Q to return)
        p = m
        if (mode == 'skinny') or (mode == 'SKINNY'):
            p = n
        # add columns to A if needed and initialize
        A.cols += (p-n)
        for j in xrange(0, p):
            A[j,j] = one
            for i in xrange(0, j):
                A[i,j] = zero
        # main loop to form Q
        # Reflectors are applied in reverse order (backward accumulation),
        # so earlier reflectors act on already-accumulated columns.
        for j in xrange(n-1, -1, -1):
            t = -tau[j]
            A[j,j] += t
            for k in xrange(j+1, p):
                if cmplx:
                    y = ctx.fsum(A[i,j] * ctx.conj(A[i,k]) for i in xrange(j+1, m))
                    temp = t * ctx.conj(y)
                else:
                    y = ctx.fsum(A[i,j] * A[i,k] for i in xrange(j+1, m))
                    temp = t * y
                A[j,k] = temp
                for i in xrange(j+1, m):
                    A[i,k] += A[i,j] * temp
            for i in xrange(j+1, m):
                A[i, j] *= t
        return A, R[0:p,0:n]
# ------------------
# END OF FUNCTION QR
# ------------------
| bsd-3-clause | cf6a25d263f116f37128fa3450d795e1 | 33.124051 | 95 | 0.461978 | 3.568705 | false | false | false | false |
fredrik-johansson/mpmath | mpmath/functions/orthogonal.py | 15 | 16097 | from .functions import defun, defun_wrapped
def _hermite_param(ctx, n, z, parabolic_cylinder):
    """
    Combined calculation of the Hermite polynomial H_n(z) (and its
    generalization to complex n) and the parabolic cylinder
    function D.

    Returns a tuple of hypercomb term descriptors.  ``parabolic_cylinder``
    is a flag: when nonzero the terms are rescaled (exponent shift plus an
    exp(-z^2/4) factor) to give D_n instead of H_n.
    """
    n, ntyp = ctx._convert_param(n)
    z = ctx.convert(z)
    q = -ctx.mpq_1_2
    # For re(z) > 0, 2F0 -- http://functions.wolfram.com/
    # HypergeometricFunctions/HermiteHGeneral/06/02/0009/
    # Otherwise, there is a reflection formula
    # 2F0 + http://functions.wolfram.com/HypergeometricFunctions/
    # HermiteHGeneral/16/01/01/0006/
    #
    # TODO:
    # An alternative would be to use
    # http://functions.wolfram.com/HypergeometricFunctions/
    # HermiteHGeneral/06/02/0006/
    #
    # Also, the 1F1 expansion
    # http://functions.wolfram.com/HypergeometricFunctions/
    # HermiteHGeneral/26/01/02/0001/
    # should probably be used for tiny z
    if not z:
        # Closed-form value at z = 0 (single term, argument 0).
        T1 = [2, ctx.pi], [n, 0.5], [], [q*(n-1)], [], [], 0
        if parabolic_cylinder:
            T1[1][0] += q*n
        return T1,
    can_use_2f0 = ctx.isnpint(-n) or ctx.re(z) > 0 or \
        (ctx.re(z) == 0 and ctx.im(z) > 0)
    # Products below are carried out at elevated precision.
    expprec = ctx.prec*4 + 20
    if parabolic_cylinder:
        # u = -z^2/4 (exponent of the D_n prefactor), w = z/sqrt(2).
        u = ctx.fmul(ctx.fmul(z,z,prec=expprec), -0.25, exact=True)
        w = ctx.fmul(z, ctx.sqrt(0.5,prec=expprec), prec=expprec)
    else:
        w = z
    w2 = ctx.fmul(w, w, prec=expprec)
    rw2 = ctx.fdiv(1, w2, prec=expprec)
    nrw2 = ctx.fneg(rw2, exact=True)
    nw = ctx.fneg(w, exact=True)
    if can_use_2f0:
        T1 = [2, w], [n, n], [], [], [q*n, q*(n-1)], [], nrw2
        terms = [T1]
    else:
        # Reflection formula: evaluate at -w and add a correction term.
        T1 = [2, nw], [n, n], [], [], [q*n, q*(n-1)], [], nrw2
        T2 = [2, ctx.pi, nw], [n+2, 0.5, 1], [], [q*n], [q*(n-1)], [1-q], w2
        terms = [T1,T2]
    # Multiply by prefactor for D_n
    if parabolic_cylinder:
        expu = ctx.exp(u)
        for i in range(len(terms)):
            terms[i][1][0] += q*n
            terms[i][0].append(expu)
            terms[i][1].append(1)
    return tuple(terms)
@defun
def hermite(ctx, n, z, **kwargs):
    # Hermite polynomial/function H_n(z); flag 0 selects the Hermite
    # (non-parabolic-cylinder) branch of the shared parameter builder.
    def params():
        return _hermite_param(ctx, n, z, 0)
    return ctx.hypercomb(params, [], **kwargs)
@defun
def pcfd(ctx, n, z, **kwargs):
    r"""
    Gives the parabolic cylinder function in Whittaker's notation
    `D_n(z) = U(-n-1/2, z)` (see :func:`~mpmath.pcfu`).
    It solves the differential equation
    .. math ::
        y'' + \left(n + \frac{1}{2} - \frac{1}{4} z^2\right) y = 0.
    and can be represented in terms of Hermite polynomials
    (see :func:`~mpmath.hermite`) as
    .. math ::
        D_n(z) = 2^{-n/2} e^{-z^2/4} H_n\left(\frac{z}{\sqrt{2}}\right).
    **Plots**
    .. literalinclude :: /plots/pcfd.py
    .. image :: /plots/pcfd.png
    **Examples**
    >>> from mpmath import *
    >>> mp.dps = 25; mp.pretty = True
    >>> pcfd(0,0); pcfd(1,0); pcfd(2,0); pcfd(3,0)
    1.0
    0.0
    -1.0
    0.0
    >>> pcfd(4,0); pcfd(-3,0)
    3.0
    0.6266570686577501256039413
    >>> pcfd('1/2', 2+3j)
    (-5.363331161232920734849056 - 3.858877821790010714163487j)
    >>> pcfd(2, -10)
    1.374906442631438038871515e-9
    Verifying the differential equation::
    >>> n = mpf(2.5)
    >>> y = lambda z: pcfd(n,z)
    >>> z = 1.75
    >>> chop(diff(y,z,2) + (n+0.5-0.25*z**2)*y(z))
    0.0
    Rational Taylor series expansion when `n` is an integer::
    >>> taylor(lambda z: pcfd(5,z), 0, 7)
    [0.0, 15.0, 0.0, -13.75, 0.0, 3.96875, 0.0, -0.6015625]
    """
    # Flag 1 makes _hermite_param include the 2^(-n/2) exp(-z^2/4)
    # prefactor that turns H_n into D_n (see formula above).
    return ctx.hypercomb(lambda: _hermite_param(ctx, n, z, 1), [], **kwargs)
@defun
def pcfu(ctx, a, z, **kwargs):
    r"""
    Gives the parabolic cylinder function `U(a,z)`, which may be
    defined for `\Re(z) > 0` in terms of the confluent
    U-function (see :func:`~mpmath.hyperu`) by
    .. math ::
        U(a,z) = 2^{-\frac{1}{4}-\frac{a}{2}} e^{-\frac{1}{4} z^2}
            U\left(\frac{a}{2}+\frac{1}{4},
            \frac{1}{2}, \frac{1}{2}z^2\right)
    or, for arbitrary `z`,
    .. math ::
        e^{-\frac{1}{4}z^2} U(a,z) =
            U(a,0) \,_1F_1\left(-\tfrac{a}{2}+\tfrac{1}{4};
            \tfrac{1}{2}; -\tfrac{1}{2}z^2\right) +
            U'(a,0) z \,_1F_1\left(-\tfrac{a}{2}+\tfrac{3}{4};
            \tfrac{3}{2}; -\tfrac{1}{2}z^2\right).
    **Examples**
    Connection to other functions::
    >>> from mpmath import *
    >>> mp.dps = 25; mp.pretty = True
    >>> z = mpf(3)
    >>> pcfu(0.5,z)
    0.03210358129311151450551963
    >>> sqrt(pi/2)*exp(z**2/4)*erfc(z/sqrt(2))
    0.03210358129311151450551963
    >>> pcfu(0.5,-z)
    23.75012332835297233711255
    >>> sqrt(pi/2)*exp(z**2/4)*erfc(-z/sqrt(2))
    23.75012332835297233711255
    >>> pcfu(0.5,-z)
    23.75012332835297233711255
    >>> sqrt(pi/2)*exp(z**2/4)*erfc(-z/sqrt(2))
    23.75012332835297233711255
    """
    n, _ = ctx._convert_param(a)
    # U(a,z) = D_{-a-1/2}(z) in Whittaker's notation (see pcfd docstring),
    # so simply delegate to pcfd with the shifted, negated order.
    return ctx.pcfd(-n-ctx.mpq_1_2, z)
@defun
def pcfv(ctx, a, z, **kwargs):
    r"""
    Gives the parabolic cylinder function `V(a,z)`, which can be
    represented in terms of :func:`~mpmath.pcfu` as
    .. math ::
        V(a,z) = \frac{\Gamma(a+\tfrac{1}{2}) (U(a,-z)-\sin(\pi a) U(a,z)}{\pi}.
    **Examples**
    Wronskian relation between `U` and `V`::
    >>> from mpmath import *
    >>> mp.dps = 25; mp.pretty = True
    >>> a, z = 2, 3
    >>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z)
    0.7978845608028653558798921
    >>> sqrt(2/pi)
    0.7978845608028653558798921
    >>> a, z = 2.5, 3
    >>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z)
    0.7978845608028653558798921
    >>> a, z = 0.25, -1
    >>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z)
    0.7978845608028653558798921
    >>> a, z = 2+1j, 2+3j
    >>> chop(pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z))
    0.7978845608028653558798921
    """
    n, ntype = ctx._convert_param(a)
    z = ctx.convert(z)
    q = ctx.mpq_1_2
    r = ctx.mpq_1_4
    if ntype == 'Q' and ctx.isint(n*2):
        # Faster for half-integers
        def h():
            # Combine Hermite-type terms for D at z and at -i*z.
            jz = ctx.fmul(z, -1j, exact=True)
            T1terms = _hermite_param(ctx, -n-q, z, 1)
            T2terms = _hermite_param(ctx, n-q, jz, 1)
            for T in T1terms:
                T[0].append(1j)
                T[1].append(1)
                T[3].append(q-n)
            u = ctx.expjpi((q*n-r)) * ctx.sqrt(2/ctx.pi)
            for T in T2terms:
                T[0].append(u)
                T[1].append(1)
            return T1terms + T2terms
        v = ctx.hypercomb(h, [], **kwargs)
        if ctx._is_real_type(n) and ctx._is_real_type(z):
            # The combination above is real for real inputs; discard the
            # spurious imaginary rounding noise.
            v = ctx._re(v)
        return v
    else:
        # General case: two 1F1-type series with sin/cos pi prefactors.
        def h(n):
            w = ctx.square_exp_arg(z, -0.25)
            u = ctx.square_exp_arg(z, 0.5)
            e = ctx.exp(w)
            l = [ctx.pi, q, ctx.exp(w)]
            Y1 = l, [-q, n*q+r, 1], [r-q*n], [], [q*n+r], [q], u
            Y2 = l + [z], [-q, n*q-r, 1, 1], [1-r-q*n], [], [q*n+1-r], [1+q], u
            c, s = ctx.cospi_sinpi(r+q*n)
            Y1[0].append(s)
            Y2[0].append(c)
            for Y in (Y1, Y2):
                Y[1].append(1)
                Y[3].append(q-n)
            return Y1, Y2
        return ctx.hypercomb(h, [n], **kwargs)
@defun
def pcfw(ctx, a, z, **kwargs):
    r"""
    Gives the parabolic cylinder function `W(a,z)` defined in (DLMF 12.14).
    **Examples**
    Value at the origin::
    >>> from mpmath import *
    >>> mp.dps = 25; mp.pretty = True
    >>> a = mpf(0.25)
    >>> pcfw(a,0)
    0.9722833245718180765617104
    >>> power(2,-0.75)*sqrt(abs(gamma(0.25+0.5j*a)/gamma(0.75+0.5j*a)))
    0.9722833245718180765617104
    >>> diff(pcfw,(a,0),(0,1))
    -0.5142533944210078966003624
    >>> -power(2,-0.25)*sqrt(abs(gamma(0.75+0.5j*a)/gamma(0.25+0.5j*a)))
    -0.5142533944210078966003624
    """
    n, _ = ctx._convert_param(a)
    z = ctx.convert(z)
    def terms():
        # phi2 = arg(gamma(1/2 + i*n)), computed via loggamma.
        # (A redundant first computation of phi2 via ctx.arg(ctx.gamma(...)),
        # immediately overwritten by this one, has been removed.)
        phi2 = (ctx.loggamma(0.5+ctx.j*n) - ctx.loggamma(0.5-ctx.j*n))/2j
        rho = ctx.pi/8 + 0.5*phi2
        # XXX: cancellation computing k
        k = ctx.sqrt(1 + ctx.exp(2*ctx.pi*n)) - ctx.exp(ctx.pi*n)
        C = ctx.sqrt(k/2) * ctx.exp(0.25*ctx.pi*n)
        # Two conjugate-type contributions summed accurately below.
        yield C * ctx.expj(rho) * ctx.pcfu(ctx.j*n, z*ctx.expjpi(-0.25))
        yield C * ctx.expj(-rho) * ctx.pcfu(-ctx.j*n, z*ctx.expjpi(0.25))
    v = ctx.sum_accurately(terms)
    if ctx._is_real_type(n) and ctx._is_real_type(z):
        # Result is real for real parameters; strip rounding noise.
        v = ctx._re(v)
    return v
"""
Even/odd PCFs. Useful?
@defun
def pcfy1(ctx, a, z, **kwargs):
a, _ = ctx._convert_param(n)
z = ctx.convert(z)
def h():
w = ctx.square_exp_arg(z)
w1 = ctx.fmul(w, -0.25, exact=True)
w2 = ctx.fmul(w, 0.5, exact=True)
e = ctx.exp(w1)
return [e], [1], [], [], [ctx.mpq_1_2*a+ctx.mpq_1_4], [ctx.mpq_1_2], w2
return ctx.hypercomb(h, [], **kwargs)
@defun
def pcfy2(ctx, a, z, **kwargs):
a, _ = ctx._convert_param(n)
z = ctx.convert(z)
def h():
w = ctx.square_exp_arg(z)
w1 = ctx.fmul(w, -0.25, exact=True)
w2 = ctx.fmul(w, 0.5, exact=True)
e = ctx.exp(w1)
return [e, z], [1, 1], [], [], [ctx.mpq_1_2*a+ctx.mpq_3_4], \
[ctx.mpq_3_2], w2
return ctx.hypercomb(h, [], **kwargs)
"""
@defun_wrapped
def gegenbauer(ctx, n, a, z, **kwargs):
    # Special cases: a+0.5, a*2 poles
    if ctx.isnpint(a):
        # Return a zero of the appropriate type/precision.
        return 0*(z+n)
    if ctx.isnpint(a+0.5):
        # TODO: something else is required here
        # E.g.: gegenbauer(-2, -0.5, 3) == -12
        if ctx.isnpint(n+1):
            raise NotImplementedError("Gegenbauer function with two limits")
        def h(a):
            # Same term as below, but with a as the perturbed parameter
            # so hypercomb can take the limit at the pole of a+0.5.
            a2 = 2*a
            T = [], [], [n+a2], [n+1, a2], [-n, n+a2], [a+0.5], 0.5*(1-z)
            return [T]
        return ctx.hypercomb(h, [a], **kwargs)
    def h(n):
        # 2F1 representation with gamma prefactors; n is the perturbed
        # parameter in the generic case.
        a2 = 2*a
        T = [], [], [n+a2], [n+1, a2], [-n, n+a2], [a+0.5], 0.5*(1-z)
        return [T]
    return ctx.hypercomb(h, [n], **kwargs)
@defun_wrapped
def jacobi(ctx, n, a, b, x, **kwargs):
    if not ctx.isnpint(a):
        # Generic case: hypergeometric representation in argument (1-x)/2.
        def h(n):
            return (([], [], [a+n+1], [n+1, a+1], [-n, a+b+n+1], [a+1], (1-x)*0.5),)
        return ctx.hypercomb(h, [n], **kwargs)
    if not ctx.isint(b):
        # a is a nonpositive integer: use the representation in (x+1)/2
        # with b-based prefactors instead.
        def h(n, a):
            return (([], [], [-b], [n+1, -b-n], [-n, a+b+n+1], [b+1], (x+1)*0.5),)
        return ctx.hypercomb(h, [n, a], **kwargs)
    # XXX: determine appropriate limit
    return ctx.binomial(n+a,n) * ctx.hyp2f1(-n,1+n+a+b,a+1,(1-x)/2, **kwargs)
@defun_wrapped
def laguerre(ctx, n, a, z, **kwargs):
    # XXX: limits, poles
    #if ctx.isnpint(n):
    #    return 0*(a+z)
    def h(a):
        # 1F1 representation; a is passed as the perturbable parameter
        # so hypercomb can handle limits at its poles.
        return (([], [], [a+n+1], [a+1, n+1], [-n], [a+1], z),)
    return ctx.hypercomb(h, [a], **kwargs)
@defun_wrapped
def legendre(ctx, n, x, **kwargs):
    if ctx.isint(n):
        n = int(n)
        # Accuracy near zeros
        # (n + (n < 0)) & 1 tests for an odd-degree polynomial; for n < 0
        # this accounts for the reflection of the degree index.
        if (n + (n < 0)) & 1:
            if not x:
                return x
            mag = ctx.mag(x)
            if mag < -2*ctx.prec-10:
                # NOTE(review): for extremely tiny x the value x itself is
                # returned -- presumably adequate at working precision;
                # confirm against the Taylor expansion near 0.
                return x
            if mag < -5:
                # Add guard precision to counter cancellation near the zero.
                ctx.prec += -mag
    return ctx.hyp2f1(-n,n+1,1,(1-x)/2, **kwargs)
@defun
def legenp(ctx, n, m, z, type=2, **kwargs):
    # Legendre function, 1st kind
    n = ctx.convert(n)
    m = ctx.convert(m)
    # Faster
    if not m:
        # Order zero reduces to the ordinary Legendre function.
        return ctx.legendre(n, z, **kwargs)
    # TODO: correct evaluation at singularities
    if type == 2:
        # Type 2: prefactors (1+z)^(m/2) (1-z)^(-m/2).
        def h(n,m):
            g = m*0.5
            T = [1+z, 1-z], [g, -g], [], [1-m], [-n, n+1], [1-m], 0.5*(1-z)
            return (T,)
        return ctx.hypercomb(h, [n,m], **kwargs)
    if type == 3:
        # Type 3: same series but with prefactors (z+1)^(m/2) (z-1)^(-m/2),
        # which selects a different branch cut convention.
        def h(n,m):
            g = m*0.5
            T = [z+1, z-1], [g, -g], [], [1-m], [-n, n+1], [1-m], 0.5*(1-z)
            return (T,)
        return ctx.hypercomb(h, [n,m], **kwargs)
    raise ValueError("requires type=2 or type=3")
@defun
def legenq(ctx, n, m, z, type=2, **kwargs):
    # Legendre function, 2nd kind
    n = ctx.convert(n)
    m = ctx.convert(m)
    z = ctx.convert(z)
    if z in (1, -1):
        #if ctx.isint(m):
        #    return ctx.nan
        #return ctx.inf  # unsigned
        # Q is singular at the endpoints; return nan rather than a signed
        # infinity.
        return ctx.nan
    if type == 2:
        # Type 2 convention: two-term combination with sin/cos(pi*m)
        # prefactors and powers of (1+z), (1-z).
        def h(n, m):
            cos, sin = ctx.cospi_sinpi(m)
            s = 2 * sin / ctx.pi
            c = cos
            a = 1+z
            b = 1-z
            u = m/2
            w = (1-z)/2
            T1 = [s, c, a, b], [-1, 1, u, -u], [], [1-m], \
                [-n, n+1], [1-m], w
            T2 = [-s, a, b], [-1, -u, u], [n+m+1], [n-m+1, m+1], \
                [-n, n+1], [m+1], w
            return T1, T2
        return ctx.hypercomb(h, [n, m], **kwargs)
    if type == 3:
        # The following is faster when there only is a single series
        # Note: not valid for -1 < z < 0 (?)
        if abs(z) > 1:
            def h(n, m):
                # Single asymptotic-style series in z**(-2) for |z| > 1.
                T1 = [ctx.expjpi(m), 2, ctx.pi, z, z-1, z+1], \
                    [1, -n-1, 0.5, -n-m-1, 0.5*m, 0.5*m], \
                    [n+m+1], [n+1.5], \
                    [0.5*(2+n+m), 0.5*(1+n+m)], [n+1.5], z**(-2)
                return [T1]
            return ctx.hypercomb(h, [n, m], **kwargs)
        else:
            # not valid for 1 < z < inf ?
            def h(n, m):
                # Two-term combination analogous to type 2, but with powers
                # of (z-1) instead of (1-z).
                s = 2 * ctx.sinpi(m) / ctx.pi
                c = ctx.expjpi(m)
                a = 1+z
                b = z-1
                u = m/2
                w = (1-z)/2
                T1 = [s, c, a, b], [-1, 1, u, -u], [], [1-m], \
                    [-n, n+1], [1-m], w
                T2 = [-s, c, a, b], [-1, 1, -u, u], [n+m+1], [n-m+1, m+1], \
                    [-n, n+1], [m+1], w
                return T1, T2
            return ctx.hypercomb(h, [n, m], **kwargs)
    raise ValueError("requires type=2 or type=3")
@defun_wrapped
def chebyt(ctx, n, x, **kwargs):
    """Chebyshev polynomial of the first kind, T_n(x) = 2F1(-n, n; 1/2; (1-x)/2)."""
    if not x:
        # T_n(0) vanishes exactly for odd integer n; return a zero of the
        # same type as x instead of evaluating the hypergeometric series.
        if ctx.isint(n) and int(ctx._re(n)) % 2 == 1:
            return x * 0
    return ctx.hyp2f1(-n, n, (1, 2), (1 - x) / 2, **kwargs)
@defun_wrapped
def chebyu(ctx, n, x, **kwargs):
    """Chebyshev polynomial of the second kind, U_n(x) = (n+1) 2F1(-n, n+2; 3/2; (1-x)/2)."""
    if not x:
        # U_n(0) vanishes exactly for odd integer n; short-circuit with a
        # typed zero rather than running the series.
        if ctx.isint(n) and int(ctx._re(n)) % 2 == 1:
            return x * 0
    return (n + 1) * ctx.hyp2f1(-n, n + 2, (3, 2), (1 - x) / 2, **kwargs)
@defun
def spherharm(ctx, l, m, theta, phi, **kwargs):
    l = ctx.convert(l)
    m = ctx.convert(m)
    theta = ctx.convert(theta)
    phi = ctx.convert(phi)
    l_isint = ctx.isint(l)
    l_natural = l_isint and l >= 0
    m_isint = ctx.isint(m)
    if l_isint and l < 0 and m_isint:
        # Negative integer degree: reflect to the equivalent nonnegative one.
        return ctx.spherharm(-(l+1), m, theta, phi, **kwargs)
    if theta == 0 and m_isint and m < 0:
        # Complex-typed zero (keeps the return type consistent).
        return ctx.zero * 1j
    if l_natural and m_isint:
        if abs(m) > l:
            # Outside the valid order range the harmonic vanishes.
            return ctx.zero * 1j
        # http://functions.wolfram.com/Polynomials/
        # SphericalHarmonicY/26/01/02/0004/
        def h(l,m):
            absm = abs(m)
            C = [-1, ctx.expj(m*phi),
                 (2*l+1)*ctx.fac(l+absm)/ctx.pi/ctx.fac(l-absm),
                 ctx.sin(theta)**2,
                 ctx.fac(absm), 2]
            P = [0.5*m*(ctx.sign(m)+1), 1, 0.5, 0.5*absm, -1, -absm-1]
            return ((C, P, [], [], [absm-l, l+absm+1], [absm+1],
                ctx.sin(0.5*theta)**2),)
    else:
        # http://functions.wolfram.com/HypergeometricFunctions/
        # SphericalHarmonicYGeneral/26/01/02/0001/
        def h(l,m):
            if ctx.isnpint(l-m+1) or ctx.isnpint(l+m+1) or ctx.isnpint(1-m):
                # Gamma prefactor has a pole; the term is identically zero.
                return (([0], [-1], [], [], [], [], 0),)
            cos, sin = ctx.cos_sin(0.5*theta)
            C = [0.5*ctx.expj(m*phi), (2*l+1)/ctx.pi,
                 ctx.gamma(l-m+1), ctx.gamma(l+m+1),
                 cos**2, sin**2]
            P = [1, 0.5, 0.5, -0.5, 0.5*m, -0.5*m]
            return ((C, P, [], [1-m], [-l,l+1], [1-m], sin**2),)
    return ctx.hypercomb(h, [l,m], **kwargs)
| bsd-3-clause | f96018f121bda14360ed407079927924 | 31.651116 | 84 | 0.474498 | 2.543774 | false | false | false | false |
fredrik-johansson/mpmath | mpmath/calculus/approximation.py | 9 | 8817 | from ..libmp.backend import xrange
from .calculus import defun
#----------------------------------------------------------------------------#
# Approximation methods #
#----------------------------------------------------------------------------#
# The Chebyshev approximation formula is given at:
# http://mathworld.wolfram.com/ChebyshevApproximationFormula.html
# The only major changes in the following code is that we return the
# expanded polynomial coefficients instead of Chebyshev coefficients,
# and that we automatically transform [a,b] -> [-1,1] and back
# for convenience.
# Coefficient in Chebyshev approximation
def chebcoeff(ctx,f,a,b,j,N):
    # j-th Chebyshev coefficient of f on [a, b], from the N-point
    # cosine-node sampling formula (nodes mapped from [-1, 1] to [a, b]).
    half = ctx.mpf(0.5)
    total = ctx.mpf(0)
    for k in range(1, N + 1):
        node = ctx.cospi((k - half) / N)
        sample = f(node * (b - a) * half + (b + a) * half)
        total += sample * ctx.cospi(j * (k - half) / N)
    return 2 * total / N
# Generate Chebyshev polynomials T_n(ax+b) in expanded form
def chebT(ctx, a=1, b=0):
    """Yield expanded coefficient lists of T_n(a*x+b), lowest degree first.

    (ctx is unused; kept for signature consistency with the other helpers.)
    """
    prev = [1]           # T_0 = 1
    yield prev
    cur = [b, a]         # T_1(a*x+b) = a*x + b
    while 1:
        yield cur
        # Recurrence: T[n+1](ax+b) = 2*(ax+b)*T[n](ax+b) - T[n-1](ax+b)
        nxt = [0] + [2*a*c for c in cur]
        for i, c in enumerate(cur):
            nxt[i] += 2*b*c
        for i, c in enumerate(prev):
            nxt[i] -= c
        prev, cur = cur, nxt
@defun
def chebyfit(ctx, f, interval, N, error=False):
    r"""
    Computes a polynomial of degree `N-1` that approximates the
    given function `f` on the interval `[a, b]`. With ``error=True``,
    :func:`~mpmath.chebyfit` also returns an accurate estimate of the
    maximum absolute error; that is, the maximum value of
    `|f(x) - P(x)|` for `x \in [a, b]`.
    :func:`~mpmath.chebyfit` uses the Chebyshev approximation formula,
    which gives a nearly optimal solution: that is, the maximum
    error of the approximating polynomial is very close to
    the smallest possible for any polynomial of the same degree.
    Chebyshev approximation is very useful if one needs repeated
    evaluation of an expensive function, such as function defined
    implicitly by an integral or a differential equation. (For
    example, it could be used to turn a slow mpmath function
    into a fast machine-precision version of the same.)
    **Examples**
    Here we use :func:`~mpmath.chebyfit` to generate a low-degree approximation
    of `f(x) = \cos(x)`, valid on the interval `[1, 2]`::
    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> poly, err = chebyfit(cos, [1, 2], 5, error=True)
    >>> nprint(poly)
    [0.00291682, 0.146166, -0.732491, 0.174141, 0.949553]
    >>> nprint(err, 12)
    1.61351758081e-5
    The polynomial can be evaluated using ``polyval``::
    >>> nprint(polyval(poly, 1.6), 12)
    -0.0291858904138
    >>> nprint(cos(1.6), 12)
    -0.0291995223013
    Sampling the true error at 1000 points shows that the error
    estimate generated by ``chebyfit`` is remarkably good::
    >>> error = lambda x: abs(cos(x) - polyval(poly, x))
    >>> nprint(max([error(1+n/1000.) for n in range(1000)]), 12)
    1.61349954245e-5
    **Choice of degree**
    The degree `N` can be set arbitrarily high, to obtain an
    arbitrarily good approximation. As a rule of thumb, an
    `N`-term Chebyshev approximation is good to `N/(b-a)` decimal
    places on a unit interval (although this depends on how
    well-behaved `f` is). The cost grows accordingly: ``chebyfit``
    evaluates the function `(N^2)/2` times to compute the
    coefficients and an additional `N` times to estimate the error.
    **Possible issues**
    One should be careful to use a sufficiently high working
    precision both when calling ``chebyfit`` and when evaluating
    the resulting polynomial, as the polynomial is sometimes
    ill-conditioned. It is for example difficult to reach
    15-digit accuracy when evaluating the polynomial using
    machine precision floats, no matter the theoretical
    accuracy of the polynomial. (The option to return the
    coefficients in Chebyshev form should be made available
    in the future.)
    It is important to note the Chebyshev approximation works
    poorly if `f` is not smooth. A function containing singularities,
    rapid oscillation, etc can be approximated more effectively by
    multiplying it by a weight function that cancels out the
    nonsmooth features, or by dividing the interval into several
    segments.
    """
    a, b = ctx._as_points(interval)
    orig = ctx.prec
    try:
        # Guard digits to absorb the ill-conditioning of the expanded basis.
        ctx.prec = orig + int(N**0.5) + 20
        # Chebyshev coefficients of f, then accumulate the expanded
        # polynomial d from the (rescaled) Chebyshev basis polynomials.
        c = [chebcoeff(ctx,f,a,b,k,N) for k in range(N)]
        d = [ctx.zero] * N
        d[0] = -c[0]/2
        h = ctx.mpf(0.5)
        # chebT yields T_k mapped from [a, b] back to [-1, 1].
        T = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a))
        for (k, Tk) in zip(range(N), T):
            for i in range(len(Tk)):
                d[i] += c[k]*Tk[i]
        # Store coefficients in polyval order (highest degree first).
        d = d[::-1]
        # Estimate maximum error
        err = ctx.zero
        for k in range(N):
            x = ctx.cos(ctx.pi*k/N) * (b-a)*h + (b+a)*h
            err = max(err, abs(f(x) - ctx.polyval(d, x)))
    finally:
        ctx.prec = orig
    if error:
        return d, +err
    else:
        return d
@defun
def fourier(ctx, f, interval, N):
    r"""
    Computes the Fourier series of degree `N` of the given function
    on the interval `[a, b]`. More precisely, :func:`~mpmath.fourier` returns
    two lists `(c, s)` of coefficients (the cosine series and sine
    series, respectively), such that
    .. math ::
        f(x) \sim \sum_{k=0}^N
            c_k \cos(k m x) + s_k \sin(k m x)
    where `m = 2 \pi / (b-a)`.
    Note that many texts define the first coefficient as `2 c_0` instead
    of `c_0`. The easiest way to evaluate the computed series correctly
    is to pass it to :func:`~mpmath.fourierval`.
    **Examples**
    The function `f(x) = x` has a simple Fourier series on the standard
    interval `[-\pi, \pi]`. The cosine coefficients are all zero (because
    the function has odd symmetry), and the sine coefficients are
    rational numbers::
    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> c, s = fourier(lambda x: x, [-pi, pi], 5)
    >>> nprint(c)
    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    >>> nprint(s)
    [0.0, 2.0, -1.0, 0.666667, -0.5, 0.4]
    This computes a Fourier series of a nonsymmetric function on
    a nonstandard interval::
    >>> I = [-1, 1.5]
    >>> f = lambda x: x**2 - 4*x + 1
    >>> cs = fourier(f, I, 4)
    >>> nprint(cs[0])
    [0.583333, 1.12479, -1.27552, 0.904708, -0.441296]
    >>> nprint(cs[1])
    [0.0, -2.6255, 0.580905, 0.219974, -0.540057]
    It is instructive to plot a function along with its truncated
    Fourier series::
    >>> plot([f, lambda x: fourierval(cs, I, x)], I) #doctest: +SKIP
    Fourier series generally converge slowly (and may not converge
    pointwise). For example, if `f(x) = \cosh(x)`, a 10-term Fourier
    series gives an `L^2` error corresponding to 2-digit accuracy::
    >>> I = [-1, 1]
    >>> cs = fourier(cosh, I, 9)
    >>> g = lambda x: (cosh(x) - fourierval(cs, I, x))**2
    >>> nprint(sqrt(quad(g, I)))
    0.00467963
    :func:`~mpmath.fourier` uses numerical quadrature. For nonsmooth functions,
    the accuracy (and speed) can be improved by including all singular
    points in the interval specification::
    >>> nprint(fourier(abs, [-1, 1], 0), 10)
    ([0.5000441648], [0.0])
    >>> nprint(fourier(abs, [-1, 0, 1], 0), 10)
    ([0.5], [0.0])
    """
    interval = ctx._as_points(interval)
    a = interval[0]
    b = interval[-1]
    L = b-a
    cos_series = []
    sin_series = []
    # Coefficients below this magnitude are treated as exact zeros.
    cutoff = ctx.eps*10
    for n in xrange(N+1):
        # Angular frequency of mode n.
        m = 2*n*ctx.pi/L
        # Gauss-Legendre quadrature over the (possibly segmented) interval.
        an = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(m*t), interval)/L
        bn = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(m*t), interval)/L
        if n == 0:
            an /= 2
        if abs(an) < cutoff: an = ctx.zero
        if abs(bn) < cutoff: bn = ctx.zero
        cos_series.append(an)
        sin_series.append(bn)
    return cos_series, sin_series
@defun
def fourierval(ctx, series, interval, x):
    """
    Evaluates a Fourier series (in the format computed
    by :func:`~mpmath.fourier` for the given interval) at the point `x`.
    The series should be a pair `(c, s)` where `c` is the
    cosine series and `s` is the sine series. The two lists
    need not have the same length.
    """
    cs, ss = series
    ab = ctx._as_points(interval)
    # Angular frequency of the fundamental mode on [a, b].
    # (Unused locals a, b -- previously read from ``interval`` but never
    # used -- have been removed.)
    m = 2*ctx.pi/(ab[-1]-ab[0])
    s = ctx.zero
    # Skip exactly-zero coefficients to avoid pointless cos/sin evaluations.
    s += ctx.fsum(cs[n]*ctx.cos(m*n*x) for n in xrange(len(cs)) if cs[n])
    s += ctx.fsum(ss[n]*ctx.sin(m*n*x) for n in xrange(len(ss)) if ss[n])
    return s
| bsd-3-clause | d83bd46ccc217eaaa2a9550f0e3651d9 | 34.841463 | 79 | 0.587388 | 3.255908 | false | false | false | false |
fredrik-johansson/mpmath | mpmath/tests/test_matrices.py | 1 | 7944 | import pytest
import sys
from mpmath import *
def test_matrix_basic():
    # Identity construction and equality.
    A1 = matrix(3)
    for i in range(3):
        A1[i,i] = 1
    assert A1 == eye(3)
    assert A1 == matrix(A1)
    # A fresh rectangular matrix is all zeros (sparse storage dict empty).
    A2 = matrix(3, 2)
    assert not A2._matrix__data
    A3 = matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    assert list(A3) == list(range(1, 10))
    # Assigning zero removes the entry from the sparse storage.
    A3[1,1] = 0
    assert not (1, 1) in A3._matrix__data
    # Matrix-matrix multiplication and shape mismatch.
    A4 = matrix([[1, 2, 3], [4, 5, 6]])
    A5 = matrix([[6, -1], [3, 2], [0, -3]])
    assert A4 * A5 == matrix([[12, -6], [39, -12]])
    assert A1 * A3 == A3 * A1 == A3
    pytest.raises(ValueError, lambda: A2*A2)
    # repr() round-trips through eval for both mp and fp matrices.
    l = [[10, 20, 30], [40, 0, 60], [70, 80, 90]]
    A6 = matrix(l)
    assert A6.tolist() == l
    assert A6 == eval(repr(A6))
    A6 = fp.matrix(A6)
    assert A6 == eval(repr(A6))
    assert A6*1j == eval(repr(A6*1j))
    assert A3 * 10 == 10 * A3 == A6
    # Shape attributes are mutable; shrinking drops out-of-range entries
    # (8 stored entries of A3 reduce to the 3 inside the new 2x2 window).
    assert A2.rows == 3
    assert A2.cols == 2
    A3.rows = 2
    A3.cols = 2
    assert len(A3._matrix__data) == 3
    # Elementwise arithmetic and scalar broadcasting.
    assert A4 + A4 == 2*A4
    pytest.raises(ValueError, lambda: A4 + A2)
    assert sum(A1 - A1) == 0
    A7 = matrix([[1, 2], [3, 4], [5, 6], [7, 8]])
    x = matrix([10, -10])
    assert A7*x == matrix([-10, -10, -10, -10])
    A8 = ones(5)
    assert sum((A8 + 1) - (2 - zeros(5))) == 0
    assert (1 + ones(4)) / 2 - 1 == zeros(4)
    # Powers require a square matrix.
    assert eye(3)**10 == eye(3)
    pytest.raises(ValueError, lambda: A7**2)
    # matrix(A) makes an independent copy.
    A9 = randmatrix(3)
    A10 = matrix(A9)
    A9[0,0] = -100
    assert A9 != A10
    assert nstr(A9)
def test_matmul():
    """
    Test the PEP465 "@" matrix multiplication syntax.
    To avoid syntax errors when importing this file in Python 3.5 and below, we have to use exec() - sorry for that.
    """
    # TODO remove exec() wrapper as soon as we drop support for Python <= 3.5
    # 0x30500f0 encodes version 3.5.0 (final) in sys.hexversion format.
    if sys.hexversion < 0x30500f0:
        # we are on Python < 3.5
        pytest.skip("'@' (__matmul__) is only supported in Python 3.5 or newer")
    A4 = matrix([[1, 2, 3], [4, 5, 6]])
    A5 = matrix([[6, -1], [3, 2], [0, -3]])
    # "@" must agree with the ordinary "*" matrix product.
    exec("assert A4 @ A5 == A4 * A5")
def test_matrix_slices():
    """Slice reads and slice assignment on matrices and column vectors,
    including scalar broadcast into a slice and shape/index error cases."""
    A = matrix([ [1, 2, 3],
                 [4, 5 ,6],
                 [7, 8 ,9]])
    V = matrix([1,2,3,4,5])
    # Get slice
    assert A[:,:] == A
    assert A[:,1] == matrix([[2],[5],[8]])
    assert A[2,:] == matrix([[7, 8 ,9]])
    assert A[1:3,1:3] == matrix([[5,6],[8,9]])
    assert V[2:4] == matrix([3,4])
    # slices beyond the matrix dimensions must raise
    pytest.raises(IndexError, lambda: A[:,1:6])
    # Assign slice with matrix
    A1 = matrix(3)
    A1[:,:] = A
    assert A1[:,:] == matrix([[1, 2, 3],
                              [4, 5 ,6],
                              [7, 8 ,9]])
    A1[0,:] = matrix([[10, 11, 12]])
    assert A1 == matrix([ [10, 11, 12],
                          [4, 5 ,6],
                          [7, 8 ,9]])
    A1[:,2] = matrix([[13], [14], [15]])
    assert A1 == matrix([ [10, 11, 13],
                          [4, 5 ,14],
                          [7, 8 ,15]])
    A1[:2,:2] = matrix([[16, 17], [18 , 19]])
    assert A1 == matrix([ [16, 17, 13],
                          [18, 19 ,14],
                          [7, 8 ,15]])
    # scalar assignment broadcasts over the slice
    V[1:3] = 10
    assert V == matrix([1,10,10,4,5])
    # shape mismatch between target slice and source must raise
    with pytest.raises(ValueError):
        A1[2,:] = A[:,1]
    with pytest.raises(IndexError):
        A1[2,1:20] = A[:,:]
    # Assign slice with scalar
    A1[:,2] = 10
    assert A1 == matrix([ [16, 17, 10],
                          [18, 19 ,10],
                          [7, 8 ,10]])
    A1[:,:] = 40
    for x in A1:
        assert x == 40
def test_matrix_power():
    """Integer powers of a square matrix, including negative exponents."""
    M = matrix([[1, 2], [3, 4]])
    # positive powers agree with repeated multiplication
    assert M**2 == M*M
    assert M**3 == M*M*M
    # negative powers are inverses of the corresponding positive powers
    assert M**-1 == inverse(M)
    assert M**-2 == inverse(M*M)
def test_matrix_transform():
    """Transpose, in-place row swapping (on matrices and plain lists),
    and appending a column via extend()."""
    A = matrix([[1, 2], [3, 4], [5, 6]])
    assert A.T == A.transpose() == matrix([[1, 3, 5], [2, 4, 6]])
    swap_row(A, 1, 2)
    assert A == matrix([[1, 2], [5, 6], [3, 4]])
    # swap_row also works on ordinary Python lists
    l = [1, 2]
    swap_row(l, 0, 1)
    assert l == [2, 1]
    assert extend(eye(3), [1,2,3]) == matrix([[1,0,0,1],[0,1,0,2],[0,0,1,3]])
def test_matrix_conjugate():
    """Elementwise conjugate and conjugate transpose (Hermitian adjoint,
    available both as transpose_conj() and the .H property)."""
    A = matrix([[1 + j, 0], [2, j]])
    assert A.conjugate() == matrix([[mpc(1, -1), 0], [2, mpc(0, -1)]])
    assert A.transpose_conj() == A.H == matrix([[mpc(1, -1), 2],
                                                [0, mpc(0, -1)]])
def test_matrix_creation():
    """Factory helpers: diag, ones, zeros, randmatrix and hilbert."""
    assert diag([1, 2, 3]) == matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
    A1 = ones(2, 3)
    assert A1.rows == 2 and A1.cols == 3
    for a in A1:
        assert a == 1
    A2 = zeros(3, 2)
    assert A2.rows == 3 and A2.cols == 2
    for a in A2:
        assert a == 0
    # two random matrices should (with overwhelming probability) differ
    assert randmatrix(10) != randmatrix(10)
    one = mpf(1)
    assert hilbert(3) == matrix([[one, one/2, one/3],
                                 [one/2, one/3, one/4],
                                 [one/3, one/4, one/5]])
def test_norms():
    """Matrix norms (1, inf, Frobenius) and vector p-norms (1, 2, p, inf)."""
    # matrix norms
    A = matrix([[1, -2], [-3, -1], [2, 1]])
    assert mnorm(A,1) == 6
    assert mnorm(A,inf) == 4
    assert mnorm(A,'F') == sqrt(20)
    # vector norms
    assert norm(-3) == 3
    x = [1, -2, 7, -12]
    assert norm(x, 1) == 22
    assert round(norm(x, 2), 10) == 14.0712472795
    assert round(norm(x, 10), 10) == 12.0054633727
    # the inf-norm is the largest absolute component
    assert norm(x, inf) == 12
def test_vector():
    """Column vectors: construction from a flat list, single-index access,
    sparse storage, and the inner product via x.T*x."""
    x = matrix([0, 1, 2, 3, 4])
    # a flat list builds a column vector
    assert x == matrix([[0], [1], [2], [3], [4]])
    assert x[3] == 3
    # the zero entry is not stored explicitly
    assert len(x._matrix__data) == 4
    assert list(x) == list(range(5))
    x[0] = -10
    x[4] = 0
    assert x[0] == -10
    assert len(x) == len(x.T) == 5
    # inner product (-10)^2 + 1 + 4 + 9 = 114, returned as a 1x1 matrix
    assert x.T*x == matrix([[114]])
def test_matrix_copy():
    """Copies made via .copy() and via unary plus are independent of the
    source matrix: mutating a copy must not affect the original."""
    src = ones(6)
    for clone, marker in [(src.copy(), 0), (+src, 42)]:
        assert src == clone
        clone[0,0] = marker
        assert src != clone
def test_matrix_numpy():
    """Conversion from a numpy array must equal conversion from nested lists.

    Uses ``pytest.importorskip`` so the test is reported as *skipped* when
    numpy is unavailable; the previous silent ``return`` made it show up as
    a (vacuous) pass.
    """
    numpy = pytest.importorskip("numpy")
    l = [[1, 2], [3, 4], [5, 6]]
    a = numpy.array(l)
    assert matrix(l) == matrix(a)
def test_interval_matrix_scalar_mult():
    """Multiplication of iv.matrix and any scalar type"""
    a = mpi(-1, 1)        # real interval
    b = a + a * 2j        # complex interval
    c = mpf(42)           # mp real
    d = c + c * 2j        # mp complex
    e = 1.234             # Python float
    f = fp.convert(e)     # fp real
    g = e + e * 3j        # Python complex
    h = fp.convert(g)     # fp complex
    M = iv.ones(1)
    # both scalar*matrix and matrix*scalar must yield the interval matrix [x]
    for x in [a, b, c, d, e, f, g, h]:
        assert x * M == iv.matrix([x])
        assert M * x == iv.matrix([x])
@pytest.mark.xfail()
def test_interval_matrix_matrix_mult():
    """Multiplication of iv.matrix and other matrix types"""
    # NOTE(review): marked xfail — products mixing iv.matrix with mp/fp
    # matrices presumably do not yet coerce correctly; confirm before
    # removing the marker.
    A = ones(1)
    B = fp.ones(1)
    M = iv.ones(1)
    for X in [A, B, M]:
        assert X * M == iv.matrix(X)
        assert X * M == X
        assert M * X == iv.matrix(X)
        assert M * X == X
def test_matrix_conversion_to_iv():
    # Test that matrices with foreign datatypes are properly converted
    for other_type_eye in [eye(3), fp.eye(3), iv.eye(3)]:
        A = iv.matrix(other_type_eye)
        B = iv.eye(3)
        # entries must be converted to the interval element type exactly
        assert type(A[0,0]) == type(B[0,0])
        assert A.tolist() == B.tolist()
def test_interval_matrix_mult_bug():
    """Regression test for interval matrix multiplication: squaring an
    interval matrix must widen the entries (nonzero delta) while still
    containing the exact mathematical result."""
    # regression test for interval matrix multiplication:
    # result must be nonzero-width and contain the exact result
    x = convert('1.00000000000001') # note: this is implicitly rounded to some near mpf float value
    A = matrix([[x]])
    B = iv.matrix(A)
    C = iv.matrix([[x]])
    assert B == C
    B = B * B
    C = C * C
    assert B == C
    # the interval must be genuinely widened by the rounding of x*x ...
    assert B[0, 0].delta > 1e-16
    assert B[0, 0].delta < 3e-16
    assert C[0, 0].delta > 1e-16
    assert C[0, 0].delta < 3e-16
    # ... and still enclose the exact square
    assert mp.mpf('1.00000000000001998401444325291756783368705994138804689654') in B[0, 0]
    assert mp.mpf('1.00000000000001998401444325291756783368705994138804689654') in C[0, 0]
    # the following caused an error before the bug was fixed
    assert iv.matrix(mp.eye(2)) * (iv.ones(2) + mpi(1, 2)) == iv.matrix([[mpi(2, 3), mpi(2, 3)], [mpi(2, 3), mpi(2, 3)]])
| bsd-3-clause | c46601a98df619d5f843c9e246421d43 | 30.399209 | 121 | 0.4786 | 2.828053 | false | true | false | false |
fredrik-johansson/mpmath | mpmath/tests/test_functions2.py | 2 | 96990 | import math
import pytest
from mpmath import *
def test_bessel():
    """Spot-check the Bessel functions J, Y, I, K against reference values,
    including complex orders/arguments, asymptotic regimes (argument 1e10)
    and limiting behaviour at 0.

    Fix: the issue-331 loop at the end used to leave the global working
    precision at ``mp.dps = 90``; it is now restored to 15 so later tests
    are not affected by test ordering (matching the convention used by the
    other tests in this file).
    """
    mp.dps = 15
    assert j0(1).ae(0.765197686557966551)
    assert j0(pi).ae(-0.304242177644093864)
    assert j0(1000).ae(0.0247866861524201746)
    assert j0(-25).ae(0.0962667832759581162)
    assert j1(1).ae(0.440050585744933516)
    assert j1(pi).ae(0.284615343179752757)
    assert j1(1000).ae(0.00472831190708952392)
    assert j1(-25).ae(0.125350249580289905)
    assert besselj(5,1).ae(0.000249757730211234431)
    assert besselj(5+0j,1).ae(0.000249757730211234431)
    assert besselj(5,pi).ae(0.0521411843671184747)
    assert besselj(5,1000).ae(0.00502540694523318607)
    assert besselj(5,-25).ae(0.0660079953984229934)
    assert besselj(-3,2).ae(-0.128943249474402051)
    assert besselj(-4,2).ae(0.0339957198075684341)
    assert besselj(3,3+2j).ae(0.424718794929639595942 + 0.625665327745785804812j)
    assert besselj(0.25,4).ae(-0.374760630804249715)
    assert besselj(1+2j,3+4j).ae(0.319247428741872131 - 0.669557748880365678j)
    assert (besselj(3, 10**10) * 10**5).ae(0.76765081748139204023)
    # Y_v at 0: the sign of the divergence depends on the order's sign/parity
    assert bessely(-0.5, 0) == 0
    assert bessely(0.5, 0) == -inf
    assert bessely(1.5, 0) == -inf
    assert bessely(0,0) == -inf
    assert bessely(-0.4, 0) == -inf
    assert bessely(-0.6, 0) == inf
    assert bessely(-1, 0) == inf
    assert bessely(-1.4, 0) == inf
    assert bessely(-1.6, 0) == -inf
    assert bessely(-1, 0) == inf
    assert bessely(-2, 0) == -inf
    assert bessely(-3, 0) == inf
    assert bessely(0.5, 0) == -inf
    assert bessely(1, 0) == -inf
    assert bessely(1.5, 0) == -inf
    assert bessely(2, 0) == -inf
    assert bessely(2.5, 0) == -inf
    assert bessely(3, 0) == -inf
    assert bessely(0,0.5).ae(-0.44451873350670655715)
    assert bessely(1,0.5).ae(-1.4714723926702430692)
    assert bessely(-1,0.5).ae(1.4714723926702430692)
    assert bessely(3.5,0.5).ae(-138.86400867242488443)
    assert bessely(0,3+4j).ae(4.6047596915010138655-8.8110771408232264208j)
    assert bessely(0,j).ae(-0.26803248203398854876+1.26606587775200833560j)
    assert (bessely(3, 10**10) * 10**5).ae(0.21755917537013204058)
    assert besseli(0,0) == 1
    assert besseli(1,0) == 0
    assert besseli(2,0) == 0
    assert besseli(-1,0) == 0
    assert besseli(-2,0) == 0
    assert besseli(0,0.5).ae(1.0634833707413235193)
    assert besseli(1,0.5).ae(0.25789430539089631636)
    assert besseli(-1,0.5).ae(0.25789430539089631636)
    assert besseli(3.5,0.5).ae(0.00068103597085793815863)
    assert besseli(0,3+4j).ae(-3.3924877882755196097-1.3239458916287264815j)
    assert besseli(0,j).ae(besselj(0,1))
    assert (besseli(3, 10**10) * mpf(10)**(-4342944813)).ae(4.2996028505491271875)
    assert besselk(0,0) == inf
    assert besselk(1,0) == inf
    assert besselk(2,0) == inf
    assert besselk(-1,0) == inf
    assert besselk(-2,0) == inf
    assert besselk(0,0.5).ae(0.92441907122766586178)
    assert besselk(1,0.5).ae(1.6564411200033008937)
    assert besselk(-1,0.5).ae(1.6564411200033008937)
    assert besselk(3.5,0.5).ae(207.48418747548460607)
    assert besselk(0,3+4j).ae(-0.007239051213570155013+0.026510418350267677215j)
    assert besselk(0,j).ae(-0.13863371520405399968-1.20196971531720649914j)
    assert (besselk(3, 10**10) * mpf(10)**4342944824).ae(1.1628981033356187851)
    # test for issue 331, bug reported by Michael Hartmann
    for n in range(10,100,10):
        mp.dps = n
        assert besseli(91.5,24.7708).ae("4.00830632138673963619656140653537080438462342928377020695738635559218797348548092636896796324190271316137982810144874264e-41")
    # restore the default working precision for subsequent tests
    mp.dps = 15
def test_bessel_zeros():
    """Zeros of Bessel J and Y via besseljzero/besselyzero, including
    zeros of the derivative (final argument 1)."""
    mp.dps = 15
    assert besseljzero(0,1).ae(2.40482555769577276869)
    assert besseljzero(2,1).ae(5.1356223018406825563)
    assert besseljzero(1,50).ae(157.86265540193029781)
    assert besseljzero(10,1).ae(14.475500686554541220)
    assert besseljzero(0.5,3).ae(9.4247779607693797153)
    assert besseljzero(2,1,1).ae(3.0542369282271403228)
    assert besselyzero(0,1).ae(0.89357696627916752158)
    assert besselyzero(2,1).ae(3.3842417671495934727)
    assert besselyzero(1,50).ae(156.29183520147840108)
    assert besselyzero(10,1).ae(12.128927704415439387)
    assert besselyzero(0.5,3).ae(7.8539816339744830962)
    assert besselyzero(2,1,1).ae(5.0025829314460639452)
def test_hankel():
    """Hankel functions H1 and H2 (complex conjugate pair for real
    arguments) at real and complex orders/arguments."""
    mp.dps = 15
    assert hankel1(0,0.5).ae(0.93846980724081290423-0.44451873350670655715j)
    assert hankel1(1,0.5).ae(0.2422684576748738864-1.4714723926702430692j)
    assert hankel1(-1,0.5).ae(-0.2422684576748738864+1.4714723926702430692j)
    assert hankel1(1.5,0.5).ae(0.0917016996256513026-2.5214655504213378514j)
    assert hankel1(1.5,3+4j).ae(0.0066806866476728165382-0.0036684231610839127106j)
    assert hankel2(0,0.5).ae(0.93846980724081290423+0.44451873350670655715j)
    assert hankel2(1,0.5).ae(0.2422684576748738864+1.4714723926702430692j)
    assert hankel2(-1,0.5).ae(-0.2422684576748738864-1.4714723926702430692j)
    assert hankel2(1.5,0.5).ae(0.0917016996256513026+2.5214655504213378514j)
    assert hankel2(1.5,3+4j).ae(14.783528526098567526-7.397390270853446512j)
def test_struve():
    """Spot-check the Struve functions H and L at sample orders/arguments."""
    mp.dps = 15
    cases = [
        (struveh, 2, 3, 0.74238666967748318564),
        (struveh, -2.5, 3, 0.41271003220971599344),
        (struvel, 2, 3, 1.7476573277362782744),
        (struvel, -2.5, 3, 1.5153394466819651377),
    ]
    for func, order, arg, expected in cases:
        assert func(order, arg).ae(expected)
def test_whittaker():
    """Whittaker functions M and W at a sample point (k=2, m=3, z=4)."""
    mp.dps = 15
    m_val = whitm(2, 3, 4)
    w_val = whitw(2, 3, 4)
    assert m_val.ae(49.753745589025246591)
    assert w_val.ae(14.111656223052932215)
def test_kelvin():
    """Kelvin functions ber, bei, ker, kei at various orders, including
    negative and fractional orders."""
    mp.dps = 15
    assert ber(2,3).ae(0.80836846563726819091)
    assert ber(3,4).ae(-0.28262680167242600233)
    assert ber(-3,2).ae(-0.085611448496796363669)
    assert bei(2,3).ae(-0.89102236377977331571)
    assert bei(-3,2).ae(-0.14420994155731828415)
    assert ker(2,3).ae(0.12839126695733458928)
    assert ker(-3,2).ae(-0.29802153400559142783)
    assert ker(0.5,3).ae(-0.085662378535217097524)
    assert kei(2,3).ae(0.036804426134164634000)
    assert kei(-3,2).ae(0.88682069845786731114)
    assert kei(0.5,3).ae(0.013633041571314302948)
def test_hyper_misc():
    """Miscellaneous hypergeometric cases: unit values at z=0, the
    degenerate 0F0 and 1F0 series, rational-parameter 2F1 identities,
    and a high-precision mixed-parameter evaluation."""
    mp.dps = 15
    # pFq(...; ...; 0) == 1 for all parameter counts
    assert hyp0f1(1,0) == 1
    assert hyp1f1(1,2,0) == 1
    assert hyp1f2(1,2,3,0) == 1
    assert hyp2f1(1,2,3,0) == 1
    assert hyp2f2(1,2,3,4,0) == 1
    assert hyp2f3(1,2,3,4,5,0) == 1
    # Degenerate case: 0F0
    assert hyper([],[],0) == 1
    assert hyper([],[],-2).ae(exp(-2))
    # Degenerate case: 1F0
    assert hyper([2],[],1.5) == 4
    #
    assert hyp2f1((1,3),(2,3),(5,6),mpf(27)/32).ae(1.6)
    assert hyp2f1((1,4),(1,2),(3,4),mpf(80)/81).ae(1.8)
    assert hyp2f1((2,3),(1,1),(3,2),(2+j)/3).ae(1.327531603558679093+0.439585080092769253j)
    mp.dps = 25
    v = mpc('1.2282306665029814734863026', '-0.1225033830118305184672133')
    assert hyper([(3,4),2+j,1],[1,5,j/3],mpf(1)/5+j/8).ae(v)
    mp.dps = 15
def test_elliptic_integrals():
    """Complete elliptic integrals K(m) and E(m): closed-form special
    values, real return types for m < 1, branch behaviour just above and
    below m = 1, complex arguments, infinities, and NaN propagation."""
    mp.dps = 15
    assert ellipk(0).ae(pi/2)
    assert ellipk(0.5).ae(gamma(0.25)**2/(4*sqrt(pi)))
    assert ellipk(1) == inf
    assert ellipk(1+0j) == inf
    assert ellipk(-1).ae('1.3110287771460599052')
    assert ellipk(-2).ae('1.1714200841467698589')
    # real m < 1 must give real (mpf) results, not mpc
    assert isinstance(ellipk(-2), mpf)
    assert isinstance(ellipe(-2), mpf)
    assert ellipk(-50).ae('0.47103424540873331679')
    mp.dps = 30
    n1 = +fraction(99999,100000)
    n2 = +fraction(100001,100000)
    mp.dps = 15
    # values just below/above the singularity at m = 1
    assert ellipk(n1).ae('7.1427724505817781901')
    assert ellipk(n2).ae(mpc('7.1427417367963090109', '-1.5707923998261688019'))
    assert ellipe(n1).ae('1.0000332138990829170')
    v = ellipe(n2)
    assert v.real.ae('0.999966786328145474069137')
    assert (v.imag*10**6).ae('7.853952181727432')
    assert ellipk(2).ae(mpc('1.3110287771460599052', '-1.3110287771460599052'))
    assert ellipk(50).ae(mpc('0.22326753950210985451', '-0.47434723226254522087'))
    assert ellipk(3+4j).ae(mpc('0.91119556380496500866', '0.63133428324134524388'))
    assert ellipk(3-4j).ae(mpc('0.91119556380496500866', '-0.63133428324134524388'))
    assert ellipk(-3+4j).ae(mpc('0.95357894880405122483', '0.23093044503746114444'))
    assert ellipk(-3-4j).ae(mpc('0.95357894880405122483', '-0.23093044503746114444'))
    assert isnan(ellipk(nan))
    assert isnan(ellipe(nan))
    assert ellipk(inf) == 0
    assert isinstance(ellipk(inf), mpc)
    assert ellipk(-inf) == 0
    assert ellipk(1+0j) == inf
    assert ellipe(0).ae(pi/2)
    assert ellipe(0.5).ae(pi**(mpf(3)/2)/gamma(0.25)**2 +gamma(0.25)**2/(8*sqrt(pi)))
    assert ellipe(1) == 1
    assert ellipe(1+0j) == 1
    assert ellipe(inf) == mpc(0,inf)
    assert ellipe(-inf) == inf
    assert ellipe(3+4j).ae(1.4995535209333469543-1.5778790079127582745j)
    assert ellipe(3-4j).ae(1.4995535209333469543+1.5778790079127582745j)
    assert ellipe(-3+4j).ae(2.5804237855343377803-0.8306096791000413778j)
    assert ellipe(-3-4j).ae(2.5804237855343377803+0.8306096791000413778j)
    assert ellipe(2).ae(0.59907011736779610372+0.59907011736779610372j)
    # tiny arguments must not underflow to something other than pi/2
    assert ellipe('1e-1000000000').ae(pi/2)
    assert ellipk('1e-1000000000').ae(pi/2)
    assert ellipe(-pi).ae(2.4535865983838923)
    mp.dps = 50
    assert ellipk(1/pi).ae('1.724756270009501831744438120951614673874904182624739673')
    assert ellipe(1/pi).ae('1.437129808135123030101542922290970050337425479058225712')
    assert ellipk(-10*pi).ae('0.5519067523886233967683646782286965823151896970015484512')
    assert ellipe(-10*pi).ae('5.926192483740483797854383268707108012328213431657645509')
    v = ellipk(pi)
    assert v.real.ae('0.973089521698042334840454592642137667227167622330325225')
    assert v.imag.ae('-1.156151296372835303836814390793087600271609993858798016')
    v = ellipe(pi)
    assert v.real.ae('0.4632848917264710404078033487934663562998345622611263332')
    assert v.imag.ae('1.0637961621753130852473300451583414489944099504180510966')
    mp.dps = 15
def test_exp_integrals():
    """Exponential/trigonometric integrals (ei, li, si, ci, shi, chi),
    Fresnel integrals and Airy functions: values at e, at a complex point,
    and limits at 0 and +/-inf."""
    mp.dps = 15
    x = +e
    z = e + sqrt(3)*j
    assert ei(x).ae(8.21168165538361560)
    assert li(x).ae(1.89511781635593676)
    assert si(x).ae(1.82104026914756705)
    assert ci(x).ae(0.213958001340379779)
    assert shi(x).ae(4.11520706247846193)
    assert chi(x).ae(4.09647459290515367)
    assert fresnels(x).ae(0.437189718149787643)
    assert fresnelc(x).ae(0.401777759590243012)
    assert airyai(x).ae(0.0108502401568586681)
    assert airybi(x).ae(8.98245748585468627)
    # same functions at a complex argument
    assert ei(z).ae(3.72597969491314951 + 7.34213212314224421j)
    assert li(z).ae(2.28662658112562502 + 1.50427225297269364j)
    assert si(z).ae(2.48122029237669054 + 0.12684703275254834j)
    assert ci(z).ae(0.169255590269456633 - 0.892020751420780353j)
    assert shi(z).ae(1.85810366559344468 + 3.66435842914920263j)
    assert chi(z).ae(1.86787602931970484 + 3.67777369399304159j)
    assert fresnels(z/3).ae(0.034534397197008182 + 0.754859844188218737j)
    assert fresnelc(z/3).ae(1.261581645990027372 + 0.417949198775061893j)
    assert airyai(z).ae(-0.0162552579839056062 - 0.0018045715700210556j)
    assert airybi(z).ae(-4.98856113282883371 + 2.08558537872180623j)
    # special values and limits
    assert li(0) == 0.0
    assert li(1) == -inf
    assert li(inf) == inf
    assert isinstance(li(0.7), mpf)
    assert si(inf).ae(pi/2)
    assert si(-inf).ae(-pi/2)
    assert ci(inf) == 0
    assert ci(0) == -inf
    assert isinstance(ei(-0.7), mpf)
    assert airyai(inf) == 0
    assert airybi(inf) == inf
    assert airyai(-inf) == 0
    assert airybi(-inf) == 0
    assert fresnels(inf) == 0.5
    assert fresnelc(inf) == 0.5
    assert fresnels(-inf) == -0.5
    assert fresnelc(-inf) == -0.5
    assert shi(0) == 0
    assert shi(inf) == inf
    assert shi(-inf) == -inf
    assert chi(0) == -inf
    assert chi(inf) == inf
def test_ei():
    """Exponential integral Ei: limits, large real/complex arguments
    (asymptotic expansion regime) and behaviour on/near the imaginary
    axis, where the imaginary part tends to +/-pi."""
    mp.dps = 15
    assert ei(0) == -inf
    assert ei(inf) == inf
    assert ei(-inf) == -0.0
    assert ei(20+70j).ae(6.1041351911152984397e6 - 2.7324109310519928872e6j)
    # tests for the asymptotic expansion
    # values checked with Mathematica ExpIntegralEi
    mp.dps = 50
    r = ei(20000)
    s = '3.8781962825045010930273870085501819470698476975019e+8681'
    assert str(r) == s
    r = ei(-200)
    s = '-6.8852261063076355977108174824557929738368086933303e-90'
    assert str(r) == s
    r =ei(20000 + 10*j)
    sre = '-3.255138234032069402493850638874410725961401274106e+8681'
    sim = '-2.1081929993474403520785942429469187647767369645423e+8681'
    assert str(r.real) == sre and str(r.imag) == sim
    mp.dps = 15
    # More asymptotic expansions
    assert chi(-10**6+100j).ae('1.3077239389562548386e+434288 + 7.6808956999707408158e+434287j')
    assert shi(-10**6+100j).ae('-1.3077239389562548386e+434288 - 7.6808956999707408158e+434287j')
    mp.dps = 15
    # on the imaginary axis Im(ei) approaches +/-pi as |z| grows
    assert ei(10j).ae(-0.0454564330044553726+3.2291439210137706686j)
    assert ei(100j).ae(-0.0051488251426104921+3.1330217936839529126j)
    u = ei(fmul(10**20, j, exact=True))
    assert u.real.ae(-6.4525128526578084421345e-21, abs_eps=0, rel_eps=8*eps)
    assert u.imag.ae(pi)
    assert ei(-10j).ae(-0.0454564330044553726-3.2291439210137706686j)
    assert ei(-100j).ae(-0.0051488251426104921-3.1330217936839529126j)
    u = ei(fmul(-10**20, j, exact=True))
    assert u.real.ae(-6.4525128526578084421345e-21, abs_eps=0, rel_eps=8*eps)
    assert u.imag.ae(-pi)
    assert ei(10+10j).ae(-1576.1504265768517448+436.9192317011328140j)
    u = ei(-10+10j)
    assert u.real.ae(7.6698978415553488362543e-7, abs_eps=0, rel_eps=8*eps)
    assert u.imag.ae(3.141595611735621062025)
def test_e1():
    """Exponential integral E1: limits at 0 and +/-inf (with the branch-cut
    term -pi*j at -inf) and conjugate-symmetric imaginary-axis values."""
    mp.dps = 15
    assert e1(0) == inf
    assert e1(inf) == 0
    assert e1(-inf) == mpc(-inf, -pi)
    assert e1(10j).ae(0.045456433004455372635 + 0.087551267423977430100j)
    assert e1(100j).ae(0.0051488251426104921444 - 0.0085708599058403258790j)
    assert e1(fmul(10**20, j, exact=True)).ae(6.4525128526578084421e-21 - 7.6397040444172830039e-21j, abs_eps=0, rel_eps=8*eps)
    # e1(conj(z)) == conj(e1(z))
    assert e1(-10j).ae(0.045456433004455372635 - 0.087551267423977430100j)
    assert e1(-100j).ae(0.0051488251426104921444 + 0.0085708599058403258790j)
    assert e1(fmul(-10**20, j, exact=True)).ae(6.4525128526578084421e-21 + 7.6397040444172830039e-21j, abs_eps=0, rel_eps=8*eps)
def test_expint():
    """Generalized exponential integral E_n(z): integer/real/complex orders,
    special values at z=0 (E_n(0) = 1/(n-1) for n > 1), negative orders,
    infinities, and the relation to ei for n = 1."""
    mp.dps = 15
    assert expint(0,0) == inf
    assert expint(0,1).ae(1/e)
    assert expint(0,1.5).ae(2/exp(1.5)/3)
    assert expint(1,1).ae(-ei(-1))
    assert expint(2,0).ae(1)
    assert expint(3,0).ae(1/2.)
    assert expint(4,0).ae(1/3.)
    assert expint(-2, 0.5).ae(26/sqrt(e))
    assert expint(-1,-1) == 0
    assert expint(-2,-1).ae(-e)
    assert expint(5.5, 0).ae(2/9.)
    assert expint(2.00000001,0).ae(100000000./100000001)
    assert expint(2+3j,4-j).ae(0.0023461179581675065414+0.0020395540604713669262j)
    assert expint('1.01', '1e-1000').ae(99.9999999899412802)
    assert expint('1.000000000001', 3.5).ae(0.00697013985754701819446)
    assert expint(2,3).ae(3*ei(-3)+exp(-3))
    assert (expint(10,20)*10**10).ae(0.694439055541231353)
    assert expint(3,inf) == 0
    assert expint(3.2,inf) == 0
    assert expint(3.2+2j,inf) == 0
    assert expint(1,3j).ae(-0.11962978600800032763 + 0.27785620120457163717j)
    assert expint(1,3).ae(0.013048381094197037413)
    # negative real arguments pick up the branch-cut term -pi*j
    assert expint(1,-3).ae(-ei(3)-pi*j)
    #assert expint(3) == expint(1,3)
    assert expint(1,-20).ae(-25615652.66405658882 - 3.1415926535897932385j)
    assert expint(1000000,0).ae(1./999999)
    assert expint(0,2+3j).ae(-0.025019798357114678171 + 0.027980439405104419040j)
    assert expint(-1,2+3j).ae(-0.022411973626262070419 + 0.038058922011377716932j)
    assert expint(-1.5,0) == inf
def test_trig_integrals():
    """Trigonometric/hyperbolic integrals si, ci, shi, chi: small and huge
    arguments, odd/even symmetries, and the complex quadrant structure —
    checked both in the arbitrary-precision mp context and (via a local
    tolerance helper) in the fixed-precision fp context."""
    mp.dps = 30
    assert si(mpf(1)/1000000).ae('0.000000999999999999944444444444446111')
    assert ci(mpf(1)/1000000).ae('-13.2382948930629912435014366276')
    assert si(10**10).ae('1.5707963267075846569685111517747537')
    assert ci(10**10).ae('-4.87506025174822653785729773959e-11')
    assert si(10**100).ae(pi/2)
    assert (ci(10**100)*10**100).ae('-0.372376123661276688262086695553')
    # si is odd; ci picks up +pi*j across the negative real axis
    assert si(-3) == -si(3)
    assert ci(-3).ae(ci(3) + pi*j)
    # Test complex structure
    mp.dps = 15
    assert mp.ci(50).ae(-0.0056283863241163054402)
    assert mp.ci(50+2j).ae(-0.018378282946133067149+0.070352808023688336193j)
    assert mp.ci(20j).ae(1.28078263320282943611e7+1.5707963267949j)
    assert mp.ci(-2+20j).ae(-4.050116856873293505e6+1.207476188206989909e7j)
    assert mp.ci(-50+2j).ae(-0.0183782829461330671+3.0712398455661049023j)
    assert mp.ci(-50).ae(-0.0056283863241163054+3.1415926535897932385j)
    assert mp.ci(-50-2j).ae(-0.0183782829461330671-3.0712398455661049023j)
    assert mp.ci(-2-20j).ae(-4.050116856873293505e6-1.207476188206989909e7j)
    assert mp.ci(-20j).ae(1.28078263320282943611e7-1.5707963267949j)
    assert mp.ci(50-2j).ae(-0.018378282946133067149-0.070352808023688336193j)
    assert mp.si(50).ae(1.5516170724859358947)
    assert mp.si(50+2j).ae(1.497884414277228461-0.017515007378437448j)
    assert mp.si(20j).ae(1.2807826332028294459e7j)
    assert mp.si(-2+20j).ae(-1.20747603112735722103e7-4.050116856873293554e6j)
    assert mp.si(-50+2j).ae(-1.497884414277228461-0.017515007378437448j)
    assert mp.si(-50).ae(-1.5516170724859358947)
    assert mp.si(-50-2j).ae(-1.497884414277228461+0.017515007378437448j)
    assert mp.si(-2-20j).ae(-1.20747603112735722103e7+4.050116856873293554e6j)
    assert mp.si(-20j).ae(-1.2807826332028294459e7j)
    assert mp.si(50-2j).ae(1.497884414277228461+0.017515007378437448j)
    assert mp.chi(50j).ae(-0.0056283863241163054+1.5707963267948966192j)
    assert mp.chi(-2+50j).ae(-0.0183782829461330671+1.6411491348185849554j)
    assert mp.chi(-20).ae(1.28078263320282943611e7+3.1415926535898j)
    assert mp.chi(-20-2j).ae(-4.050116856873293505e6+1.20747571696809187053e7j)
    assert mp.chi(-2-50j).ae(-0.0183782829461330671-1.6411491348185849554j)
    assert mp.chi(-50j).ae(-0.0056283863241163054-1.5707963267948966192j)
    assert mp.chi(2-50j).ae(-0.0183782829461330671-1.500443518771208283j)
    assert mp.chi(20-2j).ae(-4.050116856873293505e6-1.20747603112735722951e7j)
    assert mp.chi(20).ae(1.2807826332028294361e7)
    assert mp.chi(2+50j).ae(-0.0183782829461330671+1.500443518771208283j)
    assert mp.shi(50j).ae(1.5516170724859358947j)
    assert mp.shi(-2+50j).ae(0.017515007378437448+1.497884414277228461j)
    assert mp.shi(-20).ae(-1.2807826332028294459e7)
    assert mp.shi(-20-2j).ae(4.050116856873293554e6-1.20747603112735722103e7j)
    assert mp.shi(-2-50j).ae(0.017515007378437448-1.497884414277228461j)
    assert mp.shi(-50j).ae(-1.5516170724859358947j)
    assert mp.shi(2-50j).ae(-0.017515007378437448-1.497884414277228461j)
    assert mp.shi(20-2j).ae(-4.050116856873293554e6-1.20747603112735722103e7j)
    assert mp.shi(20).ae(1.2807826332028294459e7)
    assert mp.shi(2+50j).ae(-0.017515007378437448+1.497884414277228461j)
    # local approximate-equality helper for the fixed-precision fp checks
    def ae(x,y,tol=1e-12):
        return abs(x-y) <= abs(y)*tol
    assert fp.ci(fp.inf) == 0
    assert ae(fp.ci(fp.ninf), fp.pi*1j)
    assert ae(fp.si(fp.inf), fp.pi/2)
    assert ae(fp.si(fp.ninf), -fp.pi/2)
    assert fp.si(0) == 0
    assert ae(fp.ci(50), -0.0056283863241163054402)
    assert ae(fp.ci(50+2j), -0.018378282946133067149+0.070352808023688336193j)
    assert ae(fp.ci(20j), 1.28078263320282943611e7+1.5707963267949j)
    assert ae(fp.ci(-2+20j), -4.050116856873293505e6+1.207476188206989909e7j)
    assert ae(fp.ci(-50+2j), -0.0183782829461330671+3.0712398455661049023j)
    assert ae(fp.ci(-50), -0.0056283863241163054+3.1415926535897932385j)
    assert ae(fp.ci(-50-2j), -0.0183782829461330671-3.0712398455661049023j)
    assert ae(fp.ci(-2-20j), -4.050116856873293505e6-1.207476188206989909e7j)
    assert ae(fp.ci(-20j), 1.28078263320282943611e7-1.5707963267949j)
    assert ae(fp.ci(50-2j), -0.018378282946133067149-0.070352808023688336193j)
    assert ae(fp.si(50), 1.5516170724859358947)
    assert ae(fp.si(50+2j), 1.497884414277228461-0.017515007378437448j)
    assert ae(fp.si(20j), 1.2807826332028294459e7j)
    assert ae(fp.si(-2+20j), -1.20747603112735722103e7-4.050116856873293554e6j)
    assert ae(fp.si(-50+2j), -1.497884414277228461-0.017515007378437448j)
    assert ae(fp.si(-50), -1.5516170724859358947)
    assert ae(fp.si(-50-2j), -1.497884414277228461+0.017515007378437448j)
    assert ae(fp.si(-2-20j), -1.20747603112735722103e7+4.050116856873293554e6j)
    assert ae(fp.si(-20j), -1.2807826332028294459e7j)
    assert ae(fp.si(50-2j), 1.497884414277228461+0.017515007378437448j)
    assert ae(fp.chi(50j), -0.0056283863241163054+1.5707963267948966192j)
    assert ae(fp.chi(-2+50j), -0.0183782829461330671+1.6411491348185849554j)
    assert ae(fp.chi(-20), 1.28078263320282943611e7+3.1415926535898j)
    assert ae(fp.chi(-20-2j), -4.050116856873293505e6+1.20747571696809187053e7j)
    assert ae(fp.chi(-2-50j), -0.0183782829461330671-1.6411491348185849554j)
    assert ae(fp.chi(-50j), -0.0056283863241163054-1.5707963267948966192j)
    assert ae(fp.chi(2-50j), -0.0183782829461330671-1.500443518771208283j)
    assert ae(fp.chi(20-2j), -4.050116856873293505e6-1.20747603112735722951e7j)
    assert ae(fp.chi(20), 1.2807826332028294361e7)
    assert ae(fp.chi(2+50j), -0.0183782829461330671+1.500443518771208283j)
    assert ae(fp.shi(50j), 1.5516170724859358947j)
    assert ae(fp.shi(-2+50j), 0.017515007378437448+1.497884414277228461j)
    assert ae(fp.shi(-20), -1.2807826332028294459e7)
    assert ae(fp.shi(-20-2j), 4.050116856873293554e6-1.20747603112735722103e7j)
    assert ae(fp.shi(-2-50j), 0.017515007378437448-1.497884414277228461j)
    assert ae(fp.shi(-50j), -1.5516170724859358947j)
    assert ae(fp.shi(2-50j), -0.017515007378437448-1.497884414277228461j)
    assert ae(fp.shi(20-2j), -4.050116856873293554e6-1.20747603112735722103e7j)
    assert ae(fp.shi(20), 1.2807826332028294459e7)
    assert ae(fp.shi(2+50j), -0.017515007378437448+1.497884414277228461j)
def test_airy():
    """Airy functions Ai and Bi at large positive, large negative and
    complex arguments (scaled to avoid overflow/underflow in the check)."""
    mp.dps = 15
    assert (airyai(10)*10**10).ae(1.1047532552898687)
    assert (airybi(10)/10**9).ae(0.45564115354822515)
    assert (airyai(1000)*10**9158).ae(9.306933063179556004)
    assert (airybi(1000)/10**9154).ae(5.4077118391949465477)
    assert airyai(-1000).ae(0.055971895773019918842)
    assert airybi(-1000).ae(-0.083264574117080633012)
    assert (airyai(100+100j)*10**188).ae(2.9099582462207032076 + 2.353013591706178756j)
    assert (airybi(100+100j)/10**185).ae(1.7086751714463652039 - 3.1416590020830804578j)
def test_hyper_0f1():
    """0F1: equivalent parameter spellings (tuple fraction vs float) must
    agree, plus the asymptotic expansion for huge real/imaginary z."""
    mp.dps = 15
    v = 8.63911136507950465
    assert hyper([],[(1,3)],1.5).ae(v)
    assert hyper([],[1/3.],1.5).ae(v)
    assert hyp0f1(1/3.,1.5).ae(v)
    assert hyp0f1((1,3),1.5).ae(v)
    # Asymptotic expansion
    assert hyp0f1(3,1e9).ae('4.9679055380347771271e+27455')
    assert hyp0f1(3,1e9j).ae('-2.1222788784457702157e+19410 + 5.0840597555401854116e+19410j')
def test_hyper_1f1():
    """1F1 (confluent hypergeometric): all real/complex/tuple parameter
    spellings must agree; asymptotic regime for huge z; a polynomial case
    that must NOT use the asymptotic series; and a past regression."""
    mp.dps = 15
    v = 1.2917526488617656673
    assert hyper([(1,2)],[(3,2)],0.7).ae(v)
    assert hyper([(1,2)],[(3,2)],0.7+0j).ae(v)
    assert hyper([0.5],[(3,2)],0.7).ae(v)
    assert hyper([0.5],[1.5],0.7).ae(v)
    assert hyper([0.5],[(3,2)],0.7+0j).ae(v)
    assert hyper([0.5],[1.5],0.7+0j).ae(v)
    assert hyper([(1,2)],[1.5+0j],0.7).ae(v)
    assert hyper([0.5+0j],[1.5],0.7).ae(v)
    assert hyper([0.5+0j],[1.5+0j],0.7+0j).ae(v)
    assert hyp1f1(0.5,1.5,0.7).ae(v)
    assert hyp1f1((1,2),1.5,0.7).ae(v)
    # Asymptotic expansion
    assert hyp1f1(2,3,1e10).ae('2.1555012157015796988e+4342944809')
    assert (hyp1f1(2,3,1e10j)*10**10).ae(-0.97501205020039745852 - 1.7462392454512132074j)
    # Shouldn't use asymptotic expansion
    assert hyp1f1(-2, 1, 10000).ae(49980001)
    # Bug
    assert hyp1f1(1j,fraction(1,3),0.415-69.739j).ae(25.857588206024346592 + 15.738060264515292063j)
def test_hyper_2f1():
    """2F1: every mixed spelling of parameters (tuple fractions, floats,
    complex with zero imaginary part) must evaluate to the same value for
    several real and complex arguments."""
    mp.dps = 15
    v = 1.0652207633823291032
    assert hyper([(1,2), (3,4)], [2], 0.3).ae(v)
    assert hyper([(1,2), 0.75], [2], 0.3).ae(v)
    assert hyper([0.5, 0.75], [2.0], 0.3).ae(v)
    assert hyper([0.5, 0.75], [2.0], 0.3+0j).ae(v)
    assert hyper([0.5+0j, (3,4)], [2.0], 0.3+0j).ae(v)
    assert hyper([0.5+0j, (3,4)], [2.0], 0.3).ae(v)
    assert hyper([0.5, (3,4)], [2.0+0j], 0.3).ae(v)
    assert hyper([0.5+0j, 0.75+0j], [2.0+0j], 0.3+0j).ae(v)
    v = 1.09234681096223231717 + 0.18104859169479360380j
    assert hyper([(1,2),0.75+j], [2], 0.5).ae(v)
    assert hyper([0.5,0.75+j], [2.0], 0.5).ae(v)
    assert hyper([0.5,0.75+j], [2.0], 0.5+0j).ae(v)
    assert hyper([0.5,0.75+j], [2.0+0j], 0.5+0j).ae(v)
    v = 0.9625 - 0.125j
    assert hyper([(3,2),-1],[4], 0.1+j/3).ae(v)
    assert hyper([1.5,-1.0],[4], 0.1+j/3).ae(v)
    assert hyper([1.5,-1.0],[4+0j], 0.1+j/3).ae(v)
    assert hyper([1.5+0j,-1.0+0j],[4+0j], 0.1+j/3).ae(v)
    v = 1.02111069501693445001 - 0.50402252613466859521j
    assert hyper([(2,10),(3,10)],[(4,10)],1.5).ae(v)
    assert hyper([0.2,(3,10)],[0.4+0j],1.5).ae(v)
    assert hyper([0.2,(3,10)],[0.4+0j],1.5+0j).ae(v)
    v = 0.76922501362865848528 + 0.32640579593235886194j
    assert hyper([(2,10),(3,10)],[(4,10)],4+2j).ae(v)
    assert hyper([0.2,(3,10)],[0.4+0j],4+2j).ae(v)
    assert hyper([0.2,(3,10)],[(4,10)],4+2j).ae(v)
def test_hyper_2f1_hard():
    """Difficult 2F1 cases: degenerate/singular parameter combinations
    (negative-integer parameters, the ``eliminate``/``eliminate_all``
    flags), evaluation on and near the unit circle, branch-cut behaviour
    for z > 1, and a cancellation-prone large-parameter case."""
    mp.dps = 15
    # Singular cases
    assert hyp2f1(2,-1,-1,3).ae(7)
    assert hyp2f1(2,-1,-1,3,eliminate_all=True).ae(0.25)
    assert hyp2f1(2,-2,-2,3).ae(34)
    assert hyp2f1(2,-2,-2,3,eliminate_all=True).ae(0.25)
    assert hyp2f1(2,-2,-3,3) == 14
    assert hyp2f1(2,-3,-2,3) == inf
    assert hyp2f1(2,-1.5,-1.5,3) == 0.25
    assert hyp2f1(1,2,3,0) == 1
    assert hyp2f1(0,1,0,0) == 1
    assert hyp2f1(0,0,0,0) == 1
    assert isnan(hyp2f1(1,1,0,0))
    assert hyp2f1(2,-1,-5, 0.25+0.25j).ae(1.1+0.1j)
    assert hyp2f1(2,-5,-5, 0.25+0.25j, eliminate=False).ae(163./128 + 125./128*j)
    assert hyp2f1(0.7235, -1, -5, 0.3).ae(1.04341)
    assert hyp2f1(0.7235, -5, -5, 0.3, eliminate=False).ae(1.2939225017815903812)
    assert hyp2f1(-1,-2,4,1) == 1.5
    assert hyp2f1(1,2,-3,1) == inf
    assert hyp2f1(-2,-2,1,1) == 6
    assert hyp2f1(1,-2,-4,1).ae(5./3)
    assert hyp2f1(0,-6,-4,1) == 1
    assert hyp2f1(0,-3,-4,1) == 1
    assert hyp2f1(0,0,0,1) == 1
    assert hyp2f1(1,0,0,1,eliminate=False) == 1
    assert hyp2f1(1,1,0,1) == inf
    assert hyp2f1(1,-6,-4,1) == inf
    assert hyp2f1(-7.2,-0.5,-4.5,1) == 0
    assert hyp2f1(-7.2,-1,-2,1).ae(-2.6)
    assert hyp2f1(1,-0.5,-4.5, 1) == inf
    assert hyp2f1(1,0.5,-4.5, 1) == -inf
    # Check evaluation on / close to unit circle
    z = exp(j*pi/3)
    w = (nthroot(2,3)+1)*exp(j*pi/12)/nthroot(3,4)**3
    assert hyp2f1('1/2','1/6','1/3', z).ae(w)
    assert hyp2f1('1/2','1/6','1/3', z.conjugate()).ae(w.conjugate())
    assert hyp2f1(0.25, (1,3), 2, '0.999').ae(1.06826449496030635)
    assert hyp2f1(0.25, (1,3), 2, '1.001').ae(1.06867299254830309446-0.00001446586793975874j)
    assert hyp2f1(0.25, (1,3), 2, -1).ae(0.96656584492524351673)
    assert hyp2f1(0.25, (1,3), 2, j).ae(0.99041766248982072266+0.03777135604180735522j)
    assert hyp2f1(2,3,5,'0.99').ae(27.699347904322690602)
    assert hyp2f1((3,2),-0.5,3,'0.99').ae(0.68403036843911661388)
    assert hyp2f1(2,3,5,1j).ae(0.37290667145974386127+0.59210004902748285917j)
    assert fsum([hyp2f1((7,10),(2,3),(-1,2), 0.95*exp(j*k)) for k in range(1,15)]).ae(52.851400204289452922+6.244285013912953225j)
    assert fsum([hyp2f1((7,10),(2,3),(-1,2), 1.05*exp(j*k)) for k in range(1,15)]).ae(54.506013786220655330-3.000118813413217097j)
    assert fsum([hyp2f1((7,10),(2,3),(-1,2), exp(j*k)) for k in range(1,15)]).ae(55.792077935955314887+1.731986485778500241j)
    assert hyp2f1(2,2.5,-3.25,0.999).ae(218373932801217082543180041.33)
    # Branches
    assert hyp2f1(1,1,2,1.01).ae(4.5595744415723676911-3.1104877758314784539j)
    assert hyp2f1(1,1,2,1.01+0.1j).ae(2.4149427480552782484+1.4148224796836938829j)
    assert hyp2f1(1,1,2,3+4j).ae(0.14576709331407297807+0.48379185417980360773j)
    assert hyp2f1(1,1,2,4).ae(-0.27465307216702742285 - 0.78539816339744830962j)
    assert hyp2f1(1,1,2,-4).ae(0.40235947810852509365)
    # Other:
    # Cancellation with a large parameter involved (bug reported on sage-devel)
    assert hyp2f1(112, (51,10), (-9,10), -0.99999).ae(-1.6241361047970862961e-24, abs_eps=0, rel_eps=eps*16)
def test_hyper_3f2_etc():
    """Higher-order series 3F2, 4F3, 5F4 at real and complex arguments,
    including a divergent case and a nearly-singular denominator."""
    assert hyper([1,2,3],[1.5,8],-1).ae(0.67108992351533333030)
    assert hyper([1,2,3,4],[5,6,7], -1).ae(0.90232988035425506008)
    assert hyper([1,2,3],[1.25,5], 1).ae(28.924181329701905701)
    assert hyper([1,2,3,4],[5,6,7],5).ae(1.5192307344006649499-1.1529845225075537461j)
    assert hyper([1,2,3,4,5],[6,7,8,9],-1).ae(0.96288759462882357253)
    assert hyper([1,2,3,4,5],[6,7,8,9],1).ae(1.0428697385885855841)
    assert hyper([1,2,3,4,5],[6,7,8,9],5).ae(1.33980653631074769423-0.07143405251029226699j)
    assert hyper([1,2.79,3.08,4.37],[5.2,6.1,7.3],5).ae(1.0996321464692607231-1.7748052293979985001j)
    # divergent at z = 1 when the parameter sums balance this way
    assert hyper([1,1,1],[1,2],1) == inf
    assert hyper([1,1,1],[2,(101,100)],1).ae(100.01621213528313220)
    # slow -- covered by doctests
    #assert hyper([1,1,1],[2,3],0.9999).ae(1.2897972005319693905)
def test_hyper_u():
    """Test the Tricomi confluent hypergeometric function hyperu(a, b, z):
    exact values at z=0 (including poles/zeros for integer parameters),
    polynomial cases for negative integer a, and asymptotic-regime values
    for large complex arguments."""
    mp.dps = 15
    assert hyperu(2,-3,0).ae(0.05)
    assert hyperu(2,-3.5,0).ae(4./99)
    assert hyperu(2,0,0) == 0.5
    assert hyperu(-5,1,0) == -120
    assert hyperu(-5,2,0) == inf
    assert hyperu(-5,-2,0) == 0
    assert hyperu(7,7,3).ae(0.00014681269365593503986) #exp(3)*gammainc(-6,3)
    assert hyperu(2,-3,4).ae(0.011836478100271995559)
    assert hyperu(3,4,5).ae(1./125)
    assert hyperu(2,3,0.0625) == 256
    assert hyperu(-1,2,0.25+0.5j) == -1.75+0.5j
    assert hyperu(0.5,1.5,7.25).ae(2/sqrt(29))
    assert hyperu(2,6,pi).ae(0.55804439825913399130)
    assert (hyperu((3,2),8,100+201j)*10**4).ae(-0.3797318333856738798 - 2.9974928453561707782j)
    assert (hyperu((5,2),(-1,2),-5000)*10**10).ae(-5.6681877926881664678j)
    # XXX: fails because of undetected cancellation in low level series code
    # Alternatively: could use asymptotic series here, if convergence test
    # tweaked back to recognize this one
    #assert (hyperu((5,2),(-1,2),-500)*10**7).ae(-1.82526906001593252847j)
def test_hyper_2f0():
    """Test the (generally divergent) 2F0 hypergeometric function:
    equivalence with hyper([a,b],[],z), polynomial terminating cases,
    a cancellation-sensitive input, and stability of a threshold-related
    case across several working precisions."""
    mp.dps = 15
    assert hyper([1,2],[],3) == hyp2f0(1,2,3)
    assert hyp2f0(2,3,7).ae(0.0116108068639728714668 - 0.0073727413865865802130j)
    assert hyp2f0(2,3,0) == 1
    assert hyp2f0(0,0,0) == 1
    assert hyp2f0(-1,-1,1).ae(2)
    assert hyp2f0(-4,1,1.5).ae(62.5)
    assert hyp2f0(-4,1,50).ae(147029801)
    assert hyp2f0(-4,1,0.0001).ae(0.99960011997600240000)
    assert hyp2f0(0.5,0.25,0.001).ae(1.0001251174078538115)
    assert hyp2f0(0.5,0.25,3+4j).ae(0.85548875824755163518 + 0.21636041283392292973j)
    # Important: cancellation check
    assert hyp2f0((1,6),(5,6),-0.02371708245126284498).ae(0.996785723120804309)
    # Should be exact; polynomial case
    assert hyp2f0(-2,1,0.5+0.5j,zeroprec=200) == 0
    assert hyp2f0(1,-2,0.5+0.5j,zeroprec=200) == 0
    # There used to be a bug in thresholds that made one of the following hang
    for d in [15, 50, 80]:
        mp.dps = d
        assert hyp2f0(1.5, 0.5, 0.009).ae('1.006867007239309717945323585695344927904000945829843527398772456281301440034218290443367270629519483 + 1.238277162240704919639384945859073461954721356062919829456053965502443570466701567100438048602352623e-46j')
def test_hyper_1f2():
    """Test 1F2 for fixed rational parameters over a wide range of real and
    imaginary argument magnitudes (10 up to 10**20), exercising both the
    convergent-series and asymptotic evaluation regimes."""
    mp.dps = 15
    assert hyper([1],[2,3],4) == hyp1f2(1,2,3,4)
    # Shared parameter set for all magnitude sweeps below
    a1,b1,b2 = (1,10),(2,3),1./16
    assert hyp1f2(a1,b1,b2,10).ae(298.7482725554557568)
    assert hyp1f2(a1,b1,b2,100).ae(224128961.48602947604)
    assert hyp1f2(a1,b1,b2,1000).ae(1.1669528298622675109e+27)
    assert hyp1f2(a1,b1,b2,10000).ae(2.4780514622487212192e+86)
    assert hyp1f2(a1,b1,b2,100000).ae(1.3885391458871523997e+274)
    assert hyp1f2(a1,b1,b2,1000000).ae('9.8851796978960318255e+867')
    assert hyp1f2(a1,b1,b2,10**7).ae('1.1505659189516303646e+2746')
    assert hyp1f2(a1,b1,b2,10**8).ae('1.4672005404314334081e+8685')
    assert hyp1f2(a1,b1,b2,10**20).ae('3.6888217332150976493e+8685889636')
    assert hyp1f2(a1,b1,b2,10*j).ae(-16.163252524618572878 - 44.321567896480184312j)
    assert hyp1f2(a1,b1,b2,100*j).ae(61938.155294517848171 + 637349.45215942348739j)
    assert hyp1f2(a1,b1,b2,1000*j).ae(8455057657257695958.7 + 6261969266997571510.6j)
    assert hyp1f2(a1,b1,b2,10000*j).ae(-8.9771211184008593089e+60 + 4.6550528111731631456e+59j)
    assert hyp1f2(a1,b1,b2,100000*j).ae(2.6398091437239324225e+193 + 4.1658080666870618332e+193j)
    assert hyp1f2(a1,b1,b2,1000000*j).ae('3.5999042951925965458e+613 + 1.5026014707128947992e+613j')
    assert hyp1f2(a1,b1,b2,10**7*j).ae('-8.3208715051623234801e+1939 - 3.6752883490851869429e+1941j')
    assert hyp1f2(a1,b1,b2,10**8*j).ae('2.0724195707891484454e+6140 - 1.3276619482724266387e+6141j')
    assert hyp1f2(a1,b1,b2,10**20*j).ae('-1.1734497974795488504e+6141851462 + 1.1498106965385471542e+6141851462j')
def test_hyper_2f3():
    """Test 2F3 for fixed rational parameters, sweeping real and imaginary
    argument magnitudes up to 10**20 to cover the asymptotic expansion."""
    mp.dps = 15
    assert hyper([1,2],[3,4,5],6) == hyp2f3(1,2,3,4,5,6)
    # Shared parameter set for all magnitude sweeps below
    a1,a2,b1,b2,b3 = (1,10),(2,3),(3,10), 2, 1./16
    # Check asymptotic expansion
    assert hyp2f3(a1,a2,b1,b2,b3,10).ae(128.98207160698659976)
    assert hyp2f3(a1,a2,b1,b2,b3,1000).ae(6.6309632883131273141e25)
    assert hyp2f3(a1,a2,b1,b2,b3,10000).ae(4.6863639362713340539e84)
    assert hyp2f3(a1,a2,b1,b2,b3,100000).ae(8.6632451236103084119e271)
    assert hyp2f3(a1,a2,b1,b2,b3,10**6).ae('2.0291718386574980641e865')
    assert hyp2f3(a1,a2,b1,b2,b3,10**7).ae('7.7639836665710030977e2742')
    assert hyp2f3(a1,a2,b1,b2,b3,10**8).ae('3.2537462584071268759e8681')
    assert hyp2f3(a1,a2,b1,b2,b3,10**20).ae('1.2966030542911614163e+8685889627')
    assert hyp2f3(a1,a2,b1,b2,b3,10*j).ae(-18.551602185587547854 - 13.348031097874113552j)
    assert hyp2f3(a1,a2,b1,b2,b3,100*j).ae(78634.359124504488695 + 74459.535945281973996j)
    assert hyp2f3(a1,a2,b1,b2,b3,1000*j).ae(597682550276527901.59 - 65136194809352613.078j)
    assert hyp2f3(a1,a2,b1,b2,b3,10000*j).ae(-1.1779696326238582496e+59 + 1.2297607505213133872e+59j)
    assert hyp2f3(a1,a2,b1,b2,b3,100000*j).ae(2.9844228969804380301e+191 + 7.5587163231490273296e+190j)
    assert hyp2f3(a1,a2,b1,b2,b3,1000000*j).ae('7.4859161049322370311e+610 - 2.8467477015940090189e+610j')
    assert hyp2f3(a1,a2,b1,b2,b3,10**7*j).ae('-1.7477645579418800826e+1938 - 1.7606522995808116405e+1938j')
    assert hyp2f3(a1,a2,b1,b2,b3,10**8*j).ae('-1.6932731942958401784e+6137 - 2.4521909113114629368e+6137j')
    assert hyp2f3(a1,a2,b1,b2,b3,10**20*j).ae('-2.0988815677627225449e+6141851451 + 5.7708223542739208681e+6141851452j')
def test_hyper_2f2():
    """Test 2F2: equivalence with hyper(), plus moderate, huge-positive,
    negative, and extreme (10**20) arguments."""
    mp.dps = 15
    assert hyper([1,2],[3,4],5) == hyp2f2(1,2,3,4,5)
    # Shared parameter set for the magnitude sweep below
    a1,a2,b1,b2 = (3,10),4,(1,2),1./16
    assert hyp2f2(a1,a2,b1,b2,10).ae(448225936.3377556696)
    assert hyp2f2(a1,a2,b1,b2,10000).ae('1.2012553712966636711e+4358')
    assert hyp2f2(a1,a2,b1,b2,-20000).ae(-0.04182343755661214626)
    assert hyp2f2(a1,a2,b1,b2,10**20).ae('1.1148680024303263661e+43429448190325182840')
def test_orthpoly():
    """Test orthogonal polynomials (Jacobi, Legendre, Chebyshev, Laguerre):
    exact rational/integer values, non-integer degrees, endpoint behavior
    at x = +-1 (including infinities for non-integer degree), and complex
    degree/argument combinations."""
    mp.dps = 15
    assert jacobi(-4,2,3,0.7).ae(22800./4913)
    assert jacobi(3,2,4,5.5) == 4133.125
    assert jacobi(1.5,5/6.,4,0).ae(-1.0851951434075508417)
    assert jacobi(-2, 1, 2, 4).ae(-0.16)
    assert jacobi(2, -1, 2.5, 4).ae(34.59375)
    #assert jacobi(2, -1, 2, 4) == 28.5
    assert legendre(5, 7) == 129367
    assert legendre(0.5,0).ae(0.53935260118837935667)
    # Integer degree at the endpoints x = +-1 gives exactly (+-1)**n
    assert legendre(-1,-1) == 1
    assert legendre(0,-1) == 1
    assert legendre(0, 1) == 1
    assert legendre(1, -1) == -1
    assert legendre(7, 1) == 1
    assert legendre(7, -1) == -1
    assert legendre(8,1.5).ae(15457523./32768)
    assert legendre(j,-j).ae(2.4448182735671431011 + 0.6928881737669934843j)
    assert chebyu(5,1) == 6
    assert chebyt(3,2) == 26
    # Non-integer degree diverges at x = -1; sign depends on the degree
    assert legendre(3.5,-1) == inf
    assert legendre(4.5,-1) == -inf
    assert legendre(3.5+1j,-1) == mpc(inf,inf)
    assert legendre(4.5+1j,-1) == mpc(-inf,-inf)
    assert laguerre(4, -2, 3).ae(-1.125)
    assert laguerre(3, 1+j, 0.5).ae(0.2291666666666666667 + 2.5416666666666666667j)
def test_hermite():
    """Test Hermite polynomials/functions: integer and negative degrees,
    non-integer degree, large real and purely imaginary arguments
    (including asymptotic cases for negative degree), and complex
    degree/argument."""
    mp.dps = 15
    assert hermite(-2, 0).ae(0.5)
    assert hermite(-1, 0).ae(0.88622692545275801365)
    assert hermite(0, 0).ae(1)
    assert hermite(1, 0) == 0
    assert hermite(2, 0).ae(-2)
    assert hermite(0, 2).ae(1)
    assert hermite(1, 2).ae(4)
    assert hermite(1, -2).ae(-4)
    assert hermite(2, -2).ae(14)
    assert hermite(0.5, 0).ae(0.69136733903629335053)
    # Odd-degree polynomials vanish at the origin
    assert hermite(9, 0) == 0
    assert hermite(4,4).ae(3340)
    assert hermite(3,4).ae(464)
    assert hermite(-4,4).ae(0.00018623860287512396181)
    assert hermite(-3,4).ae(0.0016540169879668766270)
    assert hermite(9, 2.5j).ae(13638725j)
    assert hermite(9, -2.5j).ae(-13638725j)
    assert hermite(9, 100).ae(511078883759363024000)
    assert hermite(9, -100).ae(-511078883759363024000)
    assert hermite(9, 100j).ae(512922083920643024000j)
    assert hermite(9, -100j).ae(-512922083920643024000j)
    assert hermite(-9.5, 2.5j).ae(-2.9004951258126778174e-6 + 1.7601372934039951100e-6j)
    assert hermite(-9.5, -2.5j).ae(-2.9004951258126778174e-6 - 1.7601372934039951100e-6j)
    assert hermite(-9.5, 100).ae(1.3776300722767084162e-22, abs_eps=0, rel_eps=eps)
    assert hermite(-9.5, -100).ae('1.3106082028470671626e4355')
    assert hermite(-9.5, 100j).ae(-9.7900218581864768430e-23 - 9.7900218581864768430e-23j, abs_eps=0, rel_eps=eps)
    assert hermite(-9.5, -100j).ae(-9.7900218581864768430e-23 + 9.7900218581864768430e-23j, abs_eps=0, rel_eps=eps)
    assert hermite(2+3j, -1-j).ae(851.3677063883687676 - 1496.4373467871007997j)
def test_gegenbauer():
    """Test Gegenbauer (ultraspherical) polynomials: integer cases,
    degenerate zero results, negative degree/order, and fully complex
    degree, order, and argument."""
    mp.dps = 15
    assert gegenbauer(1,2,3).ae(12)
    assert gegenbauer(2,3,4).ae(381)
    assert gegenbauer(0,0,0) == 0
    assert gegenbauer(2,-1,3) == 0
    assert gegenbauer(-7, 0.5, 3).ae(8989)
    assert gegenbauer(1, -0.5, 3).ae(-3)
    assert gegenbauer(1, -1.5, 3).ae(-9)
    assert gegenbauer(1, -0.5, 3).ae(-3)
    assert gegenbauer(-0.5, -0.5, 3).ae(-2.6383553159023906245)
    assert gegenbauer(2+3j, 1-j, 3+4j).ae(14.880536623203696780 + 20.022029711598032898j)
    #assert gegenbauer(-2, -0.5, 3).ae(-12)
def test_legenp():
    """Test the associated Legendre function of the first kind legenp:
    reduction to legendre() for order 0, type=2 vs type=3 conventions
    (which differ off the real segment [-1, 1]), vanishing for order
    exceeding integer degree, and complex degree/order/argument."""
    mp.dps = 15
    assert legenp(2,0,4) == legendre(2,4)
    assert legenp(-2, -1, 0.5).ae(0.43301270189221932338)
    assert legenp(-2, -1, 0.5, type=3).ae(0.43301270189221932338j)
    assert legenp(-2, 1, 0.5).ae(-0.86602540378443864676)
    assert legenp(2+j, 3+4j, -j).ae(134742.98773236786148 + 429782.72924463851745j)
    assert legenp(2+j, 3+4j, -j, type=3).ae(802.59463394152268507 - 251.62481308942906447j)
    # Order > integer degree gives zero
    assert legenp(2,4,3).ae(0)
    assert legenp(2,4,3,type=3).ae(0)
    assert legenp(2,1,0.5).ae(-1.2990381056766579701)
    assert legenp(2,1,0.5,type=3).ae(1.2990381056766579701j)
    assert legenp(3,2,3).ae(-360)
    assert legenp(3,3,3).ae(240j*2**0.5)
    assert legenp(3,4,3).ae(0)
    assert legenp(0,0.5,2).ae(0.52503756790433198939 - 0.52503756790433198939j)
    assert legenp(-1,-0.5,2).ae(0.60626116232846498110 + 0.60626116232846498110j)
    assert legenp(-2,0.5,2).ae(1.5751127037129959682 - 1.5751127037129959682j)
    assert legenp(-2,0.5,-0.5).ae(-0.85738275810499171286)
def test_legenq():
    """Test the associated Legendre function of the second kind legenq:
    NaN at the poles x = +-1, values at 0, the standard real-segment
    cases for degrees 0-2, both type=2 and type=3 conventions, and
    complex/negative degrees and orders."""
    mp.dps = 15
    # Shorthand for readability of the long assertion list
    f = legenq
    # Evaluation at poles
    assert isnan(f(3,2,1))
    assert isnan(f(3,2,-1))
    assert isnan(f(3,2,1,type=3))
    assert isnan(f(3,2,-1,type=3))
    # Evaluation at 0
    assert f(0,1,0,type=2).ae(-1)
    assert f(-2,2,0,type=2,zeroprec=200).ae(0)
    assert f(1.5,3,0,type=2).ae(-2.2239343475841951023)
    assert f(0,1,0,type=3).ae(j)
    assert f(-2,2,0,type=3,zeroprec=200).ae(0)
    assert f(1.5,3,0,type=3).ae(2.2239343475841951022*(1-1j))
    # Standard case, degree 0
    assert f(0,0,-1.5).ae(-0.8047189562170501873 + 1.5707963267948966192j)
    assert f(0,0,-0.5).ae(-0.54930614433405484570)
    assert f(0,0,0,zeroprec=200).ae(0)
    assert f(0,0,0.5).ae(0.54930614433405484570)
    assert f(0,0,1.5).ae(0.8047189562170501873 - 1.5707963267948966192j)
    assert f(0,0,-1.5,type=3).ae(-0.80471895621705018730)
    assert f(0,0,-0.5,type=3).ae(-0.5493061443340548457 - 1.5707963267948966192j)
    assert f(0,0,0,type=3).ae(-1.5707963267948966192j)
    assert f(0,0,0.5,type=3).ae(0.5493061443340548457 - 1.5707963267948966192j)
    assert f(0,0,1.5,type=3).ae(0.80471895621705018730)
    # Standard case, degree 1
    assert f(1,0,-1.5).ae(0.2070784343255752810 - 2.3561944901923449288j)
    assert f(1,0,-0.5).ae(-0.72534692783297257715)
    assert f(1,0,0).ae(-1)
    assert f(1,0,0.5).ae(-0.72534692783297257715)
    assert f(1,0,1.5).ae(0.2070784343255752810 - 2.3561944901923449288j)
    # Standard case, degree 2
    assert f(2,0,-1.5).ae(-0.0635669991240192885 + 4.5160394395353277803j)
    assert f(2,0,-0.5).ae(0.81866326804175685571)
    assert f(2,0,0,zeroprec=200).ae(0)
    assert f(2,0,0.5).ae(-0.81866326804175685571)
    assert f(2,0,1.5).ae(0.0635669991240192885 - 4.5160394395353277803j)
    # Misc orders and degrees
    assert f(2,3,1.5,type=2).ae(-5.7243340223994616228j)
    assert f(2,3,1.5,type=3).ae(-5.7243340223994616228)
    assert f(2,3,0.5,type=2).ae(-12.316805742712016310)
    assert f(2,3,0.5,type=3).ae(-12.316805742712016310j)
    assert f(2,3,-1.5,type=2).ae(-5.7243340223994616228j)
    assert f(2,3,-1.5,type=3).ae(5.7243340223994616228)
    assert f(2,3,-0.5,type=2).ae(-12.316805742712016310)
    assert f(2,3,-0.5,type=3).ae(-12.316805742712016310j)
    assert f(2+3j, 3+4j, 0.5, type=3).ae(0.0016119404873235186807 - 0.0005885900510718119836j)
    assert f(2+3j, 3+4j, -1.5, type=3).ae(0.008451400254138808670 + 0.020645193304593235298j)
    assert f(-2.5,1,-1.5).ae(3.9553395527435335749j)
    assert f(-2.5,1,-0.5).ae(1.9290561746445456908)
    assert f(-2.5,1,0).ae(1.2708196271909686299)
    assert f(-2.5,1,0.5).ae(-0.31584812990742202869)
    assert f(-2.5,1,1.5).ae(-3.9553395527435335742 + 0.2993235655044701706j)
    assert f(-2.5,1,-1.5,type=3).ae(0.29932356550447017254j)
    assert f(-2.5,1,-0.5,type=3).ae(-0.3158481299074220287 - 1.9290561746445456908j)
    assert f(-2.5,1,0,type=3).ae(1.2708196271909686292 - 1.2708196271909686299j)
    assert f(-2.5,1,0.5,type=3).ae(1.9290561746445456907 + 0.3158481299074220287j)
    assert f(-2.5,1,1.5,type=3).ae(-0.29932356550447017254)
def test_agm():
    """Test the arithmetic-geometric mean: fixed points (agm(x,x) == x),
    zero arguments, the classical Gauss constant 1/agm(1, sqrt(2)),
    complex arguments, and the one-argument form agm(b) == agm(1, b)."""
    mp.dps = 15
    assert agm(0,0) == 0
    assert agm(0,1) == 0
    assert agm(1,1) == 1
    assert agm(7,7) == 7
    assert agm(j,j) == j
    # Gauss's constant
    assert (1/agm(1,sqrt(2))).ae(0.834626841674073186)
    assert agm(1,2).ae(1.4567910310469068692)
    assert agm(1,3).ae(1.8636167832448965424)
    assert agm(1,j).ae(0.599070117367796104+0.599070117367796104j)
    assert agm(2) == agm(1,2)
    assert agm(-3,4).ae(0.63468509766550907+1.3443087080896272j)
def test_gammainc():
    """Test the (generalized) incomplete gamma function gammainc:
    upper/lower/generalized forms, negative and complex parameters,
    branch-cut behavior for negative arguments, the regularized variant
    (including its identically-zero cases for nonpositive integer a),
    huge parameters, and regressions for GH issues #301 and #306."""
    mp.dps = 15
    assert gammainc(2,5).ae(6*exp(-5))
    assert gammainc(2,0,5).ae(1-6*exp(-5))
    assert gammainc(2,3,5).ae(-6*exp(-5)+4*exp(-3))
    assert gammainc(-2.5,-0.5).ae(-0.9453087204829418812-5.3164237738936178621j)
    assert gammainc(0,2,4).ae(0.045121158298212213088)
    assert gammainc(0,3).ae(0.013048381094197037413)
    assert gammainc(0,2+j,1-j).ae(0.00910653685850304839-0.22378752918074432574j)
    assert gammainc(0,1-j).ae(0.00028162445198141833+0.17932453503935894015j)
    assert gammainc(3,4,5,True).ae(0.11345128607046320253)
    assert gammainc(3.5,0,inf).ae(gamma(3.5))
    assert gammainc(-150.5,500).ae('6.9825435345798951153e-627')
    assert gammainc(-150.5,800).ae('4.6885137549474089431e-788')
    assert gammainc(-3.5, -20.5).ae(0.27008820585226911 - 1310.31447140574997636j)
    assert gammainc(-3.5, -200.5).ae(0.27008820585226911 - 5.3264597096208368435e76j) # XXX real part
    assert gammainc(0,0,2) == inf
    assert gammainc(1,b=1).ae(0.6321205588285576784)
    assert gammainc(3,2,2) == 0
    assert gammainc(2,3+j,3-j).ae(-0.28135485191849314194j)
    assert gammainc(4+0j,1).ae(5.8860710587430771455)
    # GH issue #301
    assert gammainc(-1,-1).ae(-0.8231640121031084799 + 3.1415926535897932385j)
    assert gammainc(-2,-1).ae(1.7707229202810768576 - 1.5707963267948966192j)
    assert gammainc(-3,-1).ae(-1.4963349162467073643 + 0.5235987755982988731j)
    assert gammainc(-4,-1).ae(1.05365418617643814992 - 0.13089969389957471827j)
    # Regularized upper gamma
    assert isnan(gammainc(0, 0, regularized=True))
    assert gammainc(-1, 0, regularized=True) == inf
    assert gammainc(1, 0, regularized=True) == 1
    # Regularized Q(a, z) is identically 0 for nonpositive integer a != 0
    assert gammainc(0, 5, regularized=True) == 0
    assert gammainc(0, 2+3j, regularized=True) == 0
    assert gammainc(0, 5000, regularized=True) == 0
    assert gammainc(0, 10**30, regularized=True) == 0
    assert gammainc(-1, 5, regularized=True) == 0
    assert gammainc(-1, 5000, regularized=True) == 0
    assert gammainc(-1, 10**30, regularized=True) == 0
    assert gammainc(-1, -5, regularized=True) == 0
    assert gammainc(-1, -5000, regularized=True) == 0
    assert gammainc(-1, -10**30, regularized=True) == 0
    assert gammainc(-1, 3+4j, regularized=True) == 0
    assert gammainc(1, 5, regularized=True).ae(exp(-5))
    assert gammainc(1, 5000, regularized=True).ae(exp(-5000))
    assert gammainc(1, 10**30, regularized=True).ae(exp(-10**30))
    assert gammainc(1, 3+4j, regularized=True).ae(exp(-3-4j))
    assert gammainc(-1000000,2).ae('1.3669297209397347754e-301037', abs_eps=0, rel_eps=8*eps)
    assert gammainc(-1000000,2,regularized=True) == 0
    assert gammainc(-1000000,3+4j).ae('-1.322575609404222361e-698979 - 4.9274570591854533273e-698978j', abs_eps=0, rel_eps=8*eps)
    assert gammainc(-1000000,3+4j,regularized=True) == 0
    assert gammainc(2+3j, 4+5j, regularized=True).ae(0.085422013530993285774-0.052595379150390078503j)
    assert gammainc(1000j, 1000j, regularized=True).ae(0.49702647628921131761 + 0.00297355675013575341j)
    # Generalized
    assert gammainc(3,4,2) == -gammainc(3,2,4)
    assert gammainc(4, 2, 3).ae(1.2593494302978947396)
    assert gammainc(4, 2, 3, regularized=True).ae(0.20989157171631578993)
    assert gammainc(0, 2, 3).ae(0.035852129613864082155)
    assert gammainc(0, 2, 3, regularized=True) == 0
    assert gammainc(-1, 2, 3).ae(0.015219822548487616132)
    assert gammainc(-1, 2, 3, regularized=True) == 0
    assert gammainc(0, 2, 3).ae(0.035852129613864082155)
    assert gammainc(0, 2, 3, regularized=True) == 0
    # Should use upper gammas
    assert gammainc(5, 10000, 12000).ae('1.1359381951461801687e-4327', abs_eps=0, rel_eps=8*eps)
    # Should use lower gammas
    assert gammainc(10000, 2, 3).ae('8.1244514125995785934e4765')
    # GH issue 306
    assert gammainc(3,-1-1j) == 0
    assert gammainc(3,-1+1j) == 0
    assert gammainc(2,-1) == 0
    assert gammainc(2,-1+0j) == 0
    assert gammainc(2+0j,-1) == 0
def test_gammainc_expint_n():
    """Exhaustively exercise the low-level code paths for the upper
    incomplete gamma function and the exponential integral with small
    integer index n in -3..3, for small, large, and huge arguments of
    both signs (negative arguments land on the branch cut and produce
    complex values with a known imaginary part)."""
    # These tests are intended to check all cases of the low-level code
    # for upper gamma and expint with small integer index.
    # Need to cover positive/negative arguments; small/large/huge arguments
    # for both positive and negative indices, as well as indices 0 and 1
    # which may be special-cased
    mp.dps = 15
    # expint, small positive argument
    assert expint(-3,3.5).ae(0.021456366563296693987)
    assert expint(-2,3.5).ae(0.014966633183073309405)
    assert expint(-1,3.5).ae(0.011092916359219041088)
    assert expint(0,3.5).ae(0.0086278238349481430685)
    assert expint(1,3.5).ae(0.0069701398575483929193)
    assert expint(2,3.5).ae(0.0058018939208991255223)
    assert expint(3,3.5).ae(0.0049453773495857807058)
    # expint, small negative argument (complex for n >= 1: branch cut)
    assert expint(-3,-3.5).ae(-4.6618170604073311319)
    assert expint(-2,-3.5).ae(-5.5996974157555515963)
    assert expint(-1,-3.5).ae(-6.7582555017739415818)
    assert expint(0,-3.5).ae(-9.4615577024835182145)
    assert expint(1,-3.5).ae(-13.925353995152335292 - 3.1415926535897932385j)
    assert expint(2,-3.5).ae(-15.62328702434085977 - 10.995574287564276335j)
    assert expint(3,-3.5).ae(-10.783026313250347722 - 19.242255003237483586j)
    # expint, large positive argument (asymptotic regime)
    assert expint(-3,350).ae(2.8614825451252838069e-155, abs_eps=0, rel_eps=8*eps)
    assert expint(-2,350).ae(2.8532837224504675901e-155, abs_eps=0, rel_eps=8*eps)
    assert expint(-1,350).ae(2.8451316155828634555e-155, abs_eps=0, rel_eps=8*eps)
    assert expint(0,350).ae(2.8370258275042797989e-155, abs_eps=0, rel_eps=8*eps)
    assert expint(1,350).ae(2.8289659656701459404e-155, abs_eps=0, rel_eps=8*eps)
    assert expint(2,350).ae(2.8209516419468505006e-155, abs_eps=0, rel_eps=8*eps)
    assert expint(3,350).ae(2.8129824725501272171e-155, abs_eps=0, rel_eps=8*eps)
    # expint, large negative argument
    assert expint(-3,-350).ae(-2.8528796154044839443e+149)
    assert expint(-2,-350).ae(-2.8610072121701264351e+149)
    assert expint(-1,-350).ae(-2.8691813842677537647e+149)
    assert expint(0,-350).ae(-2.8774025343659421709e+149)
    u = expint(1,-350)
    assert u.ae(-2.8856710698020863568e+149)
    assert u.imag.ae(-3.1415926535897932385)
    u = expint(2,-350)
    assert u.ae(-2.8939874026504650534e+149)
    assert u.imag.ae(-1099.5574287564276335)
    u = expint(3,-350)
    assert u.ae(-2.9023519497915044349e+149)
    assert u.imag.ae(-192422.55003237483586)
    # expint, huge arguments of both signs
    assert expint(-3,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
    assert expint(-2,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
    assert expint(-1,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
    assert expint(0,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
    assert expint(1,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
    assert expint(2,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
    assert expint(3,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
    assert expint(-3,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871')
    assert expint(-2,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871')
    assert expint(-1,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871')
    assert expint(0,-350000000000000000000000).ae('-3.7805306852415755699e+152003068666138139677871')
    u = expint(1,-350000000000000000000000)
    assert u.ae('-3.7805306852415755699e+152003068666138139677871')
    assert u.imag.ae(-3.1415926535897932385)
    u = expint(2,-350000000000000000000000)
    assert u.imag.ae(-1.0995574287564276335e+24)
    assert u.ae('-3.7805306852415755699e+152003068666138139677871')
    u = expint(3,-350000000000000000000000)
    assert u.imag.ae(-1.9242255003237483586e+47)
    assert u.ae('-3.7805306852415755699e+152003068666138139677871')
    # Small case; no branch cut
    assert gammainc(-3,3.5).ae(0.00010020262545203707109)
    assert gammainc(-2,3.5).ae(0.00040370427343557393517)
    assert gammainc(-1,3.5).ae(0.0016576839773997501492)
    assert gammainc(0,3.5).ae(0.0069701398575483929193)
    assert gammainc(1,3.5).ae(0.03019738342231850074)
    assert gammainc(2,3.5).ae(0.13588822540043325333)
    assert gammainc(3,3.5).ae(0.64169439772426814072)
    # Small case; with branch cut
    assert gammainc(-3,-3.5).ae(0.03595832954467563286 + 0.52359877559829887308j)
    assert gammainc(-2,-3.5).ae(-0.88024704597962022221 - 1.5707963267948966192j)
    assert gammainc(-1,-3.5).ae(4.4637962926688170771 + 3.1415926535897932385j)
    assert gammainc(0,-3.5).ae(-13.925353995152335292 - 3.1415926535897932385j)
    assert gammainc(1,-3.5).ae(33.115451958692313751)
    assert gammainc(2,-3.5).ae(-82.788629896730784377)
    assert gammainc(3,-3.5).ae(240.08702670051927469)
    # Asymptotic case; no branch cut
    assert gammainc(-3,350).ae(6.5424095113340358813e-163, abs_eps=0, rel_eps=8*eps)
    assert gammainc(-2,350).ae(2.296312222489899769e-160, abs_eps=0, rel_eps=8*eps)
    assert gammainc(-1,350).ae(8.059861834133858573e-158, abs_eps=0, rel_eps=8*eps)
    assert gammainc(0,350).ae(2.8289659656701459404e-155, abs_eps=0, rel_eps=8*eps)
    assert gammainc(1,350).ae(9.9295903962649792963e-153, abs_eps=0, rel_eps=8*eps)
    assert gammainc(2,350).ae(3.485286229089007733e-150, abs_eps=0, rel_eps=8*eps)
    assert gammainc(3,350).ae(1.2233453960006379793e-147, abs_eps=0, rel_eps=8*eps)
    # Asymptotic case; branch cut
    u = gammainc(-3,-350)
    assert u.ae(6.7889565783842895085e+141)
    assert u.imag.ae(0.52359877559829887308)
    u = gammainc(-2,-350)
    assert u.ae(-2.3692668977889832121e+144)
    assert u.imag.ae(-1.5707963267948966192)
    u = gammainc(-1,-350)
    assert u.ae(8.2685354361441858669e+146)
    assert u.imag.ae(3.1415926535897932385)
    u = gammainc(0,-350)
    assert u.ae(-2.8856710698020863568e+149)
    assert u.imag.ae(-3.1415926535897932385)
    u = gammainc(1,-350)
    assert u.ae(1.0070908870280797598e+152)
    assert u.imag == 0
    u = gammainc(2,-350)
    assert u.ae(-3.5147471957279983618e+154)
    assert u.imag == 0
    u = gammainc(3,-350)
    assert u.ae(1.2266568422179417091e+157)
    assert u.imag == 0
    # Extreme asymptotic case
    assert gammainc(-3,350000000000000000000000).ae('5.0362468738874738859e-152003068666138139677990', abs_eps=0, rel_eps=8*eps)
    assert gammainc(-2,350000000000000000000000).ae('1.7626864058606158601e-152003068666138139677966', abs_eps=0, rel_eps=8*eps)
    assert gammainc(-1,350000000000000000000000).ae('6.1694024205121555102e-152003068666138139677943', abs_eps=0, rel_eps=8*eps)
    assert gammainc(0,350000000000000000000000).ae('2.1592908471792544286e-152003068666138139677919', abs_eps=0, rel_eps=8*eps)
    assert gammainc(1,350000000000000000000000).ae('7.5575179651273905e-152003068666138139677896', abs_eps=0, rel_eps=8*eps)
    assert gammainc(2,350000000000000000000000).ae('2.645131287794586675e-152003068666138139677872', abs_eps=0, rel_eps=8*eps)
    assert gammainc(3,350000000000000000000000).ae('9.2579595072810533625e-152003068666138139677849', abs_eps=0, rel_eps=8*eps)
    u = gammainc(-3,-350000000000000000000000)
    assert u.ae('8.8175642804468234866e+152003068666138139677800')
    assert u.imag.ae(0.52359877559829887308)
    u = gammainc(-2,-350000000000000000000000)
    assert u.ae('-3.0861474981563882203e+152003068666138139677824')
    assert u.imag.ae(-1.5707963267948966192)
    u = gammainc(-1,-350000000000000000000000)
    assert u.ae('1.0801516243547358771e+152003068666138139677848')
    assert u.imag.ae(3.1415926535897932385)
    u = gammainc(0,-350000000000000000000000)
    assert u.ae('-3.7805306852415755699e+152003068666138139677871')
    assert u.imag.ae(-3.1415926535897932385)
    assert gammainc(1,-350000000000000000000000).ae('1.3231857398345514495e+152003068666138139677895')
    assert gammainc(2,-350000000000000000000000).ae('-4.6311500894209300731e+152003068666138139677918')
    assert gammainc(3,-350000000000000000000000).ae('1.6209025312973255256e+152003068666138139677942')
def test_incomplete_beta():
    """Spot-check the generalized incomplete beta function betainc for
    negative, fractional-complex, and integer parameters."""
    mp.dps = 15
    assert betainc(-2,-3,0.5,0.75).ae(63.4305673311255413583969)
    assert betainc(4.5,0.5+2j,2.5,6).ae(0.2628801146130621387903065 + 0.5162565234467020592855378j)
    assert betainc(4,5,0,6).ae(90747.77142857142857142857)
def test_erf():
    """Test erf, erfc, erfi and erfinv: special values and limits at
    +-inf, real/complex return types, large arguments via the asymptotic
    series (verified at dps=50), extreme-magnitude erfc, and erfinv
    round-trips including near-saturation inputs."""
    mp.dps = 15
    assert erf(0) == 0
    assert erf(1).ae(0.84270079294971486934)
    assert erf(3+4j).ae(-120.186991395079444098 - 27.750337293623902498j)
    assert erf(-4-3j).ae(-0.99991066178539168236 + 0.00004972026054496604j)
    assert erf(pi).ae(0.99999112385363235839)
    assert erf(1j).ae(1.6504257587975428760j)
    assert erf(-1j).ae(-1.6504257587975428760j)
    # Real input stays mpf; complex input (even 0j) stays mpc
    assert isinstance(erf(1), mpf)
    assert isinstance(erf(-1), mpf)
    assert isinstance(erf(0), mpf)
    assert isinstance(erf(0j), mpc)
    assert erf(inf) == 1
    assert erf(-inf) == -1
    assert erfi(0) == 0
    assert erfi(1/pi).ae(0.371682698493894314)
    assert erfi(inf) == inf
    assert erfi(-inf) == -inf
    assert erf(1+0j) == erf(1)
    assert erfc(1+0j) == erfc(1)
    assert erf(0.2+0.5j).ae(1 - erfc(0.2+0.5j))
    assert erfc(0) == 1
    assert erfc(1).ae(1-erf(1))
    assert erfc(-1).ae(1-erf(-1))
    assert erfc(1/pi).ae(1-erf(1/pi))
    assert erfc(-10) == 2
    assert erfc(-1000000) == 2
    assert erfc(-inf) == 2
    assert erfc(inf) == 0
    assert isnan(erfc(nan))
    assert (erfc(10**4)*mpf(10)**43429453).ae('3.63998738656420')
    assert erf(8+9j).ae(-1072004.2525062051158 + 364149.91954310255423j)
    assert erfc(8+9j).ae(1072005.2525062051158 - 364149.91954310255423j)
    assert erfc(-8-9j).ae(-1072003.2525062051158 + 364149.91954310255423j)
    mp.dps = 50
    # This one does not use the asymptotic series
    assert (erfc(10)*10**45).ae('2.0884875837625447570007862949577886115608181193212')
    # This one does
    assert (erfc(50)*10**1088).ae('2.0709207788416560484484478751657887929322509209954')
    mp.dps = 15
    assert str(erfc(10**50)) == '3.66744826532555e-4342944819032518276511289189166050822943970058036665661144537831658646492088707747292249493384317534'
    assert erfinv(0) == 0
    assert erfinv(0.5).ae(0.47693627620446987338)
    assert erfinv(-0.5).ae(-0.47693627620446987338)
    assert erfinv(1) == inf
    assert erfinv(-1) == -inf
    assert erf(erfinv(0.95)).ae(0.95)
    assert erf(erfinv(0.999999999995)).ae(0.999999999995)
    assert erf(erfinv(-0.999999999995)).ae(-0.999999999995)
    mp.dps = 50
    assert erf(erfinv('0.99999999999999999999999999999995')).ae('0.99999999999999999999999999999995')
    assert erf(erfinv('0.999999999999999999999999999999995')).ae('0.999999999999999999999999999999995')
    assert erf(erfinv('-0.999999999999999999999999999999995')).ae('-0.999999999999999999999999999999995')
    mp.dps = 15
    # Complex asymptotic expansions
    v = erfc(50j)
    assert v.real == 1
    assert v.imag.ae('-6.1481820666053078736e+1083')
    assert erfc(-100+5j).ae(2)
    assert (erfc(100+5j)*10**4335).ae(2.3973567853824133572 - 3.9339259530609420597j)
    assert erfc(100+100j).ae(0.00065234366376857698698 - 0.0039357263629214118437j)
def test_pdf():
    """Test the normal distribution helpers npdf and ncdf: tail limits,
    shift invariance, normalization (integral over R equals 1 via
    quadts), and an accuracy check deep in the lower tail."""
    mp.dps = 15
    assert npdf(-inf) == 0
    assert npdf(inf) == 0
    # Density is invariant under a simultaneous shift of x and the mean
    assert npdf(5,0,2).ae(npdf(5+4,4,2))
    assert quadts(lambda x: npdf(x,-0.5,0.8), [-inf, inf]) == 1
    assert ncdf(0) == 0.5
    assert ncdf(3,3) == 0.5
    assert ncdf(-inf) == 0
    assert ncdf(inf) == 1
    assert ncdf(10) == 1
    # Verify that this is computed accurately
    assert (ncdf(-10)*10**24).ae(7.619853024160526)
def test_lambertw():
    """Test the Lambert W function across branches: special values
    (0, e, -pi/2, -log(2)/2), infinities and NaN propagation, non-principal
    branches for real and complex arguments, branch-point-adjacent inputs
    that once hit a branch-selection bug, a dps=50 value of W(pi), and a
    huge (10**20) branch index."""
    mp.dps = 15
    assert lambertw(0) == 0
    assert lambertw(0+0j) == 0
    assert lambertw(inf) == inf
    assert isnan(lambertw(nan))
    assert lambertw(inf,1).real == inf
    assert lambertw(inf,1).imag.ae(2*pi)
    assert lambertw(-inf,1).real == inf
    assert lambertw(-inf,1).imag.ae(3*pi)
    # All nonzero branches diverge to -inf at the origin
    assert lambertw(0,-1) == -inf
    assert lambertw(0,1) == -inf
    assert lambertw(0,3) == -inf
    assert lambertw(e).ae(1)
    # Omega constant: W(1)
    assert lambertw(1).ae(0.567143290409783873)
    assert lambertw(-pi/2).ae(j*pi/2)
    assert lambertw(-log(2)/2).ae(-log(2))
    assert lambertw(0.25).ae(0.203888354702240164)
    assert lambertw(-0.25).ae(-0.357402956181388903)
    assert lambertw(-1./10000,0).ae(-0.000100010001500266719)
    assert lambertw(-0.25,-1).ae(-2.15329236411034965)
    assert lambertw(0.25,-1).ae(-3.00899800997004620-4.07652978899159763j)
    assert lambertw(-0.25,-1).ae(-2.15329236411034965)
    assert lambertw(0.25,1).ae(-3.00899800997004620+4.07652978899159763j)
    assert lambertw(-0.25,1).ae(-3.48973228422959210+7.41405453009603664j)
    assert lambertw(-4).ae(0.67881197132094523+1.91195078174339937j)
    assert lambertw(-4,1).ae(-0.66743107129800988+7.76827456802783084j)
    assert lambertw(-4,-1).ae(0.67881197132094523-1.91195078174339937j)
    assert lambertw(1000).ae(5.24960285240159623)
    assert lambertw(1000,1).ae(4.91492239981054535+5.44652615979447070j)
    assert lambertw(1000,-1).ae(4.91492239981054535-5.44652615979447070j)
    assert lambertw(1000,5).ae(3.5010625305312892+29.9614548941181328j)
    assert lambertw(3+4j).ae(1.281561806123775878+0.533095222020971071j)
    assert lambertw(-0.4+0.4j).ae(-0.10396515323290657+0.61899273315171632j)
    assert lambertw(3+4j,1).ae(-0.11691092896595324+5.61888039871282334j)
    assert lambertw(3+4j,-1).ae(0.25856740686699742-3.85211668616143559j)
    assert lambertw(-0.5,-1).ae(-0.794023632344689368-0.770111750510379110j)
    assert lambertw(-1./10000,1).ae(-11.82350837248724344+6.80546081842002101j)
    assert lambertw(-1./10000,-1).ae(-11.6671145325663544)
    assert lambertw(-1./10000,-2).ae(-11.82350837248724344-6.80546081842002101j)
    assert lambertw(-1./100000,4).ae(-14.9186890769540539+26.1856750178782046j)
    assert lambertw(-1./100000,5).ae(-15.0931437726379218666+32.5525721210262290086j)
    assert lambertw((2+j)/10).ae(0.173704503762911669+0.071781336752835511j)
    assert lambertw((2+j)/10,1).ae(-3.21746028349820063+4.56175438896292539j)
    assert lambertw((2+j)/10,-1).ae(-3.03781405002993088-3.53946629633505737j)
    assert lambertw((2+j)/10,4).ae(-4.6878509692773249+23.8313630697683291j)
    assert lambertw(-(2+j)/10).ae(-0.226933772515757933-0.164986470020154580j)
    assert lambertw(-(2+j)/10,1).ae(-2.43569517046110001+0.76974067544756289j)
    assert lambertw(-(2+j)/10,-1).ae(-3.54858738151989450-6.91627921869943589j)
    assert lambertw(-(2+j)/10,4).ae(-4.5500846928118151+20.6672982215434637j)
    mp.dps = 50
    assert lambertw(pi).ae('1.073658194796149172092178407024821347547745350410314531')
    mp.dps = 15
    # Former bug in generated branch
    assert lambertw(-0.5+0.002j).ae(-0.78917138132659918344 + 0.76743539379990327749j)
    assert lambertw(-0.5-0.002j).ae(-0.78917138132659918344 - 0.76743539379990327749j)
    assert lambertw(-0.448+0.4j).ae(-0.11855133765652382241 + 0.66570534313583423116j)
    assert lambertw(-0.448-0.4j).ae(-0.11855133765652382241 - 0.66570534313583423116j)
    assert lambertw(-0.65475+0.0001j).ae(-0.61053421111385310898+1.0396534993944097723803j)
    # Huge branch index
    w = lambertw(1,10**20)
    assert w.real.ae(-47.889578926290259164)
    assert w.imag.ae(6.2831853071795864769e+20)
def test_lambertw_hard():
    """Stress-test lambertw in numerically hard regimes.

    Covers evaluation very close to 0 (on several branches) and evaluation
    extremely close to the branch point -1/e on branches -1, 0 and 1, where
    the series expansions are delicate.  Reference values are compared with a
    relative tolerance of 8*eps on the real and imaginary parts separately.
    """
    def check(x,y):
        # Compare x against reference y, componentwise, to within 8 ulp-ish
        # relative tolerance.  A zero reference component demands exactness.
        y = convert(y)
        type_ok = True
        if isinstance(y, mpf):
            type_ok = isinstance(x, mpf)
        real_ok = abs(x.real-y.real) <= abs(y.real)*8*eps
        imag_ok = abs(x.imag-y.imag) <= abs(y.imag)*8*eps
        #print x, y, abs(x.real-y.real), abs(x.imag-y.imag)
        # NOTE(review): type_ok is computed above but not folded into the
        # result, so a complex value with a real reference is not rejected —
        # confirm whether the type check was meant to be enforced.
        return real_ok and imag_ok
    # Evaluation near 0
    mp.dps = 15
    assert check(lambertw(1e-10), 9.999999999000000000e-11)
    assert check(lambertw(-1e-10), -1.000000000100000000e-10)
    assert check(lambertw(1e-10j), 9.999999999999999999733e-21 + 9.99999999999999999985e-11j)
    assert check(lambertw(-1e-10j), 9.999999999999999999733e-21 - 9.99999999999999999985e-11j)
    assert check(lambertw(1e-10,1), -26.303186778379041559 + 3.265093911703828397j)
    assert check(lambertw(-1e-10,1), -26.326236166739163892 + 6.526183280686333315j)
    assert check(lambertw(1e-10j,1), -26.312931726911421551 + 4.896366881798013421j)
    assert check(lambertw(-1e-10j,1), -26.297238779529035066 + 1.632807161345576513j)
    assert check(lambertw(1e-10,-1), -26.303186778379041559 - 3.265093911703828397j)
    assert check(lambertw(-1e-10,-1), -26.295238819246925694)
    assert check(lambertw(1e-10j,-1), -26.297238779529035028 - 1.6328071613455765135j)
    assert check(lambertw(-1e-10j,-1), -26.312931726911421551 - 4.896366881798013421j)
    # Test evaluation very close to the branch point -1/e
    # on the -1, 0, and 1 branches
    # Exact-arithmetic helpers so the tiny offsets below are not lost
    # to rounding before lambertw sees them.
    add = lambda x, y: fadd(x,y,exact=True)
    sub = lambda x, y: fsub(x,y,exact=True)
    addj = lambda x, y: fadd(x,fmul(y,1j,exact=True),exact=True)
    subj = lambda x, y: fadd(x,fmul(y,-1j,exact=True),exact=True)
    mp.dps = 1500
    a = -1/e + 10*eps
    d3 = mpf('1e-3')
    d10 = mpf('1e-10')
    d20 = mpf('1e-20')
    d40 = mpf('1e-40')
    d80 = mpf('1e-80')
    d300 = mpf('1e-300')
    d1000 = mpf('1e-1000')
    mp.dps = 15
    # ---- Branch 0 ----
    # -1/e + eps
    assert check(lambertw(add(a,d3)), -0.92802015005456704876)
    assert check(lambertw(add(a,d10)), -0.99997668374140088071)
    assert check(lambertw(add(a,d20)), -0.99999999976683560186)
    assert lambertw(add(a,d40)) == -1
    assert lambertw(add(a,d80)) == -1
    assert lambertw(add(a,d300)) == -1
    assert lambertw(add(a,d1000)) == -1
    # -1/e - eps
    assert check(lambertw(sub(a,d3)), -0.99819016149860989001+0.07367191188934638577j)
    assert check(lambertw(sub(a,d10)), -0.9999999998187812114595992+0.0000233164398140346109194j)
    assert check(lambertw(sub(a,d20)), -0.99999999999999999998187+2.331643981597124203344e-10j)
    assert check(lambertw(sub(a,d40)), -1.0+2.33164398159712420336e-20j)
    assert check(lambertw(sub(a,d80)), -1.0+2.33164398159712420336e-40j)
    assert check(lambertw(sub(a,d300)), -1.0+2.33164398159712420336e-150j)
    assert check(lambertw(sub(a,d1000)), mpc(-1,'2.33164398159712420336e-500'))
    # -1/e + eps*j
    assert check(lambertw(addj(a,d3)), -0.94790387486938526634+0.05036819639190132490j)
    assert check(lambertw(addj(a,d10)), -0.9999835127872943680999899+0.0000164870314895821225256j)
    assert check(lambertw(addj(a,d20)), -0.999999999835127872929987+1.64872127051890935830e-10j)
    assert check(lambertw(addj(a,d40)), -0.9999999999999999999835+1.6487212707001281468305e-20j)
    assert check(lambertw(addj(a,d80)), -1.0 + 1.64872127070012814684865e-40j)
    assert check(lambertw(addj(a,d300)), -1.0 + 1.64872127070012814684865e-150j)
    assert check(lambertw(addj(a,d1000)), mpc(-1.0,'1.64872127070012814684865e-500'))
    # -1/e - eps*j
    assert check(lambertw(subj(a,d3)), -0.94790387486938526634-0.05036819639190132490j)
    assert check(lambertw(subj(a,d10)), -0.9999835127872943680999899-0.0000164870314895821225256j)
    assert check(lambertw(subj(a,d20)), -0.999999999835127872929987-1.64872127051890935830e-10j)
    assert check(lambertw(subj(a,d40)), -0.9999999999999999999835-1.6487212707001281468305e-20j)
    assert check(lambertw(subj(a,d80)), -1.0 - 1.64872127070012814684865e-40j)
    assert check(lambertw(subj(a,d300)), -1.0 - 1.64872127070012814684865e-150j)
    assert check(lambertw(subj(a,d1000)), mpc(-1.0,'-1.64872127070012814684865e-500'))
    # ---- Branch 1 ----
    assert check(lambertw(addj(a,d3),1), -3.088501303219933378005990 + 7.458676867597474813950098j)
    assert check(lambertw(addj(a,d80),1), -3.088843015613043855957087 + 7.461489285654254556906117j)
    assert check(lambertw(addj(a,d300),1), -3.088843015613043855957087 + 7.461489285654254556906117j)
    assert check(lambertw(addj(a,d1000),1), -3.088843015613043855957087 + 7.461489285654254556906117j)
    assert check(lambertw(subj(a,d3),1), -1.0520914180450129534365906 + 0.0539925638125450525673175j)
    assert check(lambertw(subj(a,d10),1), -1.0000164872127056318529390 + 0.000016487393927159250398333077j)
    assert check(lambertw(subj(a,d20),1), -1.0000000001648721270700128 + 1.64872127088134693542628e-10j)
    assert check(lambertw(subj(a,d40),1), -1.000000000000000000016487 + 1.64872127070012814686677e-20j)
    assert check(lambertw(subj(a,d80),1), -1.0 + 1.64872127070012814684865e-40j)
    assert check(lambertw(subj(a,d300),1), -1.0 + 1.64872127070012814684865e-150j)
    assert check(lambertw(subj(a,d1000),1), mpc(-1.0, '1.64872127070012814684865e-500'))
    # ---- Branch -1 ----
    # -1/e + eps
    assert check(lambertw(add(a,d3),-1), -1.075608941186624989414945)
    assert check(lambertw(add(a,d10),-1), -1.000023316621036696460620)
    assert check(lambertw(add(a,d20),-1), -1.000000000233164398177834)
    assert lambertw(add(a,d40),-1) == -1
    assert lambertw(add(a,d80),-1) == -1
    assert lambertw(add(a,d300),-1) == -1
    assert lambertw(add(a,d1000),-1) == -1
    # -1/e - eps
    assert check(lambertw(sub(a,d3),-1), -0.99819016149860989001-0.07367191188934638577j)
    assert check(lambertw(sub(a,d10),-1), -0.9999999998187812114595992-0.0000233164398140346109194j)
    assert check(lambertw(sub(a,d20),-1), -0.99999999999999999998187-2.331643981597124203344e-10j)
    assert check(lambertw(sub(a,d40),-1), -1.0-2.33164398159712420336e-20j)
    assert check(lambertw(sub(a,d80),-1), -1.0-2.33164398159712420336e-40j)
    assert check(lambertw(sub(a,d300),-1), -1.0-2.33164398159712420336e-150j)
    assert check(lambertw(sub(a,d1000),-1), mpc(-1,'-2.33164398159712420336e-500'))
    # -1/e + eps*j
    assert check(lambertw(addj(a,d3),-1), -1.0520914180450129534365906 - 0.0539925638125450525673175j)
    assert check(lambertw(addj(a,d10),-1), -1.0000164872127056318529390 - 0.0000164873939271592503983j)
    assert check(lambertw(addj(a,d20),-1), -1.0000000001648721270700 - 1.64872127088134693542628e-10j)
    assert check(lambertw(addj(a,d40),-1), -1.00000000000000000001648 - 1.6487212707001281468667726e-20j)
    assert check(lambertw(addj(a,d80),-1), -1.0 - 1.64872127070012814684865e-40j)
    assert check(lambertw(addj(a,d300),-1), -1.0 - 1.64872127070012814684865e-150j)
    assert check(lambertw(addj(a,d1000),-1), mpc(-1.0,'-1.64872127070012814684865e-500'))
    # -1/e - eps*j
    assert check(lambertw(subj(a,d3),-1), -3.088501303219933378005990-7.458676867597474813950098j)
    assert check(lambertw(subj(a,d10),-1), -3.088843015579260686911033-7.461489285372968780020716j)
    assert check(lambertw(subj(a,d20),-1), -3.088843015613043855953708-7.461489285654254556877988j)
    assert check(lambertw(subj(a,d40),-1), -3.088843015613043855957087-7.461489285654254556906117j)
    assert check(lambertw(subj(a,d80),-1), -3.088843015613043855957087 - 7.461489285654254556906117j)
    assert check(lambertw(subj(a,d300),-1), -3.088843015613043855957087 - 7.461489285654254556906117j)
    assert check(lambertw(subj(a,d1000),-1), -3.088843015613043855957087 - 7.461489285654254556906117j)
    # One more case, testing higher precision
    mp.dps = 500
    x = -1/e + mpf('1e-13')
    ans = "-0.99999926266961377166355784455394913638782494543377383"\
    "744978844374498153493943725364881490261187530235150668593869563"\
    "168276697689459394902153960200361935311512317183678882"
    # The same reference value must be reproduced at increasing precision
    mp.dps = 15
    assert lambertw(x).ae(ans)
    mp.dps = 50
    assert lambertw(x).ae(ans)
    mp.dps = 150
    assert lambertw(x).ae(ans)
def test_meijerg():
    """Spot-check meijerg values and cross-check it against hyp1f1."""
    mp.dps = 15
    assert meijerg([[2,3],[1]],[[0.5,2],[3,4]], 2.5).ae(4.2181028074787439386)
    assert meijerg([[],[1+j]],[[1],[1]], 3+4j).ae(271.46290321152464592 - 703.03330399954820169j)
    assert meijerg([[0.25],[1]],[[0.5],[2]],0) == 0
    assert meijerg([[0],[]],[[0,0,'1/3','2/3'], []], '2/27').ae(2.2019391389653314120)
    # These arguments force the 1/z series; the second call states the same
    # function through the inverted-argument parameter transformation.
    assert meijerg([[-3],[-0.5]], [[-1],[-2.5]], -0.5).ae(-1.338096165935754898687431)
    assert meijerg([[1-(-1)],[1-(-2.5)]], [[1-(-3)],[1-(-0.5)]], -2.0).ae(-1.338096165935754898687431)
    assert meijerg([[-3],[-0.5]], [[-1],[-2.5]], -1).ae(-(pi+4)/(4*pi))
    # Consistency with 1F1 via the two standard G-function representations
    # (direct argument and inverted argument).
    a, b = 2.5, 1.25
    prefactor = gamma(b) / gamma(a)
    for z in (mpf(0.25), mpf(2)):
        direct = hyp1f1(a, b, z)
        via_g = prefactor * meijerg([[1-a], []], [[0], [1-b]], -z)
        via_g_inverted = prefactor * meijerg([[1-0], [1-(1-b)]], [[1-(1-a)], []], -1/z)
        assert direct.ae(via_g)
        assert direct.ae(via_g_inverted)
def test_appellf1():
    """Exact-value regression checks for the Appell F1 hypergeometric function."""
    mp.dps = 15
    assert appellf1(2,-2,1,1,2,3).ae(-1.75)
    assert appellf1(2,1,-2,1,2,3).ae(-8)
    assert appellf1(2,1,-2,1,0.5,0.25).ae(1.5)
    assert appellf1(-2,1,3,2,3,3).ae(19)
    assert appellf1(1,2,3,4,0.5,0.125).ae( 1.53843285792549786518)
def test_coulomb():
    """Regression check for coulombg at a complex (but real-valued) order."""
    # Note: most tests are doctests
    # Test for a bug:
    mp.dps = 15
    assert coulombg(mpc(-5,0),2,3).ae(20.087729487721430394)
def test_hyper_param_accuracy():
    """Check hypergeometric evaluation accuracy for near-degenerate and
    large parameters, including cancellation-heavy cases and the
    zeroprec/infprec/maxterms control parameters."""
    mp.dps = 15
    # Parameters perturbed slightly off negative integers (near-degenerate)
    As = [n+1e-10 for n in range(-5,-1)]
    Bs = [n+1e-10 for n in range(-12,-5)]
    assert hyper(As,Bs,10).ae(-381757055858.652671927)
    assert legenp(0.5, 100, 0.25).ae(-2.4124576567211311755e+144)
    # Large parameters with heavy cancellation; results scaled into O(1)
    assert (hyp1f1(1000,1,-100)*10**24).ae(5.2589445437370169113)
    assert (hyp2f1(10, -900, 10.5, 0.99)*10**24).ae(1.9185370579660768203)
    assert (hyp2f1(1000,1.5,-3.5,-1.5)*10**385).ae(-2.7367529051334000764)
    assert hyp2f1(-5, 10, 3, 0.5, zeroprec=500) == 0
    assert (hyp1f1(-10000, 1000, 100)*10**424).ae(-3.1046080515824859974)
    assert (hyp2f1(1000,1.5,-3.5,-0.75,maxterms=100000)*10**231).ae(-4.0534790813913998643)
    assert legenp(2, 3, 0.25) == 0
    # Divergent combination: must raise unless infprec permits returning inf
    pytest.raises(ValueError, lambda: hypercomb(lambda a: [([],[],[],[],[a],[-a],0.5)], [3]))
    assert hypercomb(lambda a: [([],[],[],[],[a],[-a],0.5)], [3], infprec=200) == inf
    assert meijerg([[],[]],[[0,0,0,0],[]],0.1).ae(1.5680822343832351418)
    assert (besselk(400,400)*10**94).ae(1.4387057277018550583)
    mp.dps = 5
    # BUG FIX: these four .ae() comparisons were previously evaluated but
    # never asserted, so regressions here went undetected.
    assert (hyp1f1(-5000.5, 1500, 100)*10**185).ae(8.5185229673381935522)
    assert (hyp1f1(-5000, 1500, 100)*10**185).ae(9.1501213424563944311)
    mp.dps = 15
    assert (hyp1f1(-5000.5, 1500, 100)*10**185).ae(8.5185229673381935522)
    assert (hyp1f1(-5000, 1500, 100)*10**185).ae(9.1501213424563944311)
    # A lower parameter a hair away from a negative integer, given both as an
    # exact mpf sum and as an exact rational
    assert hyp0f1(fadd(-20,'1e-100',exact=True), 0.25).ae(1.85014429040102783e+49)
    assert hyp0f1((-20*10**100+1, 10**100), 0.25).ae(1.85014429040102783e+49)
def test_hypercomb_zero_pow():
    """Verify hypercomb's convention that 0^0 evaluates to 1."""
    # check that 0^0 = 1
    assert hypercomb(lambda a: (([0],[a],[],[],[],[],0),), [0]) == 1
    # A meijerg evaluation that relies on the same convention internally
    assert meijerg([[-1.5],[]],[[0],[-0.75]],0).ae(1.4464090846320771425)
def test_spherharm():
    """Exact-value regression checks for the spherical harmonics spherharm.

    Covers l = 0..3 at a fixed angle pair, the |m| > l zero cases, and
    non-integer / negative / complex degrees and orders.
    """
    mp.dps = 15
    t = 0.5; r = 0.25
    # Reference values for integer degrees l = 0..3 at (theta, phi) = (t, r)
    assert spherharm(0,0,t,r).ae(0.28209479177387814347)
    assert spherharm(1,-1,t,r).ae(0.16048941205971996369 - 0.04097967481096344271j)
    assert spherharm(1,0,t,r).ae(0.42878904414183579379)
    assert spherharm(1,1,t,r).ae(-0.16048941205971996369 - 0.04097967481096344271j)
    assert spherharm(2,-2,t,r).ae(0.077915886919031181734 - 0.042565643022253962264j)
    assert spherharm(2,-1,t,r).ae(0.31493387233497459884 - 0.08041582001959297689j)
    assert spherharm(2,0,t,r).ae(0.41330596756220761898)
    assert spherharm(2,1,t,r).ae(-0.31493387233497459884 - 0.08041582001959297689j)
    assert spherharm(2,2,t,r).ae(0.077915886919031181734 + 0.042565643022253962264j)
    assert spherharm(3,-3,t,r).ae(0.033640236589690881646 - 0.031339125318637082197j)
    assert spherharm(3,-2,t,r).ae(0.18091018743101461963 - 0.09883168583167010241j)
    assert spherharm(3,-1,t,r).ae(0.42796713930907320351 - 0.10927795157064962317j)
    assert spherharm(3,0,t,r).ae(0.27861659336351639787)
    assert spherharm(3,1,t,r).ae(-0.42796713930907320351 - 0.10927795157064962317j)
    assert spherharm(3,2,t,r).ae(0.18091018743101461963 + 0.09883168583167010241j)
    assert spherharm(3,3,t,r).ae(-0.033640236589690881646 - 0.031339125318637082197j)
    # Integer orders with |m| > l must give exactly zero
    assert spherharm(0,-1,t,r) == 0
    assert spherharm(0,-2,t,r) == 0
    assert spherharm(0,1,t,r) == 0
    assert spherharm(0,2,t,r) == 0
    assert spherharm(1,2,t,r) == 0
    assert spherharm(1,3,t,r) == 0
    assert spherharm(1,-2,t,r) == 0
    assert spherharm(1,-3,t,r) == 0
    assert spherharm(2,3,t,r) == 0
    assert spherharm(2,4,t,r) == 0
    assert spherharm(2,-3,t,r) == 0
    assert spherharm(2,-4,t,r) == 0
    # Non-integer, negative, and complex degrees/orders
    assert spherharm(3,4.5,0.5,0.25).ae(-22.831053442240790148 + 10.910526059510013757j)
    assert spherharm(2+3j, 1-j, 1+j, 3+4j).ae(-2.6582752037810116935 - 1.0909214905642160211j)
    assert spherharm(-6,2.5,t,r).ae(0.39383644983851448178 + 0.28414687085358299021j)
    assert spherharm(-3.5, 3, 0.5, 0.25).ae(0.014516852987544698924 - 0.015582769591477628495j)
    assert spherharm(-3, 3, 0.5, 0.25) == 0
    assert spherharm(-6, 3, 0.5, 0.25).ae(-0.16544349818782275459 - 0.15412657723253924562j)
    assert spherharm(-6, 1.5, 0.5, 0.25).ae(0.032208193499767402477 + 0.012678000924063664921j)
    # Boundary angle values
    assert spherharm(3,0,0,1).ae(0.74635266518023078283)
    assert spherharm(3,-2,0,1) == 0
    assert spherharm(3,-2,1,1).ae(-0.16270707338254028971 - 0.35552144137546777097j)
def test_qfunctions():
    """Regression check for the q-Pochhammer symbol qp at a large argument."""
    mp.dps = 15
    assert qp(2,3,100).ae('2.7291482267247332183e2391')
def test_issue_239():
    """Regression test for issue 239: betainc accuracy at high precision,
    plus a hyp2f1 parameter-validation check."""
    mp.prec = 150
    # NOTE(review): x is the exact double nearest 0.55 but is never used
    # below — presumably it documents (or was meant to replace) the 0.55
    # literal passed to betainc; confirm against the original issue.
    x = ldexp(2476979795053773,-52)
    assert betainc(206, 385, 0, 0.55, 1).ae('0.99999999999999999999996570910644857895771110649954')
    mp.dps = 15
    # Degenerate 2F1 parameters must be rejected
    pytest.raises(ValueError, lambda: hyp2f1(-5,5,0.5,0.5))
# Extra stress testing for Bessel functions
# Reference zeros generated with the aid of scipy.special
# jn_zero, jnp_zero, yn_zero, ynp_zero
# V: number of Bessel orders covered by the tables below (rows);
# M: number of zeros tabulated per order (entries per row).
V = 15
M = 15
jn_small_zeros = \
[[2.4048255576957728,
5.5200781102863106,
8.6537279129110122,
11.791534439014282,
14.930917708487786,
18.071063967910923,
21.211636629879259,
24.352471530749303,
27.493479132040255,
30.634606468431975,
33.775820213573569,
36.917098353664044,
40.058425764628239,
43.19979171317673,
46.341188371661814],
[3.8317059702075123,
7.0155866698156188,
10.173468135062722,
13.323691936314223,
16.470630050877633,
19.615858510468242,
22.760084380592772,
25.903672087618383,
29.046828534916855,
32.189679910974404,
35.332307550083865,
38.474766234771615,
41.617094212814451,
44.759318997652822,
47.901460887185447],
[5.1356223018406826,
8.4172441403998649,
11.619841172149059,
14.795951782351261,
17.959819494987826,
21.116997053021846,
24.270112313573103,
27.420573549984557,
30.569204495516397,
33.7165195092227,
36.86285651128381,
40.008446733478192,
43.153453778371463,
46.297996677236919,
49.442164110416873],
[6.3801618959239835,
9.7610231299816697,
13.015200721698434,
16.223466160318768,
19.409415226435012,
22.582729593104442,
25.748166699294978,
28.908350780921758,
32.064852407097709,
35.218670738610115,
38.370472434756944,
41.520719670406776,
44.669743116617253,
47.817785691533302,
50.965029906205183],
[7.5883424345038044,
11.064709488501185,
14.37253667161759,
17.615966049804833,
20.826932956962388,
24.01901952477111,
27.199087765981251,
30.371007667117247,
33.537137711819223,
36.699001128744649,
39.857627302180889,
43.01373772335443,
46.167853512924375,
49.320360686390272,
52.471551398458023],
[8.771483815959954,
12.338604197466944,
15.700174079711671,
18.980133875179921,
22.217799896561268,
25.430341154222704,
28.626618307291138,
31.811716724047763,
34.988781294559295,
38.159868561967132,
41.326383254047406,
44.489319123219673,
47.649399806697054,
50.80716520300633,
53.963026558378149],
[9.9361095242176849,
13.589290170541217,
17.003819667816014,
20.320789213566506,
23.58608443558139,
26.820151983411405,
30.033722386570469,
33.233041762847123,
36.422019668258457,
39.603239416075404,
42.778481613199507,
45.949015998042603,
49.11577372476426,
52.279453903601052,
55.440592068853149],
[11.086370019245084,
14.821268727013171,
18.287582832481726,
21.641541019848401,
24.934927887673022,
28.191188459483199,
31.42279419226558,
34.637089352069324,
37.838717382853611,
41.030773691585537,
44.21540850526126,
47.394165755570512,
50.568184679795566,
53.738325371963291,
56.905249991978781],
[12.225092264004655,
16.037774190887709,
19.554536430997055,
22.94517313187462,
26.266814641176644,
29.54565967099855,
32.795800037341462,
36.025615063869571,
39.240447995178135,
42.443887743273558,
45.638444182199141,
48.825930381553857,
52.007691456686903,
55.184747939289049,
58.357889025269694],
[13.354300477435331,
17.241220382489128,
20.807047789264107,
24.233885257750552,
27.583748963573006,
30.885378967696675,
34.154377923855096,
37.400099977156589,
40.628553718964528,
43.843801420337347,
47.048700737654032,
50.245326955305383,
53.435227157042058,
56.619580266508436,
59.799301630960228],
[14.475500686554541,
18.433463666966583,
22.046985364697802,
25.509450554182826,
28.887375063530457,
32.211856199712731,
35.499909205373851,
38.761807017881651,
42.004190236671805,
45.231574103535045,
48.447151387269394,
51.653251668165858,
54.851619075963349,
58.043587928232478,
61.230197977292681],
[15.589847884455485,
19.61596690396692,
23.275853726263409,
26.773322545509539,
30.17906117878486,
33.526364075588624,
36.833571341894905,
40.111823270954241,
43.368360947521711,
46.608132676274944,
49.834653510396724,
53.050498959135054,
56.257604715114484,
59.457456908388002,
62.651217388202912],
[16.698249933848246,
20.789906360078443,
24.494885043881354,
28.026709949973129,
31.45996003531804,
34.829986990290238,
38.156377504681354,
41.451092307939681,
44.721943543191147,
47.974293531269048,
51.211967004101068,
54.437776928325074,
57.653844811906946,
60.8618046824805,
64.062937824850136],
[17.801435153282442,
21.95624406783631,
25.705103053924724,
29.270630441874802,
32.731053310978403,
36.123657666448762,
39.469206825243883,
42.780439265447158,
46.06571091157561,
49.330780096443524,
52.579769064383396,
55.815719876305778,
59.040934037249271,
62.257189393731728,
65.465883797232125],
[18.899997953174024,
23.115778347252756,
26.907368976182104,
30.505950163896036,
33.993184984781542,
37.408185128639695,
40.772827853501868,
44.100590565798301,
47.400347780543231,
50.678236946479898,
53.93866620912693,
57.184898598119301,
60.419409852130297,
63.644117508962281,
66.860533012260103]]
jnp_small_zeros = \
[[0.0,
3.8317059702075123,
7.0155866698156188,
10.173468135062722,
13.323691936314223,
16.470630050877633,
19.615858510468242,
22.760084380592772,
25.903672087618383,
29.046828534916855,
32.189679910974404,
35.332307550083865,
38.474766234771615,
41.617094212814451,
44.759318997652822],
[1.8411837813406593,
5.3314427735250326,
8.5363163663462858,
11.706004902592064,
14.863588633909033,
18.015527862681804,
21.16436985918879,
24.311326857210776,
27.457050571059246,
30.601922972669094,
33.746182898667383,
36.889987409236811,
40.033444053350675,
43.176628965448822,
46.319597561173912],
[3.0542369282271403,
6.7061331941584591,
9.9694678230875958,
13.170370856016123,
16.347522318321783,
19.512912782488205,
22.671581772477426,
25.826037141785263,
28.977672772993679,
32.127327020443474,
35.275535050674691,
38.422654817555906,
41.568934936074314,
44.714553532819734,
47.859641607992093],
[4.2011889412105285,
8.0152365983759522,
11.345924310743006,
14.585848286167028,
17.78874786606647,
20.9724769365377,
24.144897432909265,
27.310057930204349,
30.470268806290424,
33.626949182796679,
36.781020675464386,
39.933108623659488,
43.083652662375079,
46.232971081836478,
49.381300092370349],
[5.3175531260839944,
9.2823962852416123,
12.681908442638891,
15.964107037731551,
19.196028800048905,
22.401032267689004,
25.589759681386733,
28.767836217666503,
31.938539340972783,
35.103916677346764,
38.265316987088158,
41.423666498500732,
44.579623137359257,
47.733667523865744,
50.886159153182682],
[6.4156163757002403,
10.519860873772308,
13.9871886301403,
17.312842487884625,
20.575514521386888,
23.803581476593863,
27.01030789777772,
30.20284907898166,
33.385443901010121,
36.560777686880356,
39.730640230067416,
42.896273163494417,
46.058566273567043,
49.218174614666636,
52.375591529563596],
[7.501266144684147,
11.734935953042708,
15.268181461097873,
18.637443009666202,
21.931715017802236,
25.183925599499626,
28.409776362510085,
31.617875716105035,
34.81339298429743,
37.999640897715301,
41.178849474321413,
44.352579199070217,
47.521956905768113,
50.687817781723741,
53.85079463676896],
[8.5778364897140741,
12.932386237089576,
16.529365884366944,
19.941853366527342,
23.268052926457571,
26.545032061823576,
29.790748583196614,
33.015178641375142,
36.224380548787162,
39.422274578939259,
42.611522172286684,
45.793999658055002,
48.971070951900596,
52.143752969301988,
55.312820330403446],
[9.6474216519972168,
14.115518907894618,
17.774012366915256,
21.229062622853124,
24.587197486317681,
27.889269427955092,
31.155326556188325,
34.39662855427218,
37.620078044197086,
40.830178681822041,
44.030010337966153,
47.221758471887113,
50.407020967034367,
53.586995435398319,
56.762598475105272],
[10.711433970699945,
15.28673766733295,
19.004593537946053,
22.501398726777283,
25.891277276839136,
29.218563499936081,
32.505247352375523,
35.763792928808799,
39.001902811514218,
42.224638430753279,
45.435483097475542,
48.636922645305525,
51.830783925834728,
55.01844255063594,
58.200955824859509],
[11.770876674955582,
16.447852748486498,
20.223031412681701,
23.760715860327448,
27.182021527190532,
30.534504754007074,
33.841965775135715,
37.118000423665604,
40.371068905333891,
43.606764901379516,
46.828959446564562,
50.040428970943456,
53.243223214220535,
56.438892058982552,
59.628631306921512],
[12.826491228033465,
17.600266557468326,
21.430854238060294,
25.008518704644261,
28.460857279654847,
31.838424458616998,
35.166714427392629,
38.460388720328256,
41.728625562624312,
44.977526250903469,
48.211333836373288,
51.433105171422278,
54.645106240447105,
57.849056857839799,
61.046288512821078],
[13.878843069697276,
18.745090916814406,
22.629300302835503,
26.246047773946584,
29.72897816891134,
33.131449953571661,
36.480548302231658,
39.791940718940855,
43.075486800191012,
46.337772104541405,
49.583396417633095,
52.815686826850452,
56.037118687012179,
59.249577075517968,
62.454525995970462],
[14.928374492964716,
19.88322436109951,
23.81938909003628,
27.474339750968247,
30.987394331665278,
34.414545662167183,
37.784378506209499,
41.113512376883377,
44.412454519229281,
47.688252845993366,
50.945849245830813,
54.188831071035124,
57.419876154678179,
60.641030026538746,
63.853885828967512],
[15.975438807484321,
21.015404934568315,
25.001971500138194,
28.694271223110755,
32.236969407878118,
35.688544091185301,
39.078998185245057,
42.425854432866141,
45.740236776624833,
49.029635055514276,
52.299319390331728,
55.553127779547459,
58.793933759028134,
62.02393848337554,
65.244860767043859]]
yn_small_zeros = \
[[0.89357696627916752,
3.9576784193148579,
7.0860510603017727,
10.222345043496417,
13.361097473872763,
16.500922441528091,
19.64130970088794,
22.782028047291559,
25.922957653180923,
29.064030252728398,
32.205204116493281,
35.346452305214321,
38.487756653081537,
41.629104466213808,
44.770486607221993],
[2.197141326031017,
5.4296810407941351,
8.5960058683311689,
11.749154830839881,
14.897442128336725,
18.043402276727856,
21.188068934142213,
24.331942571356912,
27.475294980449224,
30.618286491641115,
33.761017796109326,
36.90355531614295,
40.045944640266876,
43.188218097393211,
46.330399250701687],
[3.3842417671495935,
6.7938075132682675,
10.023477979360038,
13.209986710206416,
16.378966558947457,
19.539039990286384,
22.69395593890929,
25.845613720902269,
28.995080395650151,
32.143002257627551,
35.289793869635804,
38.435733485446343,
41.581014867297885,
44.725777117640461,
47.870122696676504],
[4.5270246611496439,
8.0975537628604907,
11.396466739595867,
14.623077742393873,
17.81845523294552,
20.997284754187761,
24.166235758581828,
27.328799850405162,
30.486989604098659,
33.642049384702463,
36.794791029185579,
39.945767226378749,
43.095367507846703,
46.2438744334407,
49.391498015725107],
[5.6451478942208959,
9.3616206152445429,
12.730144474090465,
15.999627085382479,
19.22442895931681,
22.424810599698521,
25.610267054939328,
28.785893657666548,
31.954686680031668,
35.118529525584828,
38.278668089521758,
41.435960629910073,
44.591018225353424,
47.744288086361052,
50.896105199722123],
[6.7471838248710219,
10.597176726782031,
14.033804104911233,
17.347086393228382,
20.602899017175335,
23.826536030287532,
27.030134937138834,
30.220335654231385,
33.401105611047908,
36.574972486670962,
39.743627733020277,
42.908248189569535,
46.069679073215439,
49.228543693445843,
52.385312123112282],
[7.8377378223268716,
11.811037107609447,
15.313615118517857,
18.670704965906724,
21.958290897126571,
25.206207715021249,
28.429037095235496,
31.634879502950644,
34.828638524084437,
38.013473399691765,
41.19151880917741,
44.364272633271975,
47.53281875312084,
50.697961822183806,
53.860312300118388],
[8.919605734873789,
13.007711435388313,
16.573915129085334,
19.974342312352426,
23.293972585596648,
26.5667563757203,
29.809531451608321,
33.031769327150685,
36.239265816598239,
39.435790312675323,
42.623910919472727,
45.805442883111651,
48.981708325514764,
52.153694518185572,
55.322154420959698],
[9.9946283820824834,
14.190361295800141,
17.817887841179873,
21.26093227125945,
24.612576377421522,
27.910524883974868,
31.173701563441602,
34.412862242025045,
37.634648706110989,
40.843415321050884,
44.04214994542435,
47.232978012841169,
50.417456447370186,
53.596753874948731,
56.771765754432457],
[11.064090256031013,
15.361301343575925,
19.047949646361388,
22.532765416313869,
25.91620496332662,
29.2394205079349,
32.523270869465881,
35.779715464475261,
39.016196664616095,
42.237627509803703,
45.4474001519274,
48.647941127433196,
51.841036928216499,
55.028034667184916,
58.209970905250097],
[12.128927704415439,
16.522284394784426,
20.265984501212254,
23.791669719454272,
27.206568881574774,
30.555020011020762,
33.859683872746356,
37.133649760307504,
40.385117593813002,
43.619533085646856,
46.840676630553575,
50.051265851897857,
53.253310556711732,
56.448332488918971,
59.637507005589829],
[13.189846995683845,
17.674674253171487,
21.473493977824902,
25.03913093040942,
28.485081336558058,
31.858644293774859,
35.184165245422787,
38.475796636190897,
41.742455848758449,
44.990096293791186,
48.222870660068338,
51.443777308699826,
54.655042589416311,
57.858358441436511,
61.055036135780528],
[14.247395665073945,
18.819555894710682,
22.671697117872794,
26.276375544903892,
29.752925495549038,
33.151412708998983,
36.497763772987645,
39.807134090704376,
43.089121522203808,
46.350163579538652,
49.594769786270069,
52.82620892320143,
56.046916910756961,
59.258751140598783,
62.463155567737854],
[15.30200785858925,
19.957808654258601,
23.861599172945054,
27.504429642227545,
31.011103429019229,
34.434283425782942,
37.801385632318459,
41.128514139788358,
44.425913324440663,
47.700482714581842,
50.957073905278458,
54.199216028087261,
57.429547607017405,
60.65008661807661,
63.862406280068586],
[16.354034360047551,
21.090156519983806,
25.044040298785627,
28.724161640881914,
32.260472459522644,
35.708083982611664,
39.095820003878235,
42.440684315990936,
45.75353669045622,
49.041718113283529,
52.310408280968073,
55.56338698149062,
58.803488508906895,
62.032886550960831,
65.253280088312461]]
ynp_small_zeros = \
[[2.197141326031017,
5.4296810407941351,
8.5960058683311689,
11.749154830839881,
14.897442128336725,
18.043402276727856,
21.188068934142213,
24.331942571356912,
27.475294980449224,
30.618286491641115,
33.761017796109326,
36.90355531614295,
40.045944640266876,
43.188218097393211,
46.330399250701687],
[3.6830228565851777,
6.9414999536541757,
10.123404655436613,
13.285758156782854,
16.440058007293282,
19.590241756629495,
22.738034717396327,
25.884314618788867,
29.029575819372535,
32.174118233366201,
35.318134458192094,
38.461753870997549,
41.605066618873108,
44.74813744908079,
47.891014070791065],
[5.0025829314460639,
8.3507247014130795,
11.574195465217647,
14.760909306207676,
17.931285939466855,
21.092894504412739,
24.249231678519058,
27.402145837145258,
30.552708880564553,
33.70158627151572,
36.849213419846257,
39.995887376143356,
43.141817835750686,
46.287157097544201,
49.432018469138281],
[6.2536332084598136,
9.6987879841487711,
12.972409052292216,
16.19044719506921,
19.38238844973613,
22.559791857764261,
25.728213194724094,
28.890678419054777,
32.048984005266337,
35.204266606440635,
38.357281675961019,
41.508551443818436,
44.658448731963676,
47.807246956681162,
50.95515126455207],
[7.4649217367571329,
11.005169149809189,
14.3317235192331,
17.58443601710272,
20.801062338411128,
23.997004122902644,
27.179886689853435,
30.353960608554323,
33.521797098666792,
36.685048382072301,
39.844826969405863,
43.001910515625288,
46.15685955107263,
49.310088614282257,
52.461911043685864],
[8.6495562436971983,
12.280868725807848,
15.660799304540377,
18.949739756016503,
22.192841809428241,
25.409072788867674,
28.608039283077593,
31.795195353138159,
34.973890634255288,
38.14630522169358,
41.313923188794905,
44.477791768537617,
47.638672065035628,
50.797131066967842,
53.953600129601663],
[9.8147970120105779,
13.532811875789828,
16.965526446046053,
20.291285512443867,
23.56186260680065,
26.799499736027237,
30.015665481543419,
33.216968050039509,
36.407516858984748,
39.590015243560459,
42.766320595957378,
45.937754257017323,
49.105283450953203,
52.269633324547373,
55.431358715604255],
[10.965152105242974,
14.765687379508912,
18.250123150217555,
21.612750053384621,
24.911310600813573,
28.171051927637585,
31.40518108895689,
34.621401012564177,
37.824552065973114,
41.017847386464902,
44.203512240871601,
47.3831408366063,
50.557907466622796,
53.728697478957026,
56.896191727313342],
[12.103641941939539,
15.982840905145284,
19.517731005559611,
22.916962141504605,
26.243700855690533,
29.525960140695407,
32.778568197561124,
36.010261572392516,
39.226578757802172,
42.43122493258747,
45.626783824134354,
48.815117837929515,
51.997606404328863,
55.175294723956816,
58.348990221754937],
[13.232403808592215,
17.186756572616758,
20.770762917490496,
24.206152448722253,
27.561059462697153,
30.866053571250639,
34.137476603379774,
37.385039772270268,
40.614946085165892,
43.831373184731238,
47.037251786726299,
50.234705848765229,
53.425316228549359,
56.610286079882087,
59.790548623216652],
[14.35301374369987,
18.379337301642568,
22.011118775283494,
25.482116178696707,
28.865046588695164,
32.192853922166294,
35.483296655830277,
38.747005493021857,
41.990815194320955,
45.219355876831731,
48.435892856078888,
51.642803925173029,
54.84186659475857,
58.034439083840155,
61.221578745109862],
[15.466672066554263,
19.562077985759503,
23.240325531101082,
26.746322986645901,
30.157042415639891,
33.507642948240263,
36.817212798512775,
40.097251300178642,
43.355193847719752,
46.596103410173672,
49.823567279972794,
53.040208868780832,
56.247996968470062,
59.448441365714251,
62.642721301357187],
[16.574317035530872,
20.73617763753932,
24.459631728238804,
27.999993668839644,
31.438208790267783,
34.811512070805535,
38.140243708611251,
41.436725143893739,
44.708963264433333,
47.962435051891027,
51.201037321915983,
54.427630745992975,
57.644369734615238,
60.852911791989989,
64.054555435720397],
[17.676697936439624,
21.9026148697762,
25.670073356263225,
29.244155124266438,
32.709534477396028,
36.105399554497548,
39.453272918267025,
42.766255701958017,
46.052899215578358,
49.319076602061401,
52.568982147952547,
55.805705507386287,
59.031580956740466,
62.248409689597653,
65.457606670836759],
[18.774423978290318,
23.06220035979272,
26.872520985976736,
30.479680663499762,
33.971869047372436,
37.390118854896324,
40.757072537673599,
44.086572292170345,
47.387688809191869,
50.66667461073936,
53.928009929563275,
57.175005343085052,
60.410169281219877,
63.635442539153021,
66.85235358587768]]
@pytest.mark.slow
def test_bessel_zeros_extra():
    """Compare besseljzero/besselyzero against the tabulated reference zeros.

    Each zero is requested twice in a row so that a cached second lookup
    (if the implementation caches) is exercised as well.
    """
    mp.dps = 15
    # (zero finder, derivative flag, reference table) — checked in this order
    # for every (order, index) pair, matching the original assertion order.
    cases = (
        (besseljzero, 0, jn_small_zeros),
        (besseljzero, 1, jnp_small_zeros),
        (besselyzero, 0, yn_small_zeros),
        (besselyzero, 1, ynp_small_zeros),
    )
    for order in range(V):
        for index in range(1, M + 1):
            print(order, index, "of", V, M)
            for zero_fn, derivative, table in cases:
                reference = table[order][index - 1]
                # First call computes, second may hit a cache
                assert zero_fn(order, index, derivative).ae(reference)
                assert zero_fn(order, index, derivative).ae(reference)
| bsd-3-clause | 5ccec1b07dcf07364153fde21c426193 | 39.683725 | 255 | 0.706516 | 2.173445 | false | false | false | false |
fatiando/fatiando | cookbook/seismic_wavefd_rayleigh_wave.py | 9 | 2865 | """
Seismic: 2D finite difference simulation of elastic P and SV wave propagation
in a medium with a discontinuity (i.e., Moho), generating Rayleigh waves
"""
import numpy as np
from matplotlib import animation
from fatiando import gridder
from fatiando.seismic import wavefd
from fatiando.vis import mpl
# Set the parameters of the finite difference grid
shape = (150, 900)
area = [0, 540000, 0, 90000]
# Make a density and wave velocity model
density = 2400 * np.ones(shape)
svel = 3700 * np.ones(shape)
pvel = 6600 * np.ones(shape)
moho = 50
density[moho:] = 2800
svel[moho:] = 4300
pvel[moho:] = 7500
mu = wavefd.lame_mu(svel, density)
lamb = wavefd.lame_lamb(pvel, svel, density)
# Make a wave source from a mexican hat wavelet for the x and z directions
sources = [
[wavefd.MexHatSource(10000, 10000, area, shape, 100000, 0.5, delay=2)],
[wavefd.MexHatSource(10000, 10000, area, shape, 100000, 0.5, delay=2)]]
# Get the iterator. This part only generates an iterator object. The actual
# computations take place at each iteration in the for loop below
dt = wavefd.maxdt(area, shape, pvel.max())
duration = 130
maxit = int(duration / dt)
stations = [[400000, 0]]
snapshots = int(1. / dt)
simulation = wavefd.elastic_psv(lamb, mu, density, area, dt, maxit, sources,
stations, snapshots, padding=70, taper=0.005,
xz2ps=True)
# This part makes an animation using matplotlibs animation API
background = 10 ** -5 * ((density - density.min()) / density.max())
fig = mpl.figure(figsize=(10, 8))
mpl.subplots_adjust(right=0.98, left=0.11, hspace=0.3, top=0.93)
mpl.subplot(3, 1, 1)
mpl.title('x seismogram')
xseismogram, = mpl.plot([], [], '-k')
mpl.xlim(0, duration)
mpl.ylim(-0.05, 0.05)
mpl.ylabel('Amplitude')
mpl.subplot(3, 1, 2)
mpl.title('z seismogram')
zseismogram, = mpl.plot([], [], '-k')
mpl.xlim(0, duration)
mpl.ylim(-0.05, 0.05)
mpl.ylabel('Amplitude')
ax = mpl.subplot(3, 1, 3)
mpl.title('time: 0.0 s')
wavefield = mpl.imshow(background, extent=area, cmap=mpl.cm.gray_r,
vmin=-0.00001, vmax=0.00001)
mpl.points(stations, '^b', size=8)
mpl.text(500000, 20000, 'Crust')
mpl.text(500000, 60000, 'Mantle')
fig.text(0.7, 0.31, 'Seismometer')
mpl.xlim(area[:2])
mpl.ylim(area[2:][::-1])
mpl.xlabel('x (km)')
mpl.ylabel('z (km)')
mpl.m2km()
times = np.linspace(0, dt * maxit, maxit)
# This function updates the plot every few timesteps
def animate(i):
t, p, s, xcomp, zcomp = simulation.next()
mpl.title('time: %0.1f s' % (times[t]))
wavefield.set_array((background + p + s)[::-1])
xseismogram.set_data(times[:t + 1], xcomp[0][:t + 1])
zseismogram.set_data(times[:t + 1], zcomp[0][:t + 1])
return wavefield, xseismogram, zseismogram
anim = animation.FuncAnimation(
fig, animate, frames=maxit / snapshots, interval=1)
mpl.show()
| bsd-3-clause | 6ec742e79a7dcf3b52d8ad4e2481801f | 32.313953 | 77 | 0.670506 | 2.773475 | false | false | false | false |
fatiando/fatiando | fatiando/gridder/point_generation.py | 6 | 6022 | """
Generate points on a map as regular grids or points scatters.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
def _check_area(area):
"""
Check that the area argument is valid.
For example, the west limit should not be greater than the east limit.
"""
x1, x2, y1, y2 = area
assert x1 <= x2, \
"Invalid area dimensions {}, {}. x1 must be < x2.".format(x1, x2)
assert y1 <= y2, \
"Invalid area dimensions {}, {}. y1 must be < y2.".format(y1, y2)
def regular(area, shape, z=None):
    """
    Create a regular grid.

    The x directions is North-South and y East-West. Imagine the grid as a
    matrix with x varying in the lines and y in columns.

    Returned arrays will be flattened to 1D with ``numpy.ravel``.

    .. warning::

        As of version 0.4, the ``shape`` argument was corrected to be
        ``shape = (nx, ny)`` instead of ``shape = (ny, nx)``.

    Parameters:

    * area
        ``(x1, x2, y1, y2)``: Borders of the grid
    * shape
        Shape of the regular grid, ie ``(nx, ny)``.
    * z
        Optional. z coordinate of the grid points. If given, will return an
        array with the value *z*.

    Returns:

    * ``[x, y]``
        Numpy arrays with the x and y coordinates of the grid points
    * ``[x, y, z]``
        If *z* given. Numpy arrays with the x, y, and z coordinates of the
        grid points

    Examples:

    >>> x, y = regular((0, 10, 0, 5), (5, 3))
    >>> print(x.reshape((5, 3)))
    [[  0.    0.    0. ]
     [  2.5   2.5   2.5]
     [  5.    5.    5. ]
     [  7.5   7.5   7.5]
     [ 10.   10.   10. ]]
    >>> print(y.reshape((5, 3)))
    [[ 0.   2.5  5. ]
     [ 0.   2.5  5. ]
     [ 0.   2.5  5. ]
     [ 0.   2.5  5. ]
     [ 0.   2.5  5. ]]
    >>> x, y, z = regular((0, 10, 0, 5), (5, 3), z=-10)
    >>> print(z.reshape((5, 3)))
    [[-10. -10. -10.]
     [-10. -10. -10.]
     [-10. -10. -10.]
     [-10. -10. -10.]
     [-10. -10. -10.]]

    """
    nx, ny = shape
    x1, x2, y1, y2 = area
    _check_area(area)
    xs = np.linspace(x1, x2, nx)
    ys = np.linspace(y1, y2, ny)
    # Must pass ys, xs in this order because meshgrid uses the first argument
    # for the columns
    arrays = np.meshgrid(ys, xs)[::-1]
    if z is not None:
        # Use the builtin ``float`` here: ``np.float`` was just an alias for
        # it, was deprecated in NumPy 1.20, and removed in NumPy 1.24, so
        # ``dtype=np.float`` raises AttributeError on current NumPy.
        arrays.append(z*np.ones(nx*ny, dtype=float))
    return [i.ravel() for i in arrays]
def scatter(area, n, z=None, seed=None):
    """
    Create an irregular grid with a random scattering of points.

    Parameters:

    * area
        ``(x1, x2, y1, y2)``: Borders of the grid
    * n
        Number of points
    * z
        Optional. z coordinate of the points. If given, will return an
        array with the value *z*.
    * seed : None or int
        Seed for the pseudo-random number generator. Pass an int to get
        reproducible points; ``None`` uses a different seed every time.

    Returns:

    * ``[x, y]`` or ``[x, y, z]``
        Numpy arrays with the coordinates of the points (the z array is
        included only when the *z* argument is given).

    Examples:

    >>> x, y = scatter((0, 10, 0, 2), 4, seed=0)
    >>> print(', '.join('{:.4f}'.format(i) for i in x))
    5.4881, 7.1519, 6.0276, 5.4488
    >>> print(', '.join('{:.4f}'.format(i) for i in y))
    0.8473, 1.2918, 0.8752, 1.7835

    """
    _check_area(area)
    x1, x2, y1, y2 = area
    np.random.seed(seed)
    coords = [np.random.uniform(x1, x2, n), np.random.uniform(y1, y2, n)]
    if z is not None:
        coords.append(z*np.ones(n))
    return coords
def circular_scatter(area, n, z=None, random=False, seed=None):
    """
    Generate a set of n points positioned along a circle.

    The circle is centered on the area and its diameter is the smallest
    dimension of the area.

    Parameters:

    * area : list = [x1, x2, y1, y2]
        Area inside of which the points are contained
    * n : int
        Number of points
    * z : float or 1d-array
        Optional. z coordinate of the points. If given, will return an
        array with the value *z*.
    * random : True or False
        If True, the angular positions of the points are drawn at random
        instead of being evenly spaced along the circle.
    * seed : None or int
        Seed for the random angles when ``random=True``. ``None`` gives a
        different sequence every time.

    Returns:

    * ``[x, y]`` or ``[x, y, z]``
        Numpy arrays with the coordinates of the points (the z array is
        included only when the *z* argument is given).

    """
    x1, x2, y1, y2 = area
    radius = 0.5*min(x2 - x1, y2 - y1)
    if random:
        np.random.seed(seed)
        angles = np.random.uniform(0, 2*np.pi, n)
        # Restore an unpredictable RNG state after drawing the angles
        np.random.seed()
    else:
        # linspace includes the end point, which would coincide with the
        # starting point, so generate one extra angle and drop the last
        angles = np.linspace(0, 2*np.pi, n + 1)[:-1]
    center_x = 0.5*(x1 + x2)
    center_y = 0.5*(y1 + y2)
    result = [center_x + radius*np.cos(angles),
              center_y + radius*np.sin(angles)]
    if z is not None:
        result.append(z*np.ones(n))
    return result
| bsd-3-clause | bb13c01797b68e70b75e156f3253c50d | 29.261307 | 79 | 0.549983 | 3.069317 | false | false | false | false |
fatiando/fatiando | fatiando/gravmag/sphere.py | 5 | 27981 | r"""
The potential fields of a homogeneous sphere.
"""
from __future__ import division, absolute_import
import numpy as np
from ..constants import SI2MGAL, G, CM, T2NT, SI2EOTVOS
from .. import utils
from .._our_duecredit import due, Doi
due.cite(Doi("10.1017/CBO9780511549816"),
description='Forward modeling formula for spheres.',
path='fatiando.gravmag.sphere')
# These are the second derivatives of the V = 1/r function that is used by the
# magnetic field component, total-field magnetic anomaly, gravity gradients,
# and the kernel functions.
def _v_xx(x, y, z, r_sqr, r_5):
return (3*x**2 - r_sqr)/r_5
def _v_xy(x, y, z, r_sqr, r_5):
return 3*x*y/r_5
def _v_xz(x, y, z, r_sqr, r_5):
return 3*x*z/r_5
def _v_yy(x, y, z, r_sqr, r_5):
return (3*y**2 - r_sqr)/r_5
def _v_yz(x, y, z, r_sqr, r_5):
return 3*y*z/r_5
def _v_zz(x, y, z, r_sqr, r_5):
return (3*z**2 - r_sqr)/r_5
def tf(xp, yp, zp, spheres, inc, dec, pmag=None):
    r"""
    Total-field magnetic anomaly of homogeneous spheres.

    Computes :math:`\Delta T \approx \hat{\mathbf{F}}\cdot\mathbf{B}`: the
    projection of the induction produced by each sphere onto the direction
    of the regional field (Blakely, 1995).

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. Input units should be SI. Output is in nT.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the anomaly will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        Spheres with the physical property ``'magnetization'``: the total
        (remanent + induced + any demagnetization effects) magnetization
        given as a 3-component vector. Entries that are ``None`` or lack
        the property are skipped.
    * inc, dec : floats
        The inclination and declination of the regional field (in degrees)
    * pmag : [mx, my, mz] or None
        If given, overrides the ``'magnetization'`` property of every
        sphere. Use this, e.g., for sensitivity matrix building.

    Returns:

    * tf : array
        The total-field anomaly

    References:

    Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
    Applications, Cambridge University Press.
    """
    fx, fy, fz = utils.dircos(inc, dec)
    if pmag is not None:
        pmx, pmy, pmz = pmag
    res = 0
    for sphere in spheres:
        if sphere is None:
            continue
        if 'magnetization' not in sphere.props and pmag is None:
            continue
        if pmag is None:
            mx, my, mz = sphere.props['magnetization']
        else:
            mx, my, mz = pmx, pmy, pmz
        x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
        r_sqr = x**2 + y**2 + z**2
        r = np.sqrt(r_sqr)
        r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
        volume = 4*np.pi*(sphere.radius**3)/3
        # Projecting the dipole field directly is faster than computing the
        # v_xx, v_xy, ... kernels first
        m_dot_r = mx*x + my*y + mz*z
        bx = (3*m_dot_r*x - r_sqr*mx)/r_5
        by = (3*m_dot_r*y - r_sqr*my)/r_5
        bz = (3*m_dot_r*z - r_sqr*mz)/r_5
        res += volume*(fx*bx + fy*by + fz*bz)
    return res*CM*T2NT
def bx(xp, yp, zp, spheres, pmag=None):
    """
    The x component of the magnetic induction of homogeneous spheres.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. Input units should be SI. Output is in nT.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the anomaly will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        Spheres with the physical property ``'magnetization'``: the total
        (remanent + induced + any demagnetization effects) magnetization
        given as a 3-component vector. Entries that are ``None`` or lack
        the property are skipped.
    * pmag : [mx, my, mz] or None
        If given, overrides the ``'magnetization'`` property of every
        sphere. Use this, e.g., for sensitivity matrix building.

    Returns:

    * bx : array
        The x component of the magnetic induction

    References:

    Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
    Applications, Cambridge University Press.
    """
    if pmag is not None:
        pmx, pmy, pmz = pmag
    res = 0
    for sphere in spheres:
        if sphere is None:
            continue
        if 'magnetization' not in sphere.props and pmag is None:
            continue
        if pmag is None:
            mx, my, mz = sphere.props['magnetization']
        else:
            mx, my, mz = pmx, pmy, pmz
        x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
        r_sqr = x**2 + y**2 + z**2
        r = np.sqrt(r_sqr)
        r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
        volume = 4*np.pi*(sphere.radius**3)/3
        # Expanding the dipole field directly is faster than going through
        # the v_xx, v_xy, v_xz kernels
        m_dot_r = mx*x + my*y + mz*z
        res += volume*(3*m_dot_r*x - r_sqr*mx)/r_5
    return res*CM*T2NT
def by(xp, yp, zp, spheres, pmag=None):
    """
    The y component of the magnetic induction of homogeneous spheres.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. Input units should be SI. Output is in nT.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the anomaly will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        Spheres with the physical property ``'magnetization'``: the total
        (remanent + induced + any demagnetization effects) magnetization
        given as a 3-component vector. Entries that are ``None`` or lack
        the property are skipped.
    * pmag : [mx, my, mz] or None
        If given, overrides the ``'magnetization'`` property of every
        sphere. Use this, e.g., for sensitivity matrix building.

    Returns:

    * by : array
        The y component of the magnetic induction

    References:

    Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
    Applications, Cambridge University Press.
    """
    if pmag is not None:
        pmx, pmy, pmz = pmag
    res = 0
    for sphere in spheres:
        if sphere is None:
            continue
        if 'magnetization' not in sphere.props and pmag is None:
            continue
        if pmag is None:
            mx, my, mz = sphere.props['magnetization']
        else:
            mx, my, mz = pmx, pmy, pmz
        x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
        r_sqr = x**2 + y**2 + z**2
        r = np.sqrt(r_sqr)
        r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
        volume = 4*np.pi*(sphere.radius**3)/3
        # Expanding the dipole field directly is faster than going through
        # the v_xy, v_yy, v_yz kernels
        m_dot_r = mx*x + my*y + mz*z
        res += volume*(3*m_dot_r*y - r_sqr*my)/r_5
    return res*CM*T2NT
def bz(xp, yp, zp, spheres, pmag=None):
    """
    The z component of the magnetic induction of homogeneous spheres.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. Input units should be SI. Output is in nT.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the anomaly will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        Spheres with the physical property ``'magnetization'``: the total
        (remanent + induced + any demagnetization effects) magnetization
        given as a 3-component vector. Entries that are ``None`` or lack
        the property are skipped.
    * pmag : [mx, my, mz] or None
        If given, overrides the ``'magnetization'`` property of every
        sphere. Use this, e.g., for sensitivity matrix building.

    Returns:

    * bz : array
        The z component of the magnetic induction

    References:

    Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
    Applications, Cambridge University Press.
    """
    if pmag is not None:
        pmx, pmy, pmz = pmag
    res = 0
    for sphere in spheres:
        if sphere is None:
            continue
        if 'magnetization' not in sphere.props and pmag is None:
            continue
        if pmag is None:
            mx, my, mz = sphere.props['magnetization']
        else:
            mx, my, mz = pmx, pmy, pmz
        x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
        r_sqr = x**2 + y**2 + z**2
        r = np.sqrt(r_sqr)
        r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
        volume = 4*np.pi*(sphere.radius**3)/3
        # Expanding the dipole field directly is faster than going through
        # the v_xz, v_yz, v_zz kernels
        m_dot_r = mx*x + my*y + mz*z
        res += volume*(3*m_dot_r*z - r_sqr*mz)/r_5
    return res*CM*T2NT
def gz(xp, yp, zp, spheres, dens=None):
    r"""
    The :math:`g_z` gravitational acceleration of homogeneous spheres.

    .. math::

        g_z(x, y, z) = \rho 4 \pi \dfrac{radius^3}{3} \dfrac{z - z'}{r^3}

    in which :math:`\rho` is the density and
    :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. All input values should be in SI and output is in mGal.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the field will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        Spheres with the property ``'density'``. Entries that are ``None``
        or lack the property are skipped.
    * dens : float or None
        If given, overrides the ``'density'`` property of every sphere.
        Use this, e.g., for sensitivity matrix building.

    Returns:

    * res : array
        The field calculated on xp, yp, zp

    References:

    Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
    Applications, Cambridge University Press.
    """
    res = 0
    for sphere in spheres:
        if sphere is None:
            continue
        if 'density' not in sphere.props and dens is None:
            continue
        density = sphere.props['density'] if dens is None else dens
        x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
        r = np.sqrt(x**2 + y**2 + z**2)
        r_cb = r*r*r  # cheaper than r**3
        mass = density*4*np.pi*(sphere.radius**3)/3
        res += mass*z/r_cb
    return res*G*SI2MGAL
def gxx(xp, yp, zp, spheres, dens=None):
    r"""
    The :math:`g_{xx}` gravity gradient of homogeneous spheres.

    .. math::

        g_{xx}(x, y, z) = \rho 4 \pi \dfrac{radius^3}{3}
        \dfrac{3 (x - x')^2 - r^2}{r^5}

    in which :math:`\rho` is the density and
    :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. All input values should be in SI and output is in Eotvos.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the field will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        Spheres with the property ``'density'``. Entries that are ``None``
        or lack the property are skipped.
    * dens : float or None
        If given, overrides the ``'density'`` property of every sphere.
        Use this, e.g., for sensitivity matrix building.

    Returns:

    * res : array
        The field calculated on xp, yp, zp

    References:

    Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
    Applications, Cambridge University Press.
    """
    res = 0
    for sphere in spheres:
        if sphere is None:
            continue
        if 'density' not in sphere.props and dens is None:
            continue
        density = sphere.props['density'] if dens is None else dens
        x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
        r_sqr = x**2 + y**2 + z**2
        r = np.sqrt(r_sqr)
        r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
        volume = 4*np.pi*(sphere.radius**3)/3
        res += density*volume*_v_xx(x, y, z, r_sqr, r_5)
    return res*G*SI2EOTVOS
def gxy(xp, yp, zp, spheres, dens=None):
    r"""
    The :math:`g_{xy}` gravity gradient of homogeneous spheres.

    .. math::

        g_{xy}(x, y, z) = \rho 4 \pi \dfrac{radius^3}{3}
        \dfrac{3(x - x')(y - y')}{r^5}

    in which :math:`\rho` is the density and
    :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. All input values should be in SI and output is in Eotvos.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the field will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        Spheres with the property ``'density'``. Entries that are ``None``
        or lack the property are skipped.
    * dens : float or None
        If given, overrides the ``'density'`` property of every sphere.
        Use this, e.g., for sensitivity matrix building.

    Returns:

    * res : array
        The field calculated on xp, yp, zp

    References:

    Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
    Applications, Cambridge University Press.
    """
    res = 0
    for sphere in spheres:
        if sphere is None:
            continue
        if 'density' not in sphere.props and dens is None:
            continue
        density = sphere.props['density'] if dens is None else dens
        x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
        r_sqr = x**2 + y**2 + z**2
        r = np.sqrt(r_sqr)
        r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
        volume = 4*np.pi*(sphere.radius**3)/3
        res += density*volume*_v_xy(x, y, z, r_sqr, r_5)
    return res*G*SI2EOTVOS
def gxz(xp, yp, zp, spheres, dens=None):
    r"""
    The :math:`g_{xz}` gravity gradient of homogeneous spheres.

    .. math::

        g_{xz}(x, y, z) = \rho 4 \pi \dfrac{radius^3}{3}
        \dfrac{3(x - x')(z - z')}{r^5}

    in which :math:`\rho` is the density and
    :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. All input values should be in SI and output is in Eotvos.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the field will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        Spheres with the property ``'density'``. Entries that are ``None``
        or lack the property are skipped.
    * dens : float or None
        If given, overrides the ``'density'`` property of every sphere.
        Use this, e.g., for sensitivity matrix building.

    Returns:

    * res : array
        The field calculated on xp, yp, zp

    References:

    Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
    Applications, Cambridge University Press.
    """
    res = 0
    for sphere in spheres:
        if sphere is None:
            continue
        if 'density' not in sphere.props and dens is None:
            continue
        density = sphere.props['density'] if dens is None else dens
        x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
        r_sqr = x**2 + y**2 + z**2
        r = np.sqrt(r_sqr)
        r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
        volume = 4*np.pi*(sphere.radius**3)/3
        res += density*volume*_v_xz(x, y, z, r_sqr, r_5)
    return res*G*SI2EOTVOS
def gyy(xp, yp, zp, spheres, dens=None):
    r"""
    The :math:`g_{yy}` gravity gradient of homogeneous spheres.

    .. math::

        g_{yy}(x, y, z) = \rho 4 \pi \dfrac{radius^3}{3}
        \dfrac{3(y - y')^2 - r^2}{r^5}

    in which :math:`\rho` is the density and
    :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. All input values should be in SI and output is in Eotvos.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the field will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        Spheres with the property ``'density'``. Entries that are ``None``
        or lack the property are skipped.
    * dens : float or None
        If given, overrides the ``'density'`` property of every sphere.
        Use this, e.g., for sensitivity matrix building.

    Returns:

    * res : array
        The field calculated on xp, yp, zp

    References:

    Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
    Applications, Cambridge University Press.
    """
    res = 0
    for sphere in spheres:
        if sphere is None:
            continue
        if 'density' not in sphere.props and dens is None:
            continue
        density = sphere.props['density'] if dens is None else dens
        x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
        r_sqr = x**2 + y**2 + z**2
        r = np.sqrt(r_sqr)
        r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
        volume = 4*np.pi*(sphere.radius**3)/3
        res += density*volume*_v_yy(x, y, z, r_sqr, r_5)
    return res*G*SI2EOTVOS
def gyz(xp, yp, zp, spheres, dens=None):
    r"""
    The :math:`g_{yz}` gravity gradient of homogeneous spheres.

    .. math::

        g_{yz}(x, y, z) = \rho 4 \pi \dfrac{radius^3}{3}
        \dfrac{3(y - y')(z - z')}{r^5}

    in which :math:`\rho` is the density and
    :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. All input values should be in SI and output is in Eotvos.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the field will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        Spheres with the property ``'density'``. Entries that are ``None``
        or lack the property are skipped.
    * dens : float or None
        If given, overrides the ``'density'`` property of every sphere.
        Use this, e.g., for sensitivity matrix building.

    Returns:

    * res : array
        The field calculated on xp, yp, zp

    References:

    Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
    Applications, Cambridge University Press.
    """
    res = 0
    for sphere in spheres:
        if sphere is None:
            continue
        if 'density' not in sphere.props and dens is None:
            continue
        density = sphere.props['density'] if dens is None else dens
        x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
        r_sqr = x**2 + y**2 + z**2
        r = np.sqrt(r_sqr)
        r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
        volume = 4*np.pi*(sphere.radius**3)/3
        res += density*volume*_v_yz(x, y, z, r_sqr, r_5)
    return res*G*SI2EOTVOS
def gzz(xp, yp, zp, spheres, dens=None):
    r"""
    The :math:`g_{zz}` gravity gradient of homogeneous spheres.

    .. math::

        g_{zz}(x, y, z) = \rho 4 \pi \dfrac{radius^3}{3}
        \dfrac{3(z - z')^2 - r^2}{r^5}

    in which :math:`\rho` is the density and
    :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. All input values should be in SI and output is in Eotvos.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the field will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        Spheres with the property ``'density'``. Entries that are ``None``
        or lack the property are skipped.
    * dens : float or None
        If given, overrides the ``'density'`` property of every sphere.
        Use this, e.g., for sensitivity matrix building.

    Returns:

    * res : array
        The field calculated on xp, yp, zp

    References:

    Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic
    Applications, Cambridge University Press.
    """
    res = 0
    for sphere in spheres:
        if sphere is None:
            continue
        if 'density' not in sphere.props and dens is None:
            continue
        density = sphere.props['density'] if dens is None else dens
        x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
        r_sqr = x**2 + y**2 + z**2
        r = np.sqrt(r_sqr)
        r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
        volume = 4*np.pi*(sphere.radius**3)/3
        res += density*volume*_v_zz(x, y, z, r_sqr, r_5)
    return res*G*SI2EOTVOS
def kernelxx(xp, yp, zp, sphere):
    r"""
    The second x derivative of the kernel function

    .. math::

        \phi(x,y,z) = \frac{4}{3} \pi radius^3 \frac{1}{r}

    where :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. All input values should be in SI and output is in SI.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the function will be calculated
    * sphere : :class:`fatiando.mesher.Sphere`
        The sphere.

    Returns:

    * res : array
        The function calculated on xp, yp, zp

    """
    x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
    r_sqr = x**2 + y**2 + z**2
    r = np.sqrt(r_sqr)
    r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
    volume = 4*np.pi*(sphere.radius**3)/3
    return volume*_v_xx(x, y, z, r_sqr, r_5)
def kernelxy(xp, yp, zp, sphere):
    r"""
    The xy derivative of the kernel function

    .. math::

        \phi(x,y,z) = \frac{4}{3} \pi radius^3 \frac{1}{r}

    where :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. All input values should be in SI and output is in SI.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the function will be calculated
    * sphere : :class:`fatiando.mesher.Sphere`
        The sphere.

    Returns:

    * res : array
        The function calculated on xp, yp, zp

    """
    x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
    r_sqr = x**2 + y**2 + z**2
    r = np.sqrt(r_sqr)
    r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
    volume = 4*np.pi*(sphere.radius**3)/3
    return volume*_v_xy(x, y, z, r_sqr, r_5)
def kernelxz(xp, yp, zp, sphere):
    r"""
    The xz derivative of the kernel function

    .. math::

        \phi(x,y,z) = \frac{4}{3} \pi radius^3 \frac{1}{r}

    where :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. All input values should be in SI and output is in SI.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the function will be calculated
    * sphere : :class:`fatiando.mesher.Sphere`
        The sphere.

    Returns:

    * res : array
        The function calculated on xp, yp, zp

    """
    x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
    r_sqr = x**2 + y**2 + z**2
    r = np.sqrt(r_sqr)
    r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
    volume = 4*np.pi*(sphere.radius**3)/3
    return volume*_v_xz(x, y, z, r_sqr, r_5)
def kernelyy(xp, yp, zp, sphere):
    r"""
    The second y derivative of the kernel function

    .. math::

        \phi(x,y,z) = \frac{4}{3} \pi radius^3 \frac{1}{r}

    where :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. All input values should be in SI and output is in SI.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the function will be calculated
    * sphere : :class:`fatiando.mesher.Sphere`
        The sphere.

    Returns:

    * res : array
        The function calculated on xp, yp, zp

    """
    x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
    r_sqr = x**2 + y**2 + z**2
    r = np.sqrt(r_sqr)
    r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
    volume = 4*np.pi*(sphere.radius**3)/3
    return volume*_v_yy(x, y, z, r_sqr, r_5)
def kernelyz(xp, yp, zp, sphere):
    r"""
    The yz derivative of the kernel function

    .. math::

        \phi(x,y,z) = \frac{4}{3} \pi radius^3 \frac{1}{r}

    where :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. All input values should be in SI and output is in SI.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the function will be calculated
    * sphere : :class:`fatiando.mesher.Sphere`
        The sphere.

    Returns:

    * res : array
        The function calculated on xp, yp, zp

    """
    x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
    r_sqr = x**2 + y**2 + z**2
    r = np.sqrt(r_sqr)
    r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
    volume = 4*np.pi*(sphere.radius**3)/3
    return volume*_v_yz(x, y, z, r_sqr, r_5)
def kernelzz(xp, yp, zp, sphere):
    r"""
    The second z derivative of the kernel function

    .. math::

        \phi(x,y,z) = \frac{4}{3} \pi radius^3 \frac{1}{r}

    where :math:`r = \sqrt{(x - x')^2 + (y - y')^2 + (z - z')^2}`.

    The coordinate system of the input parameters is x -> North, y -> East
    and z -> Down. All input values should be in SI and output is in SI.

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the function will be calculated
    * sphere : :class:`fatiando.mesher.Sphere`
        The sphere.

    Returns:

    * res : array
        The function calculated on xp, yp, zp

    """
    x, y, z = sphere.x - xp, sphere.y - yp, sphere.z - zp
    r_sqr = x**2 + y**2 + z**2
    r = np.sqrt(r_sqr)
    r_5 = r*r*r*r*r  # cheaper than r_sqr**2.5
    volume = 4*np.pi*(sphere.radius**3)/3
    return volume*_v_zz(x, y, z, r_sqr, r_5)
| bsd-3-clause | 42e8e32a31e8c7e5f2d4d69958a2072f | 27.235116 | 79 | 0.562274 | 3.116618 | false | false | false | false |
fatiando/fatiando | cookbook/seismic_profile_vertical.py | 7 | 1802 | """
Seismic: Invert vertical seismic profile (VSP) traveltimes for the velocity
of a layered model.
"""
import numpy
from fatiando import utils
from fatiando.seismic.profile import layered_straight_ray, LayeredStraight
from fatiando.inversion.regularization import Damping
from fatiando.vis import mpl
# The limits in velocity and depths, respectively
area = (0, 10000, 0, 100)
vmin, vmax, zmin, zmax = area
# Use the interactive functions of mpl to draw a layered model
figure = mpl.figure()
mpl.xlabel("Velocity (m/s)")
mpl.ylabel("Depth (m)")
# NOTE: draw_layers is interactive — the user picks layer thicknesses and
# velocities on the open figure before the script continues
thickness, velocity = mpl.draw_layers(area, figure.gca())
# Make some synthetic noise-corrupted travel-time data
# Observation depths every 0.5 m down the well, offset half a spacing
zp = numpy.arange(zmin + 0.5, zmax, 0.5)
# Contaminate the straight-ray travel-times with 2% proportional
# pseudo-random noise (percent=True makes 0.02 a fraction of the data)
tts, error = utils.contaminate(
    layered_straight_ray(thickness, velocity, zp),
    0.02, percent=True, return_stddev=True)
# Make the solver and run the inversion using damping regularization
# (assumes known thicknesses of the layers)
solver = (LayeredStraight(tts, zp, thickness) +
          0.1 * Damping(len(thickness))).fit()
velocity_ = solver.estimate_
# Plot the results
mpl.figure(figsize=(12, 5))
mpl.subplot(1, 2, 1)
mpl.grid()
mpl.title("Vertical seismic profile")
mpl.plot(tts, zp, 'ok', label='Observed')
mpl.plot(solver[0].predicted(), zp, '-r', linewidth=3, label='Predicted')
mpl.legend(loc='upper right', numpoints=1)
mpl.xlabel("Travel-time (s)")
mpl.ylabel("Z (m)")
# Flip the depth axis so the surface (z=0) is at the top of the plot
mpl.ylim(sum(thickness), 0)
mpl.subplot(1, 2, 2)
mpl.grid()
mpl.title("Velocity profile")
mpl.layers(thickness, velocity_, 'o-k', linewidth=2, label='Estimated')
mpl.layers(thickness, velocity, '--b', linewidth=2, label='True')
mpl.ylim(zmax, zmin)
mpl.xlim(vmin, vmax)
leg = mpl.legend(loc='upper right', numpoints=1)
leg.get_frame().set_alpha(0.5)
mpl.xlabel("Velocity (m/s)")
mpl.ylabel("Z (m)")
mpl.show()
| bsd-3-clause | fdaa8d61aa579c2a40d48ef2b8961787 | 33.653846 | 75 | 0.724195 | 2.901771 | false | false | false | false |
fatiando/fatiando | fatiando/gravmag/euler.py | 6 | 14010 | # coding: utf-8
"""
Euler deconvolution methods for potential fields.
* :class:`~fatiando.gravmag.euler.EulerDeconv`: The classic 3D solution to
Euler's equation for potential fields (Reid et al., 1990). Runs on the whole
dataset.
* :class:`~fatiando.gravmag.euler.EulerDeconvEW`: Run Euler deconvolution on an
expanding window over the data set and keep the best estimate.
* :class:`~fatiando.gravmag.euler.EulerDeconvMW`: Run Euler deconvolution on a
moving window over the data set to produce a set of estimates.
**References**
Reid, A. B., J. M. Allsop, H. Granser, A. J. Millett, and I. W. Somerton
(1990), Magnetic interpretation in three dimensions using Euler deconvolution,
Geophysics, 55(1), 80-91, doi:10.1190/1.1442774.
----
"""
from __future__ import division, absolute_import
from future.builtins import super
import numpy as np
from .. import gridder
from ..inversion import Misfit
from ..utils import safe_inverse, safe_dot, safe_diagonal
class EulerDeconv(Misfit):
    r"""
    Classic 3D Euler deconvolution of potential field data.

    Follows the formulation of Reid et al. (1990). Performs the deconvolution
    on the whole data set. For windowed approaches, use
    :class:`~fatiando.gravmag.euler.EulerDeconvMW` (moving window)
    and
    :class:`~fatiando.gravmag.euler.EulerDeconvEW` (expanding window).

    Works on any potential field that satisfies Euler's homogeneity equation
    (both gravity and magnetic, assuming simple sources):

    .. math::

        (x_i - x_0)\dfrac{\partial f_i}{\partial x} +
        (y_i - y_0)\dfrac{\partial f_i}{\partial y} +
        (z_i - z_0)\dfrac{\partial f_i}{\partial z} =
        \eta (b - f_i),

    in which :math:`f_i` is the given potential field observation at point
    :math:`(x_i, y_i, z_i)`, :math:`b` is the base level (a constant shift of
    the field, like a regional field), :math:`\eta` is the structural index,
    and :math:`(x_0, y_0, z_0)` are the coordinates of a point on the source
    (for a sphere, this is the center point).

    The Euler deconvolution estimates :math:`(x_0, y_0, z_0)` and :math:`b`
    given a potential field and its x, y, z derivatives and the structural
    index. However, **this assumes that the sources are ideal** (see the
    table below). We recommend reading Reid and Thurston (2014) for a
    discussion on what the structural index means and what it does not mean.

    .. warning::

        Please read the paper Reid et al. (2014) to avoid doing **horrible
        things** with Euler deconvolution. Uieda et al. (2014) offer a
        practical tutorial using Fatiando code and show some common
        misinterpretations.

    After Reid et al. (2014), values of the structural index (SI) can be:

    ===================================== ======== =========
    Source type                           SI (Mag) SI (Grav)
    ===================================== ======== =========
    Point, sphere                         3        2
    Line, cylinder, thin bed fault        2        1
    Thin sheet edge, thin sill, thin dyke 1        0
    ===================================== ======== =========

    Use the :meth:`~fatiando.gravmag.euler.EulerDeconv.fit` method to run the
    deconvolution. The estimated coordinates :math:`(x_0, y_0, z_0)` are
    stored in the ``estimate_`` attribute and the estimated base level
    :math:`b` is stored in ``baselevel_``.

    .. note:: Using structural index of 0 is not supported yet.

    .. note::

        The data does **not** need to be gridded for this! So long as you
        can calculate the derivatives of non-gridded data (using an
        Equivalent Layer, for example).

    .. note:: x is North, y is East, and z is down.

    .. note::

        Units of the input data (x, y, z, field, derivatives) must be in SI
        units! Otherwise, the results will be in strange units. Use functions
        in :mod:`fatiando.utils` to convert between units.

    Parameters:

    * x, y, z : 1d-arrays
        The x, y, and z coordinates of the observation points
    * field : 1d-array
        The potential field measured at the observation points
    * xderiv, yderiv, zderiv : 1d-arrays
        The x-, y-, and z-derivatives of the potential field (measured or
        calculated) at the observation points
    * structural_index : float
        The structural index of the source

    References:

    Reid, A. B., J. M. Allsop, H. Granser, A. J. Millett, and I. W. Somerton
    (1990), Magnetic interpretation in three dimensions using Euler
    deconvolution, Geophysics, 55(1), 80-91, doi:`10.1190/1.1442774
    <http://dx.doi.org/10.1190/1.1442774>`__.

    Reid, A. B., J. Ebbing, and S. J. Webb (2014), Avoidable Euler Errors –
    the use and abuse of Euler deconvolution applied to potential fields,
    Geophysical Prospecting, doi:`10.1111/1365-2478.12119
    <http://dx.doi.org/10.1111/1365-2478.12119>`__.

    Reid, A., and J. Thurston (2014), The structural index in gravity and
    magnetic interpretation: Errors, uses, and abuses, GEOPHYSICS, 79(4),
    J61-J66, doi:`10.1190/geo2013-0235.1
    <http://dx.doi.org/10.1190/geo2013-0235.1>`__.

    Uieda, L., V. C. Oliveira Jr., and V. C. F. Barbosa (2014), Geophysical
    tutorial: Euler deconvolution of potential-field data, The Leading Edge,
    33(4), 448-450, doi:`10.1190/tle33040448.1
    <http://dx.doi.org/10.1190/tle33040448.1>`__.
    """

    def __init__(self, x, y, z, field, xderiv, yderiv, zderiv,
                 structural_index):
        same_shape = all(i.shape == x.shape
                         for i in [y, z, field, xderiv, yderiv, zderiv])
        assert same_shape, 'All input arrays should have the same shape.'
        assert structural_index >= 0, \
            "Invalid structural index '{}'. Should be >= 0".format(
                structural_index)
        # The data vector comes from moving the known terms of Euler's
        # homogeneity equation to one side, leaving (x0, y0, z0, b) as the
        # 4 linear parameters.
        super().__init__(
            data=-x*xderiv - y*yderiv - z*zderiv - structural_index*field,
            nparams=4, islinear=True)
        self.x = x
        self.y = y
        self.z = z
        self.field = field
        self.xderiv = xderiv
        self.yderiv = yderiv
        self.zderiv = zderiv
        self.structural_index = structural_index

    def jacobian(self, p):
        """
        Build the Jacobian (sensitivity) matrix of the linear system.

        Columns are the derivatives of the data with respect to
        (x0, y0, z0, b), in that order.
        """
        # 'np.float' was removed from numpy (>= 1.24); the builtin 'float'
        # is the exact equivalent (float64).
        jac = np.empty((self.ndata, self.nparams), dtype=float)
        jac[:, 0] = -self.xderiv
        jac[:, 1] = -self.yderiv
        jac[:, 2] = -self.zderiv
        # Broadcasting fills the column; no need to allocate np.ones
        jac[:, 3] = -self.structural_index
        return jac

    def predicted(self, p):
        """Return the data predicted by parameter vector *p*."""
        return safe_dot(self.jacobian(p), p)

    @property
    def baselevel_(self):
        """The estimated base level (4th element of the parameter vector)."""
        assert self.p_ is not None, "No estimates found. Run 'fit' first."
        return self.p_[3]

    def fmt_estimate(self, p):
        """
        Separate the (x, y, z) point coordinates from the baselevel.

        Coordinates are stored in ``estimate_`` and a base level is stored in
        ``baselevel_``.
        """
        return p[:3]

    def _cut_window(self, area):
        """
        Return a copy of self with only data that falls inside the given
        area. Used by the windowed versions of Euler deconvolution.

        Parameters:

        * area : list = (x1, x2, y1, y2)
            The limiting coordinates of the area

        Returns:

        * subset
            An instance of this class.
        """
        x, y = self.x, self.y
        x1, x2, y1, y2 = area
        indices = ((x >= x1) & (x <= x2) & (y >= y1) & (y <= y2))
        slices = [i[indices] for i in [self.x, self.y, self.z, self.field,
                                       self.xderiv, self.yderiv,
                                       self.zderiv]]
        slices.append(self.structural_index)
        return EulerDeconv(*slices)
class EulerDeconvEW(EulerDeconv):
    """
    Euler deconvolution using an expanding window scheme.

    Runs the deconvolution on a sequence of windows of increasing size, all
    centered on the same point, and keeps the result with the smallest
    estimated error. Each window is solved exactly as in
    :class:`~fatiando.gravmag.euler.EulerDeconv`.

    Use the :meth:`~fatiando.gravmag.euler.EulerDeconvEW.fit` method to
    produce an estimate. The estimated point is stored in the attribute
    ``estimate_`` and the base level in ``baselevel_``.

    Parameters:

    * x, y, z : 1d-arrays
        The x, y, and z coordinates of the observation points
    * field : 1d-array
        The potential field measured at the observation points
    * xderiv, yderiv, zderiv : 1d-arrays
        The x-, y-, and z-derivatives of the potential field (measured or
        calculated) at the observation points
    * structural_index : float
        The structural index of the source
    * center : [x, y]
        The x, y coordinates of the center of the expanding windows.
    * sizes : list or 1d-array
        The sizes of the windows.
    """

    def __init__(self, x, y, z, field, xderiv, yderiv, zderiv,
                 structural_index, center, sizes):
        super().__init__(x, y, z, field, xderiv, yderiv, zderiv,
                         structural_index)
        self.center = center
        self.sizes = sizes

    def fit(self):
        """
        Perform the Euler deconvolution with expanding windows.

        The estimated point is stored in ``estimate_``, the base level in
        ``baselevel_``.
        """
        xc, yc = self.center
        solutions = []
        errors = []
        for width in self.sizes:
            half = 0.5 * width
            area = [xc - half, xc + half, yc - half, yc + half]
            sub = self._cut_window(area).fit()
            # Don't really know why dividing by ndata makes this better but
            # it does.
            cov = safe_inverse(sub.hessian(sub.p_) / sub.ndata)
            uncertainty = np.sqrt(safe_diagonal(cov)[:3])
            errors.append(np.linalg.norm(uncertainty))
            solutions.append(sub.p_)
        # Keep the parameter vector from the window with the smallest error
        self.p_ = solutions[np.argmin(errors)]
        return self
class EulerDeconvMW(EulerDeconv):
    """
    Solve an Euler deconvolution problem using a moving window scheme.

    Uses data inside a window moving to perform the Euler deconvolution.
    Keeps only a top percentage of the estimates from all windows.

    The deconvolution is performed as in
    :class:`~fatiando.gravmag.euler.EulerDeconv`.

    Use the :meth:`~fatiando.gravmag.euler.EulerDeconvMW.fit` method to
    produce an estimate. The estimated points are stored in ``estimate_`` as
    a 2D numpy array. Each line in the array is an [x, y, z] coordinate of a
    point. The base levels are stored in ``baselevel_``.

    Parameters:

    * x, y, z : 1d-arrays
        The x, y, and z coordinates of the observation points
    * field : 1d-array
        The potential field measured at the observation points
    * xderiv, yderiv, zderiv : 1d-arrays
        The x-, y-, and z-derivatives of the potential field (measured or
        calculated) at the observation points
    * structural_index : float
        The structural index of the source
    * windows : (ny, nx)
        The number of windows in the y and x directions
    * size : (dy, dx)
        The size of the windows in the y and x directions
    * keep : float
        Decimal percentage of solutions to keep. Will rank the solutions by
        increasing error and keep only the first *keep* percent.
    """

    def __init__(self, x, y, z, field, xderiv, yderiv, zderiv,
                 structural_index, windows, size, keep=0.2):
        super().__init__(x, y, z, field, xderiv, yderiv, zderiv,
                         structural_index)
        self.windows = windows
        self.size = size
        self.keep = keep
        self.window_centers = self._get_window_centers()

    def _get_window_centers(self):
        """
        Calculate the center coordinates of the windows.

        Returns:

        * centers : list
            List of [x, y] coordinate pairs for the center of each window.
        """
        ny, nx = self.windows
        dy, dx = self.size
        x, y = self.x, self.y
        x1, x2, y1, y2 = x.min(), x.max(), y.min(), y.max()
        # Offset the first/last centers by half a window so every window
        # fits inside the data area
        xmidpoints = np.linspace(x1 + 0.5 * dx, x2 - 0.5 * dx, nx)
        ymidpoints = np.linspace(y1 + 0.5 * dy, y2 - 0.5 * dy, ny)
        centers = []
        for yc in ymidpoints:
            for xc in xmidpoints:
                centers.append([xc, yc])
        return centers

    def fit(self):
        """
        Perform the Euler deconvolution on a moving window.

        The estimated points are stored in ``estimate_``, the base levels in
        ``baselevel_``.
        """
        dy, dx = self.size
        # Thank you Saulinho for the solution!
        paramvecs = []
        errors = []
        for xc, yc in self.window_centers:
            window = [xc - 0.5 * dx, xc + 0.5 * dx,
                      yc - 0.5 * dy, yc + 0.5 * dy]
            solver = self._cut_window(window).fit()
            cov = safe_inverse(solver.hessian(solver.p_))
            uncertainty = np.sqrt(safe_diagonal(cov)[0:3])
            errors.append(np.linalg.norm(uncertainty))
            paramvecs.append(solver.p_)
        # Rank the windows by estimated error and keep only the best
        # fraction. estimate_ and baselevel_ are derived from p_, so only
        # the parameter vectors need to be kept per window.
        best = np.argsort(errors)[:int(self.keep * len(errors))]
        self.p_ = np.array(paramvecs)[best]
        return self

    @property
    def baselevel_(self):
        """The estimated base levels, one per kept window."""
        assert self.p_ is not None, "No estimates found. Run 'fit' first."
        return self.p_[:, 3]

    def fmt_estimate(self, p):
        """
        Separate the (x, y, z) point coordinates from the baselevel.

        Coordinates are stored in ``estimate_`` and a base level is stored in
        ``baselevel_``.
        """
        return p[:, :3]
| bsd-3-clause | b16ac15de76fb13913e31e7a0c7a7146 | 35.196382 | 79 | 0.598087 | 3.605663 | false | false | false | false |
fatiando/fatiando | fatiando/mesher/geometry.py | 6 | 16438 | """
Defines geometric primitives like prisms, spheres, etc.
"""
from __future__ import division, absolute_import
from future.builtins import object, super
import copy as cp
import numpy as np
class GeometricElement(object):
    """
    Base class for all geometric elements.
    """

    def __init__(self, props):
        # Copy the given properties into a fresh dict so that mutating one
        # element's props never affects another element.
        self.props = {} if props is None else dict(props)

    def addprop(self, prop, value):
        """
        Add a physical property to this geometric element.

        If it already has the property, the given value will overwrite the
        existing one.

        Parameters:

        * prop : str
            Name of the physical property.
        * value : float
            The value of this physical property.
        """
        self.props[prop] = value

    def copy(self):
        """Return a deep copy of the current instance."""
        return cp.deepcopy(self)
class Polygon(GeometricElement):
    """
    A polygon object (2D).

    .. note:: Most applications require the vertices to be **clockwise**!

    Parameters:

    * vertices : list of lists
        List of [x, y] pairs with the coordinates of the vertices.
    * props : dict
        Physical properties assigned to the polygon.
        Ex: ``props={'density':10, 'susceptibility':10000}``
    """

    def __init__(self, vertices, props=None):
        super().__init__(props)
        self._verts = np.asarray(vertices)

    @property
    def vertices(self):
        """The vertex coordinates as an array of [x, y] pairs."""
        return self._verts

    @property
    def nverts(self):
        """The number of vertices in the polygon."""
        # Goes through the 'vertices' property so subclasses that override
        # it (e.g. Square) get the right count.
        return len(self.vertices)

    @property
    def x(self):
        """The x coordinate of each vertex."""
        return self.vertices.T[0]

    @property
    def y(self):
        """The y coordinate of each vertex."""
        return self.vertices.T[1]
class Square(Polygon):
    """
    A square object (2D).

    Parameters:

    * bounds : list = [x1, x2, y1, y2]
        Coordinates of the top right and bottom left corners of the square
    * props : dict
        Physical properties assigned to the square.
        Ex: ``props={'density':10, 'slowness':10000}``

    A square can be used anywhere a :class:`~fatiando.mesher.Polygon` is
    expected: it exposes ``vertices``, ``x``, ``y``, and ``nverts``.
    """

    def __init__(self, bounds, props=None):
        super().__init__(None, props)
        self.x1, self.x2, self.y1, self.y2 = bounds

    @property
    def bounds(self):
        """
        The x, y boundaries of the square as [xmin, xmax, ymin, ymax]
        """
        return [self.x1, self.x2, self.y1, self.y2]

    @property
    def vertices(self):
        """
        The four corners of the square, in clockwise order.
        """
        return np.array([
            [self.x1, self.y1],
            [self.x2, self.y1],
            [self.x2, self.y2],
            [self.x1, self.y2]])

    def __str__(self):
        """Return a string representation of the square."""
        pairs = [('x1', self.x1), ('x2', self.x2), ('y1', self.y1),
                 ('y2', self.y2)]
        pairs += [(key, self.props[key]) for key in sorted(self.props)]
        return ' | '.join('{}:{:g}'.format(key, val) for key, val in pairs)
class Prism(GeometricElement):
    """
    A 3D right rectangular prism.

    .. note:: The coordinate system used is x -> North, y -> East and
        z -> Down

    Parameters:

    * x1, x2 : float
        South and north borders of the prism
    * y1, y2 : float
        West and east borders of the prism
    * z1, z2 : float
        Top and bottom of the prism
    * props : dict
        Physical properties assigned to the prism.
        Ex: ``props={'density':10, 'magnetization':10000}``
    """

    def __init__(self, x1, x2, y1, y2, z1, z2, props=None):
        super().__init__(props)
        # Store every border as a float, whatever the input type
        borders = (x1, x2, y1, y2, z1, z2)
        for name, value in zip(('x1', 'x2', 'y1', 'y2', 'z1', 'z2'),
                               borders):
            setattr(self, name, float(value))

    def __str__(self):
        """Return a string representation of the prism."""
        pairs = [('x1', self.x1), ('x2', self.x2), ('y1', self.y1),
                 ('y2', self.y2), ('z1', self.z1), ('z2', self.z2)]
        pairs += [(key, self.props[key]) for key in sorted(self.props)]
        return ' | '.join('{}:{:g}'.format(key, val) for key, val in pairs)

    def get_bounds(self):
        """
        Get the bounding box of the prism (i.e., the borders of the prism).

        Returns:

        * bounds : list
            ``[x1, x2, y1, y2, z1, z2]``, the bounds of the prism
        """
        return [self.x1, self.x2, self.y1, self.y2, self.z1, self.z2]

    def center(self):
        """
        Return the coordinates of the center of the prism.

        Returns:

        * coords : array = [xc, yc, zc]
            Coordinates of the center
        """
        return 0.5 * np.array([self.x1 + self.x2,
                               self.y1 + self.y2,
                               self.z1 + self.z2])
class Tesseroid(GeometricElement):
    """
    A tesseroid (spherical prism).

    Parameters:

    * w, e : float
        West and east borders of the tesseroid in decimal degrees
    * s, n : float
        South and north borders of the tesseroid in decimal degrees
    * top, bottom : float
        Bottom and top of the tesseroid with respect to the mean earth
        radius in meters. Ex: if the top is 100 meters above the mean earth
        radius, ``top=100``, if 100 meters below ``top=-100``.
    * props : dict
        Physical properties assigned to the tesseroid.
        Ex: ``props={'density':10, 'magnetization':10000}``

    Examples:

        >>> from fatiando.mesher import Tesseroid
        >>> t = Tesseroid(1, 2, 3, 4, 6, 5, {'density':200})
        >>> t.props['density']
        200
        >>> print(t.get_bounds())
        [1.0, 2.0, 3.0, 4.0, 6.0, 5.0]
        >>> print(t)
        w:1 | e:2 | s:3 | n:4 | top:6 | bottom:5 | density:200
        >>> t = Tesseroid(1, 2, 3, 4, 6, 5)
        >>> print(t)
        w:1 | e:2 | s:3 | n:4 | top:6 | bottom:5
        >>> t.addprop('density', 2670)
        >>> print(t)
        w:1 | e:2 | s:3 | n:4 | top:6 | bottom:5 | density:2670
    """

    def __init__(self, w, e, s, n, top, bottom, props=None):
        super().__init__(props)
        # All bounds are stored as floats, whatever the input type
        self.w = float(w)
        self.e = float(e)
        self.s = float(s)
        self.n = float(n)
        self.bottom = float(bottom)
        self.top = float(top)

    def __str__(self):
        """Return a string representation of the tesseroid."""
        names = [('w', self.w), ('e', self.e), ('s', self.s),
                 ('n', self.n), ('top', self.top), ('bottom', self.bottom)]
        names.extend((p, self.props[p]) for p in sorted(self.props))
        return ' | '.join('%s:%g' % (n, v) for n, v in names)

    def get_bounds(self):
        """
        Get the bounding box of the tesseroid (i.e., the borders).

        Returns:

        * bounds : list
            ``[w, e, s, n, top, bottom]``, the bounds of the tesseroid
        """
        return [self.w, self.e, self.s, self.n, self.top, self.bottom]

    def half(self, lon=True, lat=True, r=True):
        """
        Divide the tesseroid in 2 halfs for each dimension (total 8).

        The smaller tesseroids will share the large one's props.

        Parameters:

        * lon, lat, r : True or False
            Dimensions along which the tesseroid will be split in half.

        Returns:

        * tesseroids : list
            A list of maximum 8 tesseroids that make up the larger one.
        """
        dlon = 0.5 * (self.e - self.w)
        dlat = 0.5 * (self.n - self.s)
        dh = 0.5 * (self.top - self.bottom)
        # Two starting edges per dimension -> two halves per dimension
        wests = [self.w, self.w + dlon]
        souths = [self.s, self.s + dlat]
        bottoms = [self.bottom, self.bottom + dh]
        # Disabling a dimension: restore the full extent and keep only one
        # starting edge so that dimension is not split
        if not lon:
            dlon *= 2
            wests.pop()
        if not lat:
            dlat *= 2
            souths.pop()
        if not r:
            dh *= 2
            bottoms.pop()
        # Radial index varies fastest, then latitude, then longitude
        split = [
            Tesseroid(i, i + dlon, j, j + dlat, k + dh, k, props=self.props)
            for i in wests for j in souths for k in bottoms]
        return split

    def split(self, nlon, nlat, nh):
        """
        Split the tesseroid into smaller ones.

        The smaller tesseroids will share the large one's props.

        Parameters:

        * nlon, nlat, nh : int
            The number of sections to split in the longitudinal,
            latitudinal, and vertical dimensions

        Returns:

        * tesseroids : list
            A list of nlon*nlat*nh tesseroids that make up the larger one.
        """
        # linspace gives the n+1 edges; the first n are the starting borders
        wests = np.linspace(self.w, self.e, nlon + 1)
        souths = np.linspace(self.s, self.n, nlat + 1)
        bottoms = np.linspace(self.bottom, self.top, nh + 1)
        dlon = wests[1] - wests[0]
        dlat = souths[1] - souths[0]
        dh = bottoms[1] - bottoms[0]
        # Radial index varies fastest, then latitude, then longitude
        tesseroids = [
            Tesseroid(i, i + dlon, j, j + dlat, k + dh, k, props=self.props)
            for i in wests[:-1] for j in souths[:-1] for k in bottoms[:-1]]
        return tesseroids
class Sphere(GeometricElement):
    """
    A sphere.

    .. note:: The coordinate system used is x -> North, y -> East and
        z -> Down

    Parameters:

    * x, y, z : float
        The coordinates of the center of the sphere
    * radius : float
        The radius of the sphere
    * props : dict
        Physical properties assigned to the sphere.
        Ex: ``props={'density':10, 'magnetization':10000}``
    """

    def __init__(self, x, y, z, radius, props=None):
        super().__init__(props)
        # The center array keeps the raw (unconverted) input values so its
        # dtype matches whatever the caller passed in.
        self.center = np.array([x, y, z])
        self.x = float(x)
        self.y = float(y)
        self.z = float(z)
        self.radius = float(radius)

    def __str__(self):
        """Return a string representation of the sphere."""
        pairs = [('x', self.x), ('y', self.y), ('z', self.z),
                 ('radius', self.radius)]
        pairs += [(key, self.props[key]) for key in sorted(self.props)]
        return ' | '.join('{}:{:g}'.format(key, val) for key, val in pairs)
class PolygonalPrism(GeometricElement):
    """
    A 3D prism with polygonal cross-section.

    .. note:: The coordinate system used is x -> North, y -> East and
        z -> Down

    .. note:: *vertices* must be **CLOCKWISE** or will give inverse result.

    Parameters:

    * vertices : list of lists
        Coordinates of the vertices. A list of ``[x, y]`` pairs.
    * z1, z2 : float
        Top and bottom of the prism
    * props : dict
        Physical properties assigned to the prism.
        Ex: ``props={'density':10, 'magnetization':10000}``

    Examples:

        >>> verts = [[1, 1], [1, 2], [2, 2], [2, 1]]
        >>> p = PolygonalPrism(verts, 0, 3, props={'temperature':25})
        >>> p.props['temperature']
        25
        >>> print(p.z1, p.z2)
        0.0 3.0
        >>> p.addprop('density', 2670)
        >>> print(p.props['density'])
        2670
    """

    def __init__(self, vertices, z1, z2, props=None):
        super().__init__(props)
        # 'np.float' was removed from numpy (>= 1.24); the builtin 'float'
        # is the exact equivalent (float64).
        self.x = np.fromiter((v[0] for v in vertices), dtype=float)
        self.y = np.fromiter((v[1] for v in vertices), dtype=float)
        self.z1 = float(z1)
        self.z2 = float(z2)
        self.nverts = len(vertices)

    def topolygon(self):
        """
        Get the polygon describing the prism viewed from above.

        Returns:

        * polygon : :func:`fatiando.mesher.Polygon`
            The polygon
        """
        verts = np.transpose([self.x, self.y])
        return Polygon(verts, self.props)
| bsd-3-clause | bebbeb00eb2aa7bdacd7df7593ae070d | 27.991182 | 79 | 0.485096 | 3.240931 | false | false | false | false |
fatiando/fatiando | fatiando/_our_duecredit.py | 6 | 1984 | # pylint: skip-file
# emacs: at the end of the file
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### #
"""
Stub file for a guaranteed safe import of duecredit constructs: if duecredit
is not available.
To use it, place it into your project codebase to be imported, e.g. copy as
cp stub.py /path/tomodule/module/due.py
Note that it might be better to avoid naming it duecredit.py to avoid shadowing
installed duecredit.
Then use in your code as
from .due import due, Doi, BibTeX
See https://github.com/duecredit/duecredit/blob/master/README.md for examples.
Origin: Originally a part of the duecredit
Copyright: 2015-2016 DueCredit developers
License: BSD-2
"""
__version__ = '0.0.5'
class InactiveDueCreditCollector(object):
    """Stand-in for duecredit's collector that deliberately does nothing."""

    def _donothing(self, *args, **kwargs):
        """Accept any arguments and do nothing at all."""
        return None

    def dcite(self, *args, **kwargs):
        """Return a decorator that leaves the function untouched."""
        def passthrough(func):
            return func
        return passthrough

    # Every citation entry point shares the same no-op implementation
    cite = load = add = _donothing

    def __repr__(self):
        return '{}()'.format(type(self).__name__)
def _donothing_func(*args, **kwargs):
"""Perform no good and no bad"""
pass
try:
    from duecredit import due, BibTeX, Doi, Url
    # Guard against a broken/partial duecredit install: the imported 'due'
    # must expose .cite for the rest of the code to use it.
    if 'due' in locals() and not hasattr(due, 'cite'):
        raise RuntimeError(
            "Imported due lacks .cite. DueCredit is now disabled")
except Exception as e:
    # A plain ImportError just means duecredit isn't installed (fine);
    # anything else is unexpected and worth logging before falling back.
    if type(e).__name__ != 'ImportError':
        import logging
        logging.getLogger("duecredit").error(
            "Failed to import duecredit due to %s" % str(e))
    # Initiate due stub
    due = InactiveDueCreditCollector()
    BibTeX = Doi = Url = _donothing_func
# Emacs mode definitions
# Local Variables:
# mode: python
# py-indent-offset: 4
# tab-width: 4
# indent-tabs-mode: nil
# End:
| bsd-3-clause | ebe403c72ba29069a5781c0791be60ab | 26.178082 | 79 | 0.628024 | 3.492958 | false | false | false | false |
fatiando/fatiando | cookbook/gravmag_transform_rtp.py | 9 | 1569 | """
GravMag: Reduction to the pole of a total field anomaly using FFT
"""
from fatiando import mesher, gridder, utils
from fatiando.gravmag import prism, transform
from fatiando.vis import mpl
# Direction of the Geomagnetic field
inc, dec = -60, 0
# Make a model with only induced magnetization
model = [mesher.Prism(-100, 100, -100, 100, 0, 2000,
                      {'magnetization': utils.ang2vec(10, inc, dec)})]
area = (-5000, 5000, -5000, 5000)
shape = (100, 100)
# z points down in fatiando, so a negative z0 is an observation height
# above the surface
z0 = -500
x, y, z = gridder.regular(area, shape, z=z0)
# Contaminate the synthetic anomaly with pseudo-random noise
# (fixed seed=0 so the script is reproducible)
tf = utils.contaminate(prism.tf(x, y, z, model, inc, dec),
                       1, seed=0)
# Reduce to the pole using FFT. Since there is only induced magnetization, the
# magnetization direction (sinc and sdec) is the same as the geomagnetic field
pole = transform.reduce_to_pole(x, y, tf, shape, inc, dec, sinc=inc, sdec=dec)
# Calculate the true value at the pole for comparison
true = prism.tf(x, y, z, model, 90, 0, pmag=utils.ang2vec(10, 90, 0))
# Plot the original, true-at-pole, and reduced-to-pole anomalies side by side
fig, axes = mpl.subplots(1, 3, figsize=(14, 4))
for ax in axes:
    ax.set_aspect('equal')
mpl.sca(axes[0])
mpl.title("Original total field anomaly")
mpl.contourf(y, x, tf, shape, 30, cmap=mpl.cm.RdBu_r)
mpl.colorbar(pad=0).set_label('nT')
mpl.m2km()
mpl.sca(axes[1])
mpl.title("True value at pole")
mpl.contourf(y, x, true, shape, 30, cmap=mpl.cm.RdBu_r)
mpl.colorbar(pad=0).set_label('nT')
mpl.m2km()
mpl.sca(axes[2])
mpl.title("Reduced to the pole")
mpl.contourf(y, x, pole, shape, 30, cmap=mpl.cm.RdBu_r)
mpl.colorbar(pad=0).set_label('nT')
mpl.m2km()
mpl.tight_layout()
mpl.show()
| bsd-3-clause | 309c238594ee64a580d2428f21b061fb | 34.659091 | 78 | 0.681326 | 2.597682 | false | false | false | false |
fatiando/fatiando | fatiando/mesher/mesh.py | 6 | 32852 | """
Meshes (collections) of geometric objects.
Meshes behave like lists/arrays of geometric objects (they are iterables).
"""
from __future__ import division, absolute_import
from future.builtins import range, object, super
import numpy as np
import scipy.special
import scipy.interpolate
import copy as cp
from .. import gridder
from .geometry import Square, Prism, Sphere, Tesseroid
class SquareMesh(object):
    """
    A 2D regular mesh of squares.

    For all purposes, :class:`~fatiando.mesher.SquareMesh` can be used as a
    list of :class:`~fatiando.mesher.Square`. The order of the squares in
    the list is: the x direction varies first, then y.

    Parameters:

    * bounds : list = [x1, x2, y1, y2]
        Boundaries of the mesh
    * shape : tuple = (ny, nx)
        Number of squares in the y and x dimension, respectively
    * props : dict
        Physical properties of each square in the mesh. Each key should be
        the name of a physical property. The corresponding value should be a
        list with the values of that particular property on each square of
        the mesh.

    Examples:

        >>> mesh = SquareMesh((0, 4, 0, 6), (2, 2))
        >>> for s in mesh:
        ...     print(s)
        x1:0 | x2:2 | y1:0 | y2:3
        x1:2 | x2:4 | y1:0 | y2:3
        x1:0 | x2:2 | y1:3 | y2:6
        x1:2 | x2:4 | y1:3 | y2:6
        >>> print(mesh[1])
        x1:2 | x2:4 | y1:0 | y2:3
        >>> print(mesh[-1])
        x1:2 | x2:4 | y1:3 | y2:6

    With physical properties::

        >>> mesh = SquareMesh((0, 4, 0, 6), (2, 1), {'slowness': [3.4, 8.6]})
        >>> for s in mesh:
        ...     print(s)
        x1:0 | x2:4 | y1:0 | y2:3 | slowness:3.4
        x1:0 | x2:4 | y1:3 | y2:6 | slowness:8.6

    Or::

        >>> mesh = SquareMesh((0, 4, 0, 6), (2, 1))
        >>> mesh.addprop('slowness', [3.4, 8.6])
        >>> for s in mesh:
        ...     print(s)
        x1:0 | x2:4 | y1:0 | y2:3 | slowness:3.4
        x1:0 | x2:4 | y1:3 | y2:6 | slowness:8.6

    """

    def __init__(self, bounds, shape, props=None):
        ny, nx = shape
        size = int(nx * ny)
        x1, x2, y1, y2 = bounds
        dx = (x2 - x1)/nx
        dy = (y2 - y1)/ny
        self.bounds = bounds
        self.shape = tuple(int(i) for i in shape)
        self.size = size
        self.dims = (dx, dy)
        # props defaults to None instead of {} because a mutable default
        # argument would be shared between all instances of the class and
        # changes to one instance would leak into the others.
        if props is None:
            self.props = {}
        else:
            self.props = props
        # The index of the current square in an iteration. Needed when the
        # mesh is used as an iterator
        self.i = 0
        # List of masked squares. __getitem__ returns None for these
        self.mask = []

    def __len__(self):
        return self.size

    def __getitem__(self, index):
        # To walk backwards in the list
        if index < 0:
            index = self.size + index
        if index in self.mask:
            return None
        ny, nx = self.shape
        # Recover the (i, j) grid position from the flat index (x varies
        # fastest).
        j = index//nx
        i = index - j*nx
        x1 = self.bounds[0] + self.dims[0] * i
        x2 = x1 + self.dims[0]
        y1 = self.bounds[2] + self.dims[1] * j
        y2 = y1 + self.dims[1]
        props = dict([p, self.props[p][index]] for p in self.props)
        return Square((x1, x2, y1, y2), props=props)

    def __iter__(self):
        self.i = 0
        return self

    def next(self):
        if self.i >= self.size:
            raise StopIteration
        square = self.__getitem__(self.i)
        self.i += 1
        return square

    # BUGFIX: Python 3 looks up __next__ for the iterator protocol. Without
    # this alias, iterating a SquareMesh fails with TypeError on Python 3.
    __next__ = next

    def addprop(self, prop, values):
        """
        Add physical property values to the cells in the mesh.

        Different physical properties of the mesh are stored in a dictionary.

        Parameters:

        * prop : str
            Name of the physical property
        * values : list or array
            The value of this physical property in each square of the mesh.
            For the ordering of squares in the mesh see
            :class:`~fatiando.mesher.SquareMesh`

        """
        self.props[prop] = values

    def get_xs(self):
        """
        Get a list of the x coordinates of the corners of the cells in the
        mesh.

        If the mesh has nx cells, get_xs() will return nx + 1 values.
        """
        dx, dy = self.dims
        x1, x2, y1, y2 = self.bounds
        ny, nx = self.shape
        xs = np.arange(x1, x2 + dx, dx, 'f')
        # Floating point rounding can make arange return one element more or
        # one less than the expected nx + 1; compensate for both cases.
        if len(xs) == nx + 2:
            return xs[0:-1]
        elif len(xs) == nx:
            xs = xs.tolist()
            xs.append(x2)
            return np.array(xs)
        else:
            return xs

    def get_ys(self):
        """
        Get a list of the y coordinates of the corners of the cells in the
        mesh.

        If the mesh has ny cells, get_ys() will return ny + 1 values.
        """
        dx, dy = self.dims
        x1, x2, y1, y2 = self.bounds
        ny, nx = self.shape
        # BUGFIX: include the y2 endpoint (y2 + dy upper limit) so this is
        # consistent with get_xs; the branches below still guard against
        # floating point rounding in arange.
        ys = np.arange(y1, y2 + dy, dy, 'f')
        if len(ys) == ny + 2:
            return ys[0:-1]
        elif len(ys) == ny:
            ys = ys.tolist()
            ys.append(y2)
            return np.array(ys)
        else:
            return ys

    def copy(self):
        """ Return a deep copy of the current instance."""
        return cp.deepcopy(self)
class PointGrid(object):
    """
    A regular grid of 3D point sources (spheres of unit volume).

    Use this as a 1D list of :class:`~fatiando.mesher.Sphere`.

    Grid points are ordered like a C matrix: first each row in a column, then
    change columns. In this case, the x direction (North-South) are the rows
    and y (East-West) are the columns.

    Parameters:

    * area : list = [x1, x2, y1, y2]
        The area where the grid will be spread out
    * z : float or 1d-array
        The z coordinates of each point in the grid (remember, z is positive
        downward).
    * shape : tuple = (nx, ny)
        The number of points in the x and y directions
    * props : dict
        Physical properties of each point in the grid. Each key should be
        the name of a physical property. The corresponding value should be a
        list with the values of that particular property for each point in
        the grid.

    Examples::

        >>> g = PointGrid([0, 10, 2, 6], 200, (2, 3))
        >>> g.shape
        (2, 3)
        >>> g.size
        6
        >>> g[0].center
        array([   0.,    2.,  200.])
        >>> g[-1].center
        array([  10.,    6.,  200.])
        >>> g.dx, g.dy
        (10.0, 2.0)

    """

    def __init__(self, area, z, shape, props=None):
        self.area = area
        self.shape = shape
        # props defaults to None instead of {} because a mutable default
        # argument would be shared between all instances of the class.
        if props is None:
            self.props = {}
        else:
            self.props = props
        nx, ny = shape
        self.size = nx*ny
        # Adding z to an array of zeros broadcasts scalars and 1d arrays
        # alike into a per-point z array.
        self.z = np.zeros(self.size) + z
        # Radius of a sphere with unit volume: (3 / 4pi)^(1/3)
        self.radius = scipy.special.cbrt(3. / (4. * np.pi))
        self.x, self.y = gridder.regular(area, shape)
        # The spacing between points
        self.dx, self.dy = gridder.spacing(area, shape)

    def __len__(self):
        return self.size

    def __getitem__(self, index):
        if not isinstance(index, int):
            raise IndexError('Invalid index type. Should be int.')
        if index >= self.size or index < -self.size:
            raise IndexError('Grid index out of range.')
        # To walk backwards in the list
        if index < 0:
            index = self.size + index
        props = dict([p, self.props[p][index]] for p in self.props)
        sphere = Sphere(self.x[index], self.y[index], self.z[index],
                        self.radius, props=props)
        return sphere

    def __iter__(self):
        self.i = 0
        return self

    def next(self):
        if self.i >= self.size:
            raise StopIteration
        sphere = self.__getitem__(self.i)
        self.i += 1
        return sphere

    # BUGFIX: Python 3 looks up __next__ for the iterator protocol. Without
    # this alias, iterating a PointGrid fails with TypeError on Python 3.
    __next__ = next

    def addprop(self, prop, values):
        """
        Add physical property values to the points in the grid.

        Different physical properties of the grid are stored in a dictionary.

        Parameters:

        * prop : str
            Name of the physical property.
        * values : list or array
            Value of this physical property in each point of the grid

        """
        self.props[prop] = values

    def split(self, shape):
        """
        Divide the grid into subgrids.

        .. note:: Remember that x is the North-South direction and y is
            East-West.

        Parameters:

        * shape : tuple = (nx, ny)
            Number of subgrids along the x and y directions, respectively.
            Both must divide the corresponding dimension of this grid.

        Returns:

        * subgrids : list
            List of :class:`~fatiando.mesher.PointGrid`

        """
        nx, ny = shape
        totalx, totaly = self.shape
        if totalx % nx != 0 or totaly % ny != 0:
            raise ValueError(
                'Cannot split! nx and ny must be divisible by grid shape')
        x1, x2, y1, y2 = self.area
        xs = np.linspace(x1, x2, totalx)
        ys = np.linspace(y1, y2, totaly)
        # mx, my: number of points of this grid per subgrid in x and y
        mx, my = (totalx//nx, totaly//ny)
        dx, dy = self.dx*(mx - 1), self.dy*(my - 1)
        subs = []
        for i, xstart in enumerate(xs[::mx]):
            for j, ystart in enumerate(ys[::my]):
                area = [xstart, xstart + dx, ystart, ystart + dy]
                props = {}
                # Slice out the block of properties and depths that falls
                # inside this subgrid.
                for p in self.props:
                    pmatrix = np.reshape(self.props[p], self.shape)
                    props[p] = pmatrix[i*mx:(i + 1)*mx,
                                       j*my:(j + 1)*my].ravel()
                zmatrix = np.reshape(self.z, self.shape)
                zs = zmatrix[i*mx:(i + 1)*mx,
                             j*my:(j + 1)*my].ravel()
                subs.append(PointGrid(area, zs, (mx, my), props))
        return subs

    def copy(self):
        """ Return a deep copy of the current instance."""
        return cp.deepcopy(self)
class PrismRelief(object):
    """
    A 3D model of a relief (topography) using prisms.

    Use to generate:
    * topographic model
    * basin model
    * Moho model
    * etc

    PrismRelief can be used as a list of prisms. It acts as an iterator (so
    you can loop over prisms). It also has a ``__getitem__`` method to access
    individual elements in the mesh. In practice, PrismRelief should be able
    to be passed to any function that asks for a list of prisms, like
    :func:`fatiando.gravmag.prism.gz`.

    Parameters:

    * ref : float
        Reference level. Prisms will have:
            * bottom on zref and top on z if z > zref;
            * bottom on z and top on zref otherwise.
    * dims : tuple = (dy, dx)
        Dimensions of the prisms in the y and x directions
    * nodes : list of lists = [x, y, z]
        Coordinates of the center of the top face of each prism. x, y, and z
        are lists with the x, y and z coordinates on a regular grid.

    """

    def __init__(self, ref, dims, nodes):
        x, y, z = nodes
        # BUGFIX: the original chained comparison
        # ``len(x) != len(y) != len(z)`` means
        # ``len(x) != len(y) and len(y) != len(z)``, which misses the case
        # where x and y match but z does not.
        if not len(x) == len(y) == len(z):
            raise ValueError(
                "nodes has x, y, z coordinate arrays of different lengths")
        self.x, self.y, self.z = x, y, z
        self.size = len(x)
        self.ref = ref
        self.dy, self.dx = dims
        self.props = {}
        # The index of the current prism in an iteration. Needed when the
        # mesh is used as an iterator
        self.i = 0

    def __len__(self):
        return self.size

    def __iter__(self):
        self.i = 0
        return self

    def __getitem__(self, index):
        # To walk backwards in the list
        if index < 0:
            index = self.size + index
        xc, yc, zc = self.x[index], self.y[index], self.z[index]
        x1 = xc - 0.5 * self.dx
        x2 = xc + 0.5 * self.dx
        y1 = yc - 0.5 * self.dy
        y2 = yc + 0.5 * self.dy
        # The prism spans between the node depth and the reference level,
        # whichever order puts z1 above z2 (z is positive down).
        if zc <= self.ref:
            z1 = zc
            z2 = self.ref
        else:
            z1 = self.ref
            z2 = zc
        props = dict([p, self.props[p][index]] for p in self.props)
        return Prism(x1, x2, y1, y2, z1, z2, props=props)

    def next(self):
        if self.i >= self.size:
            raise StopIteration
        prism = self.__getitem__(self.i)
        self.i += 1
        return prism

    # BUGFIX: Python 3 looks up __next__ for the iterator protocol. Without
    # this alias, iterating a PrismRelief fails with TypeError on Python 3.
    __next__ = next

    def addprop(self, prop, values):
        """
        Add physical property values to the prisms.

        .. warning:: If the z value of any point in the relief is below the
            reference level, its corresponding prism will have the physical
            property value with opposite sign than was assigned to it.

        Parameters:

        * prop : str
            Name of the physical property.
        * values : list
            List or array with the value of this physical property in each
            prism of the relief.

        """
        def correct(v, i):
            # Flip the sign for prisms above the reference level so that
            # "negative relief" subtracts instead of adds.
            if self.z[i] > self.ref:
                return -v
            return v
        self.props[prop] = [correct(v, i) for i, v in enumerate(values)]

    def copy(self):
        """ Return a deep copy of the current instance."""
        return cp.deepcopy(self)
class PrismMesh(object):
    """
    A 3D regular mesh of right rectangular prisms.

    Prisms are ordered as follows: first layers (z coordinate),
    then EW rows (y) and finally x coordinate (NS).

    .. note:: Remember that the coordinate system is x->North, y->East and
        z->Down

    Ex: in a mesh with shape ``(3,3,3)`` the 15th element (index 14) has z
    index 1 (second layer), y index 1 (second row), and x index 2 (third
    element in the column).

    :class:`~fatiando.mesher.PrismMesh` can be used as a list of prisms. It
    acts as an iterator (so you can loop over prisms). It also has a
    ``__getitem__`` method to access individual elements in the mesh.
    In practice, :class:`~fatiando.mesher.PrismMesh` should be able to be
    passed to any function that asks for a list of prisms, like
    :func:`fatiando.gravmag.prism.gz`.

    To make the mesh incorporate a topography, use
    :meth:`~fatiando.mesher.PrismMesh.carvetopo`

    Parameters:

    * bounds : list = [xmin, xmax, ymin, ymax, zmin, zmax]
        Boundaries of the mesh.
    * shape : tuple = (nz, ny, nx)
        Number of prisms in the x, y, and z directions.
    * props : dict
        Physical properties of each prism in the mesh.
        Each key should be the name of a physical property. The corresponding
        value should be a list with the values of that particular property on
        each prism of the mesh.

    Examples:

        >>> from fatiando.mesher import PrismMesh
        >>> mesh = PrismMesh((0, 1, 0, 2, 0, 3), (1, 2, 2))
        >>> for p in mesh:
        ...     print(p)
        x1:0 | x2:0.5 | y1:0 | y2:1 | z1:0 | z2:3
        x1:0.5 | x2:1 | y1:0 | y2:1 | z1:0 | z2:3
        x1:0 | x2:0.5 | y1:1 | y2:2 | z1:0 | z2:3
        x1:0.5 | x2:1 | y1:1 | y2:2 | z1:0 | z2:3
        >>> print(mesh[0])
        x1:0 | x2:0.5 | y1:0 | y2:1 | z1:0 | z2:3
        >>> print(mesh[-1])
        x1:0.5 | x2:1 | y1:1 | y2:2 | z1:0 | z2:3

    One with physical properties::

        >>> props = {'density':[2670.0, 1000.0]}
        >>> mesh = PrismMesh((0, 2, 0, 4, 0, 3), (1, 1, 2), props=props)
        >>> for p in mesh:
        ...     print(p)
        x1:0 | x2:1 | y1:0 | y2:4 | z1:0 | z2:3 | density:2670
        x1:1 | x2:2 | y1:0 | y2:4 | z1:0 | z2:3 | density:1000

    or equivalently::

        >>> mesh = PrismMesh((0, 2, 0, 4, 0, 3), (1, 1, 2))
        >>> mesh.addprop('density', [200, -1000.0])
        >>> for p in mesh:
        ...     print(p)
        x1:0 | x2:1 | y1:0 | y2:4 | z1:0 | z2:3 | density:200
        x1:1 | x2:2 | y1:0 | y2:4 | z1:0 | z2:3 | density:-1000

    You can use :meth:`~fatiando.mesher.PrismMesh.get_xs` (and similar
    methods for y and z) to get the x coordinates of the prisms in the mesh::

        >>> mesh = PrismMesh((0, 2, 0, 4, 0, 3), (1, 1, 2))
        >>> print(mesh.get_xs())
        [ 0.  1.  2.]
        >>> print(mesh.get_ys())
        [ 0.  4.]
        >>> print(mesh.get_zs())
        [ 0.  3.]

    The ``shape`` of the mesh must be integer!

        >>> mesh = PrismMesh((0, 2, 0, 4, 0, 3), (1, 1, 2.5))
        Traceback (most recent call last):
            ...
        AttributeError: Invalid mesh shape (1, 1, 2.5). shape must be integers

    """
    # Factory used by __getitem__; subclasses (e.g. TesseroidMesh) override
    # this to produce other cell types.
    celltype = Prism

    def __init__(self, bounds, shape, props=None):
        nz, ny, nx = shape
        if not isinstance(nx, int) or not isinstance(ny, int) or \
                not isinstance(nz, int):
            raise AttributeError(
                'Invalid mesh shape {}. shape must be integers'.format(
                    str(shape)))
        size = int(nx * ny * nz)
        x1, x2, y1, y2, z1, z2 = bounds
        dx = (x2 - x1)/nx
        dy = (y2 - y1)/ny
        dz = (z2 - z1)/nz
        self.shape = tuple(int(i) for i in shape)
        self.size = size
        self.dims = (dx, dy, dz)
        self.bounds = bounds
        # props defaults to None instead of {} because a mutable default
        # argument would be shared between all instances of the class.
        if props is None:
            self.props = {}
        else:
            self.props = props
        # The index of the current prism in an iteration. Needed when the
        # mesh is used as an iterator
        self.i = 0
        # List of masked prisms. __getitem__ returns None for these
        self.mask = []
        # Whether or not to change heights to z coordinate
        self.zdown = True

    def __len__(self):
        return self.size

    def __getitem__(self, index):
        if index >= self.size or index < -self.size:
            raise IndexError('mesh index out of range')
        # To walk backwards in the list
        if index < 0:
            index = self.size + index
        if index in self.mask:
            return None
        nz, ny, nx = self.shape
        # Recover the (i, j, k) grid position from the flat index
        # (x varies fastest, then y, then z).
        k = index//(nx*ny)
        j = (index - k*(nx*ny))//nx
        i = (index - k*(nx*ny) - j*nx)
        x1 = self.bounds[0] + self.dims[0] * i
        x2 = x1 + self.dims[0]
        y1 = self.bounds[2] + self.dims[1] * j
        y2 = y1 + self.dims[1]
        z1 = self.bounds[4] + self.dims[2] * k
        z2 = z1 + self.dims[2]
        props = dict([p, self.props[p][index]] for p in self.props)
        return self.celltype(x1, x2, y1, y2, z1, z2, props=props)

    def __iter__(self):
        self.i = 0
        return self

    def next(self):
        if self.i >= self.size:
            raise StopIteration
        prism = self.__getitem__(self.i)
        self.i += 1
        return prism

    # BUGFIX: Python 3 looks up __next__ for the iterator protocol. Without
    # this alias, iterating a PrismMesh fails with TypeError on Python 3.
    __next__ = next

    def addprop(self, prop, values):
        """
        Add physical property values to the cells in the mesh.

        Different physical properties of the mesh are stored in a dictionary.

        Parameters:

        * prop : str
            Name of the physical property.
        * values : list or array
            Value of this physical property in each prism of the mesh. For
            the ordering of prisms in the mesh see
            :class:`~fatiando.mesher.PrismMesh`

        """
        self.props[prop] = values

    def carvetopo(self, x, y, height, below=False):
        """
        Mask (remove) prisms from the mesh that are above the topography.

        Accessing the ith prism will return None if it was masked (above the
        topography). Also mask prisms outside of the topography grid
        provided. The topography height information does not need to be on a
        regular grid, it will be interpolated.

        Parameters:

        * x, y : lists
            x and y coordinates of the grid points
        * height : list or array
            Array with the height of the topography
        * below : boolean
            Will mask prisms below the input surface if set to *True*.

        """
        nz, ny, nx = self.shape
        x1, x2, y1, y2, z1, z2 = self.bounds
        dx, dy, dz = self.dims
        # The coordinates of the centers of the cells
        xc = np.arange(x1, x2, dx) + 0.5 * dx
        # Sometimes arange returns more due to rounding
        if len(xc) > nx:
            xc = xc[:-1]
        yc = np.arange(y1, y2, dy) + 0.5 * dy
        if len(yc) > ny:
            yc = yc[:-1]
        zc = np.arange(z1, z2, dz) + 0.5 * dz
        if len(zc) > nz:
            zc = zc[:-1]
        XC, YC = np.meshgrid(xc, yc)
        topo = scipy.interpolate.griddata((x, y), height, (XC, YC),
                                          method='cubic').ravel()
        if self.zdown:
            # -1 is to transform height into z coordinate
            topo = -1 * topo
        # griddata returns a masked array. If the interpolated point is out
        # of the data range, mask will be True. Use this to remove all cells
        # below a masked topo point (ie, one with no height information)
        if np.ma.isMA(topo):
            topo_mask = topo.mask
        else:
            topo_mask = [False for i in range(len(topo))]
        c = 0
        for cellz in zc:
            for h, masked in zip(topo, topo_mask):
                if below:
                    if (masked or
                            (cellz > h and self.zdown) or
                            (cellz < h and not self.zdown)):
                        self.mask.append(c)
                else:
                    if (masked or
                            (cellz < h and self.zdown) or
                            (cellz > h and not self.zdown)):
                        self.mask.append(c)
                c += 1

    def get_xs(self):
        """
        Return an array with the x coordinates of the prisms in mesh.
        """
        x1, x2, y1, y2, z1, z2 = self.bounds
        dx, dy, dz = self.dims
        nz, ny, nx = self.shape
        xs = np.arange(x1, x2 + dx, dx)
        # arange can return one element too many due to rounding
        if xs.size > nx + 1:
            return xs[:-1]
        return xs

    def get_ys(self):
        """
        Return an array with the y coordinates of the prisms in mesh.
        """
        x1, x2, y1, y2, z1, z2 = self.bounds
        dx, dy, dz = self.dims
        nz, ny, nx = self.shape
        ys = np.arange(y1, y2 + dy, dy)
        if ys.size > ny + 1:
            return ys[:-1]
        return ys

    def get_zs(self):
        """
        Return an array with the z coordinates of the prisms in mesh.
        """
        x1, x2, y1, y2, z1, z2 = self.bounds
        dx, dy, dz = self.dims
        nz, ny, nx = self.shape
        zs = np.arange(z1, z2 + dz, dz)
        if zs.size > nz + 1:
            return zs[:-1]
        return zs

    def get_layer(self, i):
        """
        Return the set of prisms corresponding to the ith layer of the mesh.

        Parameters:

        * i : int
            The index of the layer

        Returns:

        * prisms : list of :class:`~fatiando.mesher.Prism`
            The prisms in the ith layer

        """
        nz, ny, nx = self.shape
        if i >= nz or i < 0:
            raise IndexError('Layer index %d is out of range.' % (i))
        start = i * nx * ny
        end = (i + 1) * nx * ny
        layer = [self.__getitem__(p) for p in range(start, end)]
        return layer

    def layers(self):
        """
        Returns an iterator over the layers of the mesh.
        """
        nz, ny, nx = self.shape
        for i in range(nz):
            yield self.get_layer(i)

    def dump(self, meshfile, propfile, prop):
        r"""
        Dump the mesh to a file in the format required by UBC-GIF program
        MeshTools3D.

        Parameters:

        * meshfile : str or file
            Output file to save the mesh. Can be a file name or an open file.
        * propfile : str or file
            Output file to save the physical properties *prop*. Can be a
            file name or an open file.
        * prop : str
            The name of the physical property in the mesh that will be saved
            to *propfile*.

        .. note:: Uses -10000000 as the dummy value for plotting topography

        Examples:

            >>> from io import StringIO
            >>> meshfile = StringIO()
            >>> densfile = StringIO()
            >>> mesh = PrismMesh((0, 10, 0, 20, 0, 5), (1, 2, 2))
            >>> mesh.addprop('density', [1, 2, 3, 4])
            >>> mesh.dump(meshfile, densfile, 'density')
            >>> print(meshfile.getvalue().strip())
            2 2 1
            0 0 0
            2*10
            2*5
            1*5
            >>> print(densfile.getvalue().strip())
            1.0000
            3.0000
            2.0000
            4.0000

        """
        if prop not in self.props:
            raise ValueError("mesh doesn't have a '%s' property." % (prop))
        isstr = False
        if isinstance(meshfile, str):
            isstr = True
            meshfile = open(meshfile, 'w')
        nz, ny, nx = self.shape
        x1, x2, y1, y2, z1, z2 = self.bounds
        dx, dy, dz = self.dims
        meshfile.writelines([
            "%d %d %d\n" % (ny, nx, nz),
            "%g %g %g\n" % (y1, x1, -z1),
            "%d*%g\n" % (ny, dy),
            "%d*%g\n" % (nx, dx),
            "%d*%g" % (nz, dz)])
        if isstr:
            meshfile.close()
        # BUGFIX: np.float was deprecated and removed in NumPy 1.24;
        # the builtin float is the equivalent dtype.
        values = np.fromiter(self.props[prop], dtype=float)
        # Replace the masked cells with a dummy value
        values[self.mask] = -10000000
        # UBC-GIF expects Fortran (column-major) ordering
        reordered = np.ravel(np.reshape(values, self.shape), order='F')
        np.savetxt(propfile, reordered, fmt='%.4f')

    def copy(self):
        """ Return a deep copy of the current instance."""
        return cp.deepcopy(self)
class TesseroidMesh(PrismMesh):
    """
    A 3D regular mesh of tesseroids.

    Tesseroids are ordered as follows: first layers (height coordinate),
    then N-S rows and finally E-W columns. Ex: in a mesh with shape
    ``(3,3,3)`` the 15th element (index 14) has height index 1 (second
    layer), y index 1 (second row), and x index 2 (third element in the
    column).

    This class can be used as a list of tesseroids: it acts as an iterator
    (so you can loop over tesseroids) and it has a ``__getitem__`` method to
    access individual elements in the mesh. In practice, it should be able
    to be passed to any function that asks for a list of tesseroids, like
    :func:`fatiando.gravmag.tesseroid.gz`.

    To make the mesh incorporate a topography, use
    :meth:`~fatiando.mesher.TesseroidMesh.carvetopo`

    Parameters:

    * bounds : list = [w, e, s, n, top, bottom]
        Boundaries of the mesh. ``w, e, s, n`` in degrees, ``top`` and
        ``bottom`` are heights (positive upward) and in meters.
    * shape : tuple = (nr, nlat, nlon)
        Number of tesseroids in the radial, latitude, and longitude
        directions.
    * props : dict
        Physical properties of each tesseroid in the mesh. Each key should
        be the name of a physical property. The corresponding value should
        be a list with the values of that particular property on each
        tesseroid of the mesh.

    Examples:

        >>> from fatiando.mesher import TesseroidMesh
        >>> mesh = TesseroidMesh((0, 1, 0, 2, 3, 0), (1, 2, 2))
        >>> for p in mesh:
        ...     print(p)
        w:0 | e:0.5 | s:0 | n:1 | top:3 | bottom:0
        w:0.5 | e:1 | s:0 | n:1 | top:3 | bottom:0
        w:0 | e:0.5 | s:1 | n:2 | top:3 | bottom:0
        w:0.5 | e:1 | s:1 | n:2 | top:3 | bottom:0

    The ``shape`` of the mesh must be integer!

        >>> mesh = TesseroidMesh((0, 2, 0, 4, 0, 3), (1, 1, 2.5))
        Traceback (most recent call last):
            ...
        AttributeError: Invalid mesh shape (1, 1, 2.5). shape must be integers

    """
    # Cells produced by the inherited __getitem__ are tesseroids, not prisms.
    celltype = Tesseroid

    def __init__(self, bounds, shape, props=None):
        super().__init__(bounds, shape, props)
        # Heights are positive upward, so the inherited "z is down"
        # convention does not apply to tesseroids.
        self.zdown = False
        # Dumping to the UBC-GIF format makes no sense for tesseroids, so
        # the inherited PrismMesh.dump method is disabled on instances.
        self.dump = None
| bsd-3-clause | 217ba6aac8e81d85aadd2b2cbb0b775f | 31.623635 | 79 | 0.502161 | 3.383316 | false | false | false | false |
desihub/desisim | py/desisim/quickcat.py | 1 | 28069 | '''
desisim.quickcat
================
Code for quickly generating an output zcatalog given fiber assignment tiles,
a truth catalog, and optionally a previous zcatalog.
'''
from __future__ import absolute_import, division, print_function
import os
import yaml
from collections import Counter
from pkg_resources import resource_filename
from time import asctime
import numpy as np
from astropy.io import fits
from astropy.table import Table, Column, vstack
import sys
import scipy.special as sp
import desisim
from desisim.targets import get_simtype
import astropy.constants
c = astropy.constants.c.to('km/s').value
from desitarget.targetmask import desi_mask, bgs_mask, mws_mask
from desiutil.log import get_logger
log = get_logger()
#- redshift errors, zwarn, cata fail rate fractions from
#- /project/projectdirs/desi/datachallenge/redwood/spectro/redux/redwood/
#- sigmav = c sigmaz / (1+z)
#- Redshift (velocity) error in km/s per true spectral type.  Commented-out
#- classes are modeled elsewhere in quickcat rather than drawn from this
#- table; SKY and UNKNOWN use a large placeholder value.
_sigma_v = {
    # 'ELG': 38.03,
    # 'LRG': 67.38,
    'BGS': 37.70,
    # 'QSO': 182.16,
    'STAR': 51.51,
    'WD':54.35,
    'SKY': 9999, #- meaningless
    'UNKNOWN': 9999, #- meaningless
    }
#- Fraction of fits flagged with ZWARN per true spectral type (SKY and
#- UNKNOWN always warn).
_zwarn_fraction = {
    # 'ELG': 0.087,
    # 'LRG': 0.007,
    # 'QSO': 0.020,
    'BGS': 0.024,
    'STAR': 0.345,
    'WD':0.094,
    'SKY': 1.0,
    'UNKNOWN': 1.0,
    }
#- Fraction of catastrophic redshift failures per true spectral type.
_cata_fail_fraction = {
    # 'ELG': 0.020,
    # 'LRG': 0.002,
    # 'QSO': 0.012,
    'BGS': 0.003,
    'STAR': 0.050,
    'WD':0.0,
    'SKY': 0.,
    'UNKNOWN': 0.,
    }
def get_zeff_obs(simtype, obsconditions):
    """
    Model how observing conditions modify the redshift efficiency.

    Args:
        simtype: target class string ('LRG', 'QSO', 'ELG', ...).
        obsconditions: Table or dict of per-tile arrays with columns
            'AIRMASS', 'SEEING', 'MOONFRAC', 'MOONALT' and optionally
            'EBMV' and 'LINTRANS'.

    Returns:
        Array of multiplicative efficiency corrections (clipped at >= 0),
        one entry per tile.  If there is no model for ``simtype``, an
        array of ones is returned.
    """
    # Quadratic polynomial coefficients for each condition: airmass (v),
    # E(B-V) (w), seeing (x), transparency (y), moon fraction (z), plus a
    # random scatter sigma_r.
    if simtype == 'LRG':
        p_v = [1.0, 0.15, -0.5]
        p_w = [1.0, 0.4, 0.0]
        p_x = [1.0, 0.06, 0.05]
        p_y = [1.0, 0.0, 0.08]
        p_z = [1.0, 0.0, 0.0]
        sigma_r = 0.02
    elif simtype == 'QSO':
        p_v = [1.0, -0.2, 0.3]
        p_w = [1.0, -0.5, 0.6]
        p_x = [1.0, -0.1, -0.075]
        p_y = [1.0, -0.08, -0.04]
        p_z = [1.0, 0.0, 0.0]
        sigma_r = 0.05
    elif simtype == 'ELG':
        p_v = [1.0, -0.1, -0.2]
        p_w = [1.0, 0.25, -0.75]
        p_x = [1.0, 0.0, 0.05]
        p_y = [1.0, 0.2, 0.1]
        p_z = [1.0, -10.0, 300.0]
        sigma_r = 0.075
    else:
        log.warning('No model for how observing conditions impact {} redshift efficiency'.format(simtype))
        # BUGFIX: return one entry per tile; len(obsconditions) on a plain
        # dict would be the number of columns, not the number of tiles.
        return np.ones(len(np.atleast_1d(obsconditions['AIRMASS'])))
    ncond = len(np.atleast_1d(obsconditions['AIRMASS']))
    # airmass term (quadratic polynomial in the mean-subtracted value)
    v = obsconditions['AIRMASS'] - np.mean(obsconditions['AIRMASS'])
    pv = p_v[0] + p_v[1] * v + p_v[2] * (v**2. - np.mean(v**2))
    # E(B-V) term; the column is optional:
    # KeyError if dict or Table is missing EBMV,
    # ValueError if ndarray is missing EBMV
    try:
        w = obsconditions['EBMV'] - np.mean(obsconditions['EBMV'])
        pw = p_w[0] + p_w[1] * w + p_w[2] * (w**2 - np.mean(w**2))
    except (KeyError, ValueError):
        pw = np.ones(ncond)
    # seeing term
    x = obsconditions['SEEING'] - np.mean(obsconditions['SEEING'])
    px = p_x[0] + p_x[1]*x + p_x[2] * (x**2 - np.mean(x**2))
    # transparency term (optional column, like EBMV)
    try:
        y = obsconditions['LINTRANS'] - np.mean(obsconditions['LINTRANS'])
        py = p_y[0] + p_y[1]*y + p_y[2] * (y**2 - np.mean(y**2))
    except (KeyError, ValueError):
        py = np.ones(ncond)
    # NOTE(review): the moon-illumination polynomial (p_z) is effectively
    # disabled: the original code computed it from MOONFRAC and then
    # unconditionally overwrote it with ones before masking moon-down tiles.
    # The dead computation has been removed here; behavior is unchanged.
    # To re-enable, evaluate p_z against MOONFRAC and set pz=1 only where
    # MOONALT < 0 (moon below the horizon).
    pz = np.ones(ncond)
    pz[obsconditions['MOONALT'] < 0] = 1.0  # no-op while the moon model is off
    # random per-tile scatter on top of the deterministic model
    pr = 1.0 + np.random.normal(size=ncond, scale=sigma_r)
    #- this correction factor can be greater than 1, but not less than 0
    pobs = (pv * pw * px * py * pz * pr).clip(min=0.0)
    return pobs
def get_redshift_efficiency(simtype, targets, truth, targets_in_tile, obsconditions, params, ignore_obscondition=False):
    """
    Simple model to get the redshift efficiency from the observational
    conditions or observed magnitudes+redshift.

    Args:
        simtype: ELG, LRG, QSO, MWS, BGS
        targets: target catalog table; used for TARGETID and photometry
            (FLUX_G/FLUX_R or legacy DECAM_FLUX).
        truth: truth table with OIIFLUX, TRUEZ
        targets_in_tile: dictionary. Keys correspond to tileids, its values
            are the arrays of targetids observed in that tile.
        obsconditions: table of observing conditions with columns
            'TILEID': array of tile IDs
            'AIRMASS': array of airmass values on a tile
            'EBMV': array of E(B-V) values on a tile
            'LINTRANS': array of atmospheric transparency during spectro
                obs; floats [0-1]
            'MOONFRAC': array of moonfraction values on a tile.
            'SEEING': array of FWHM seeing during spectroscopic observation
                on a tile.
        params: dictionary of quickcat parameters (from the quickcat yaml).
        ignore_obscondition: if True, no variation of efficiency with obs.
            conditions (adjustment of exposure time should correct for mean
            change of S/N)

    Returns:
        tuple of arrays (observed, p) both with same length as targets

        observed: boolean array of whether the target was observed in these
            tiles
        p: probability to get this redshift right
    """
    targetid = targets['TARGETID']
    n = len(targetid)
    try:
        if 'DECAM_FLUX' in targets.dtype.names:
            true_gflux = targets['DECAM_FLUX'][:, 1]
            true_rflux = targets['DECAM_FLUX'][:, 2]
        else:
            true_gflux = targets['FLUX_G']
            true_rflux = targets['FLUX_R']
    except Exception:
        # Narrowed from a bare "except:": any missing photometry column or
        # attribute ends up here and is reported uniformly.
        raise Exception('Missing photometry needed to estimate redshift efficiency!')
    # Clip fluxes away from zero to avoid log10/division problems.
    # NOTE(review): this modifies the caller's flux columns in place.
    a_small_flux = 1e-40
    true_gflux[true_gflux < a_small_flux] = a_small_flux
    true_rflux[true_rflux < a_small_flux] = a_small_flux
    if (obsconditions is None) or ('OIIFLUX' not in truth.dtype.names):
        raise Exception('Missing obsconditions and flux information to estimate redshift efficiency')
    if simtype == 'ELG':
        # Read the model OII flux threshold (FDR fig 7.12)
        filename = resource_filename('desisim', 'data/elg_oii_flux_threshold_fdr.txt')
        fdr_z, modified_fdr_oii_flux_threshold = np.loadtxt(filename, unpack=True)
        # Compute OII flux thresholds for truez
        oii_flux_limit = np.interp(truth['TRUEZ'], fdr_z, modified_fdr_oii_flux_threshold)
        oii_flux_limit[oii_flux_limit < 1e-20] = 1e-20
        # Efficiency is modeled as a function of flux_OII/f_OII_threshold(z)
        # and continuum S/N, combined in quadrature, with a fudge factor on
        # the transition width.
        snr_in_lines = params["ELG"]["EFFICIENCY"]["SNR_LINES_SCALE"]*7*truth['OIIFLUX']/oii_flux_limit
        snr_in_continuum = params["ELG"]["EFFICIENCY"]["SNR_CONTINUUM_SCALE"]*true_rflux
        snr_tot = np.sqrt(snr_in_lines**2 + snr_in_continuum**2)
        sigma_fudge = params["ELG"]["EFFICIENCY"]["SIGMA_FUDGE"]
        nsigma = 3.
        simulated_eff = eff_model(snr_tot, nsigma, sigma_fudge)
    elif simtype == 'LRG':
        r_mag = 22.5 - 2.5*np.log10(true_rflux)
        sigmoid_cutoff = params["LRG"]["EFFICIENCY"]["SIGMOID_CUTOFF"]
        sigmoid_fudge = params["LRG"]["EFFICIENCY"]["SIGMOID_FUDGE"]
        simulated_eff = 1./(1.+np.exp((r_mag-sigmoid_cutoff)/sigmoid_fudge))
        log.info("{} eff = sigmoid with cutoff = {:4.3f} fudge = {:4.3f}".format(simtype, sigmoid_cutoff, sigmoid_fudge))
    elif simtype == 'QSO':
        # Tracer and Lyman-alpha QSOs get separate sigmoid parameters,
        # split at redshift zsplit.
        zsplit = params['QSO_ZSPLIT']
        r_mag = 22.5 - 2.5*np.log10(true_rflux)
        simulated_eff = np.ones(r_mag.shape)
        # lowz tracer qsos
        sigmoid_cutoff = params["LOWZ_QSO"]["EFFICIENCY"]["SIGMOID_CUTOFF"]
        sigmoid_fudge = params["LOWZ_QSO"]["EFFICIENCY"]["SIGMOID_FUDGE"]
        ii = (truth['TRUEZ'] <= zsplit)
        simulated_eff[ii] = 1./(1.+np.exp((r_mag[ii]-sigmoid_cutoff)/sigmoid_fudge))
        log.info("{} eff = sigmoid with cutoff = {:4.3f} fudge = {:4.3f}".format("LOWZ QSO", sigmoid_cutoff, sigmoid_fudge))
        # highz lya qsos
        sigmoid_cutoff = params["LYA_QSO"]["EFFICIENCY"]["SIGMOID_CUTOFF"]
        sigmoid_fudge = params["LYA_QSO"]["EFFICIENCY"]["SIGMOID_FUDGE"]
        ii = (truth['TRUEZ'] > zsplit)
        simulated_eff[ii] = 1./(1.+np.exp((r_mag[ii]-sigmoid_cutoff)/sigmoid_fudge))
        log.info("{} eff = sigmoid with cutoff = {:4.3f} fudge = {:4.3f}".format("LYA QSO", sigmoid_cutoff, sigmoid_fudge))
    elif simtype == 'BGS':
        simulated_eff = 0.98 * np.ones(n)
    elif simtype == 'MWS':
        simulated_eff = 0.98 * np.ones(n)
    else:
        default_zeff = 0.98
        log.warning('using default redshift efficiency of {} for {}'.format(default_zeff, simtype))
        simulated_eff = default_zeff * np.ones(n)
    #- Get the corrections for observing conditions per tile, then
    #- correct targets on those tiles. Parameterize in terms of failure
    #- rate instead of success rate to handle bookkeeping of targets that
    #- are observed on more than one tile.
    #- NOTE: this still isn't quite right since multiple observations will
    #- be simultaneously fit instead of just taking whichever individual one
    #- succeeds.
    if ignore_obscondition:
        ncond = len(np.atleast_1d(obsconditions['AIRMASS']))
        zeff_obs = np.ones(ncond)
    else:
        zeff_obs = get_zeff_obs(simtype, obsconditions)
    pfail = np.ones(n)
    observed = np.zeros(n, dtype=bool)
    # More efficient alternative for large numbers of tiles + large target
    # list, but requires pre-computing the sort order of targetids.
    # Assume targets['TARGETID'] is unique, so not checking this.
    sort_targetid = np.argsort(targetid)
    # Extract the targets-per-tile lists into one huge list.
    concat_targets_in_tile = np.concatenate([targets_in_tile[tileid] for tileid in obsconditions['TILEID']])
    ntargets_per_tile = np.array([len(targets_in_tile[tileid]) for tileid in obsconditions['TILEID']])
    # Match entries in each tile list against sorted target list.
    target_idx = targetid[sort_targetid].searchsorted(concat_targets_in_tile, side='left')
    target_idx_r = targetid[sort_targetid].searchsorted(concat_targets_in_tile, side='right')
    del concat_targets_in_tile
    # Flag targets in tiles that do not appear in the target list (sky,
    # standards): left == right insertion point means "not found".
    not_matched = target_idx_r - target_idx == 0
    target_idx[not_matched] = -1
    del target_idx_r, not_matched
    # Not every tile has 5000 targets, so use individual counts to
    # construct offset of each tile in target_idx.
    offset = np.concatenate([[0], np.cumsum(ntargets_per_tile[:-1])])
    # For each tile, process targets.
    for i, tileid in enumerate(obsconditions['TILEID']):
        if ntargets_per_tile[i] > 0:
            # Quickly get all the matched targets on this tile.
            targets_this_tile = target_idx[offset[i]:offset[i]+ntargets_per_tile[i]]
            # BUGFIX: unmatched entries are flagged with -1, so the filter
            # must be >= 0; the original "> 0" silently dropped the target
            # at sorted position 0 (smallest TARGETID) from every tile.
            targets_this_tile = targets_this_tile[targets_this_tile >= 0]
            # List of indices into sorted target list for each observed
            # source.
            ii = sort_targetid[targets_this_tile]
            tmp = (simulated_eff[ii]*zeff_obs[i]).clip(0, 1)
            pfail[ii] *= (1-tmp)
            observed[ii] = True
    # Combine all observations of each target into one success probability.
    simulated_eff = (1-pfail)
    return observed, simulated_eff
# Efficiency model
def eff_model(x, nsigma, sigma, max_efficiency=1):
    """Error-function (smooth step) efficiency model.

    Rises from 0 to ``max_efficiency`` as ``x`` crosses ``nsigma``, with
    a transition width set by ``sigma``; equals ``max_efficiency / 2``
    exactly at ``x == nsigma``.
    """
    scaled = (x - nsigma) / (np.sqrt(2.) * sigma)
    return 0.5 * max_efficiency * (1. + sp.erf(scaled))
def reverse_dictionary(a):
    """Inverts a dictionary mapping.

    Each value of ``a`` (or each element of an iterable value) becomes a
    key of the output, mapped to the list of input keys that referenced it.

    Args:
        a: input dictionary.  Values may be hashable scalars or iterables
            of hashable items.

    Returns:
        b: output reversed dictionary mapping value -> list of input keys.
    """
    b = {}
    for key, value in a.items():
        try:
            # Iterable value: fan out every element to this key.
            for item in value:
                b.setdefault(item, []).append(key)
        except TypeError:
            # Non-iterable (scalar) value.  The original code used a bare
            # ``except:`` here, which also swallowed KeyboardInterrupt etc.
            b.setdefault(value, []).append(key)
    return b
def get_observed_redshifts(targets, truth, targets_in_tile, obsconditions, parameter_filename=None, ignore_obscondition=False):
    """
    Returns observed z, zerr, zwarn arrays given true object types and redshifts

    Args:
        targets: target catalog table; currently used only for target mask bits
        truth: truth table with OIIFLUX, TRUEZ
        targets_in_tile: dictionary. Keys correspond to tileids, its values are the
            arrays of targetids observed in that tile.
        obsconditions: table observing conditions with columns
            'TILEID': array of tile IDs
            'AIRMASS': array of airmass values on a tile
            'EBMV': array of E(B-V) values on a tile
            'LINTRANS': array of atmospheric transparency during spectro obs; floats [0-1]
            'MOONFRAC': array of moonfraction values on a tile.
            'SEEING': array of FWHM seeing during spectroscopic observation on a tile.
        parameter_filename: yaml file with quickcat parameters
        ignore_obscondition: if True, no variation of efficiency with obs. conditions
            (adjustment of exposure time should correct for mean change of S/N)

    Returns:
        tuple of (zout, zerr, zwarn)
    """
    if parameter_filename is None :
        # Load efficiency parameters yaml file
        parameter_filename = resource_filename('desisim', 'data/quickcat.yaml')

    params=None
    with open(parameter_filename,"r") as file :
        params = yaml.safe_load(file)

    simtype = get_simtype(np.char.strip(truth['TRUESPECTYPE']), targets['DESI_TARGET'], targets['BGS_TARGET'], targets['MWS_TARGET'])
    truez = truth['TRUEZ']
    targetid = truth['TARGETID']

    try:
        if 'DECAM_FLUX' in targets.dtype.names :
            true_gflux = targets['DECAM_FLUX'][:, 1]
            true_rflux = targets['DECAM_FLUX'][:, 2]
        else:
            true_gflux = targets['FLUX_G']
            true_rflux = targets['FLUX_R']
    except KeyError:
        # Narrowed from a bare ``except:``; a missing column raises KeyError.
        raise Exception('Missing photometry needed to estimate redshift efficiency!')

    # Clip tiny/negative fluxes so the power-law error models below stay finite.
    # NOTE(review): this clips the flux columns in place and therefore also
    # modifies the caller's `targets` table -- confirm that is intended.
    a_small_flux=1e-40
    true_gflux[true_gflux<a_small_flux]=a_small_flux
    true_rflux[true_rflux<a_small_flux]=a_small_flux

    zout = truez.copy()
    zerr = np.zeros(len(truez), dtype=np.float32)
    zwarn = np.zeros(len(truez), dtype=np.int32)

    objtypes = list(set(simtype))
    n_tiles = len(np.unique(obsconditions['TILEID']))

    if(n_tiles!=len(targets_in_tile)):
        raise ValueError('Number of obsconditions {} != len(targets_in_tile) {}'.format(n_tiles, len(targets_in_tile)))

    for objtype in objtypes:

        # Selection mask for this object type.
        ii=(simtype==objtype)

        ###################################
        # redshift errors
        ###################################

        if objtype =='ELG' :
            sigma = params["ELG"]["UNCERTAINTY"]["SIGMA_17"]
            powerlawindex = params["ELG"]["UNCERTAINTY"]["POWER_LAW_INDEX"]
            oiiflux = truth['OIIFLUX'][ii]*1e17
            zerr[ii] = sigma/(1.e-9+oiiflux**powerlawindex)*(1.+truez[ii])
            zout[ii] += np.random.normal(scale=zerr[ii])
            log.info("ELG sigma={:6.5f} index={:3.2f} median zerr={:6.5f}".format(sigma,powerlawindex,np.median(zerr[ii])))

        elif objtype == 'LRG' :
            sigma = params["LRG"]["UNCERTAINTY"]["SIGMA_17"]
            powerlawindex = params["LRG"]["UNCERTAINTY"]["POWER_LAW_INDEX"]
            zerr[ii] = sigma/(1.e-9+true_rflux[ii]**powerlawindex)*(1.+truez[ii])
            zout[ii] += np.random.normal(scale=zerr[ii])
            log.info("LRG sigma={:6.5f} index={:3.2f} median zerr={:6.5f}".format(sigma,powerlawindex,np.median(zerr[ii])))

        elif objtype == 'QSO' :
            # Two error models split at redshift QSO_ZSPLIT (low-z vs Lya QSOs).
            zsplit = params['QSO_ZSPLIT']
            sigma = params["LOWZ_QSO"]["UNCERTAINTY"]["SIGMA_17"]
            powerlawindex = params["LOWZ_QSO"]["UNCERTAINTY"]["POWER_LAW_INDEX"]
            jj=ii&(truth['TRUEZ']<=zsplit)
            zerr[jj] = sigma/(1.e-9+(true_rflux[jj])**powerlawindex)*(1.+truez[jj])
            log.info("LOWZ QSO sigma={:6.5f} index={:3.2f} median zerr={:6.5f}".format(sigma,powerlawindex,np.median(zerr[jj])))

            sigma = params["LYA_QSO"]["UNCERTAINTY"]["SIGMA_17"]
            powerlawindex = params["LYA_QSO"]["UNCERTAINTY"]["POWER_LAW_INDEX"]
            jj=ii&(truth['TRUEZ']>zsplit)
            zerr[jj] = sigma/(1.e-9+(true_rflux[jj])**powerlawindex)*(1.+truez[jj])
            if np.count_nonzero(jj) > 0:
                log.info("LYA QSO sigma={:6.5f} index={:3.2f} median zerr={:6.5f}".format(
                    sigma,powerlawindex,np.median(zerr[jj])))
            else:
                log.warning("No LyA QSO generated")

            zout[ii] += np.random.normal(scale=zerr[ii])

        elif objtype in _sigma_v.keys() :
            # Constant velocity dispersion model.
            log.info("{} use constant sigmav = {} km/s".format(objtype,_sigma_v[objtype]))
            ii = (simtype == objtype)
            zerr[ii] = _sigma_v[objtype] * (1+truez[ii]) / c
            zout[ii] += np.random.normal(scale=zerr[ii])

        else :
            # Bug fix: the original message was never interpolated
            # (missing .format(objtype)), so the literal "{}" was logged.
            log.info("{} no redshift error model, will use truth".format(objtype))

        ###################################
        # redshift efficiencies
        ###################################
        # Set ZWARN flags for some targets
        # the redshift efficiency only sets warning, but does not impact
        # the redshift value and its error.
        was_observed, goodz_prob = get_redshift_efficiency(
            objtype, targets[ii], truth[ii], targets_in_tile,
            obsconditions=obsconditions,params=params,
            ignore_obscondition=ignore_obscondition)

        n=np.sum(ii)
        assert len(was_observed) == n
        assert len(goodz_prob) == n
        r = np.random.random(len(was_observed))
        # ZWARN bit 4 = bad redshift; only observed targets can be flagged.
        zwarn[ii] = 4 * (r > goodz_prob) * was_observed

        ###################################
        # catastrophic failures
        ###################################
        zlim=[0.,3.5]
        cata_fail_fraction = np.zeros(n)
        if objtype == "ELG" :
            cata_fail_fraction[:] = params["ELG"]["FAILURE_RATE"]
            zlim=[0.6,1.7]
        elif objtype == "LRG" :
            cata_fail_fraction[:] = params["LRG"]["FAILURE_RATE"]
            zlim=[0.5,1.1]
        elif objtype == "QSO" :
            zsplit = params["QSO_ZSPLIT"]
            cata_fail_fraction[truth['TRUEZ'][ii]<=zsplit] = params["LOWZ_QSO"]["FAILURE_RATE"]
            cata_fail_fraction[truth['TRUEZ'][ii]>zsplit] = params["LYA_QSO"]["FAILURE_RATE"]
            zlim=[0.5,3.5]
        elif objtype in _cata_fail_fraction :
            cata_fail_fraction[:] = _cata_fail_fraction[objtype]

        # Catastrophic failures keep ZWARN==0 but get a uniformly random
        # redshift within the survey range zlim for this object type.
        failed = (np.random.uniform(size=n)<cata_fail_fraction)&(zwarn[ii]==0)
        failed_indices = np.where(ii)[0][failed]
        log.info("{} n_failed/n_tot={}/{}={:4.3f}".format(objtype,failed_indices.size,n,failed_indices.size/float(n)))
        zout[failed_indices] = np.random.uniform(zlim[0],zlim[1],failed_indices.size)

    return zout, zerr, zwarn
def get_median_obsconditions(tileids):
    """Return nominal observing conditions for a set of tiles.

    Args:
        tileids : list of tile IDs that were observed.

    Returns:
        astropy Table with one row per input tile (same order), containing
        at least the columns TILEID, AIRMASS, EBMV, LINTRANS, MOONFRAC,
        MOONALT, MOONSEP and SEEING.
    """
    #- Load the standard DESI tile list and keep only the requested tiles
    import desimodel.io
    tiles = desimodel.io.load_tiles()

    tileids = np.asarray(tileids)
    keep = np.in1d(tiles['TILEID'], tileids)
    tiles = tiles[keep]
    assert len(tiles) == len(tileids)

    #- Reorder the tile rows so that they line up with the input tileids:
    #- sort both sides, then undo the input-side sort to land back in
    #- input order.
    order_req = np.argsort(tileids)
    order_have = np.argsort(tiles['TILEID'])
    undo = np.argsort(order_req)
    tiles = tiles[order_have[undo]]
    assert np.all(tiles['TILEID'] == tileids)

    #- fix type bug after reading desi-tiles.fits
    if tiles['OBSCONDITIONS'].dtype == np.float64:
        tiles = Table(tiles)
        tiles.replace_column('OBSCONDITIONS', tiles['OBSCONDITIONS'].astype(int))

    ntile = len(tileids)
    obsconditions = Table()
    obsconditions['TILEID'] = tileids
    obsconditions['AIRMASS'] = tiles['AIRMASS']
    obsconditions['EBMV'] = tiles['EBV_MED']
    obsconditions['LINTRANS'] = np.ones(ntile)
    obsconditions['SEEING'] = np.ones(ntile) * 1.1

    #- Lunar conditions default to dark time, overridden per program below
    from desitarget.targetmask import obsconditions as obsbits
    obsconditions['MOONFRAC'] = np.zeros(ntile)
    obsconditions['MOONALT'] = -20.0 * np.ones(ntile)
    obsconditions['MOONSEP'] = 180.0 * np.ones(ntile)

    for bit, moonfrac, moonalt, moonsep in (
            (obsbits.GRAY, 0.1, 10.0, 60.0),
            (obsbits.BRIGHT, 0.7, 60.0, 50.0)):
        sel = (tiles['OBSCONDITIONS'] & bit) != 0
        obsconditions['MOONFRAC'][sel] = moonfrac
        obsconditions['MOONALT'][sel] = moonalt
        obsconditions['MOONSEP'][sel] = moonsep

    return obsconditions
def quickcat(tilefiles, targets, truth, fassignhdu='FIBERASSIGN', zcat=None, obsconditions=None, perfect=False):
    """
    Generates quick output zcatalog

    Args:
        tilefiles : list of fiberassign tile files that were observed
        targets : astropy Table of targets
        truth : astropy Table of input truth with columns TARGETID, TRUEZ, and TRUETYPE
        fassignhdu (optional): name of the HDU holding the fiber assignments
            in each tile file (default 'FIBERASSIGN')
        zcat (optional): input zcatalog Table from previous observations
        obsconditions (optional): Table or ndarray with observing conditions from surveysim
        perfect (optional): if True, treat spectro pipeline as perfect with input=output,
            otherwise add noise and zwarn!=0 flags

    Returns:
        zcatalog astropy Table based upon input truth, plus ZERR, ZWARN,
        NUMOBS, and TYPE columns
    """
    #- convert to Table for easier manipulation
    if not isinstance(truth, Table):
        truth = Table(truth)

    #- Count how many times each target was observed for this set of tiles
    log.info('{} QC Reading {} tiles'.format(asctime(), len(tilefiles)))
    nobs = Counter()
    targets_in_tile = {}
    tileids = list()
    for infile in tilefiles:
        fibassign, header = fits.getdata(infile, fassignhdu, header=True)
        # hack needed here rnc 7/26/18
        #- Fall back to parsing the tile ID out of the filename when the
        #- TILEID header keyword is absent.
        if 'TILEID' in header:
            tileidnew = header['TILEID']
        else:
            fnew=infile.split('/')[-1]
            tileidnew=fnew.split("_")[-1]
            tileidnew=int(tileidnew[:-5])
            log.error('TILEID missing from {} header'.format(fnew))
            log.error('{} -> TILEID {}'.format(infile, tileidnew))
        tileids.append(tileidnew)

        ii = (fibassign['TARGETID'] != -1) #- targets with assignments
        nobs.update(fibassign['TARGETID'][ii])
        targets_in_tile[tileidnew] = fibassign['TARGETID'][ii]

    #- Trim obsconditions to just the tiles that were observed
    if obsconditions is not None:
        ii = np.in1d(obsconditions['TILEID'], tileids)
        if np.any(ii == False):
            obsconditions = obsconditions[ii]
        assert len(obsconditions) > 0

    #- Sort obsconditions to match order of tiles
    #- This might not be needed, but is fast for O(20k) tiles and may
    #- prevent future surprises if code expects them to be row aligned
    tileids = np.array(tileids)
    if (obsconditions is not None) and \
       (np.any(tileids != obsconditions['TILEID'])):
        #- Sort both sides, then apply the inverse of the tileids sort so
        #- obsconditions rows end up in the same order as tileids.
        i = np.argsort(tileids)
        j = np.argsort(obsconditions['TILEID'])
        k = np.argsort(i)
        obsconditions = obsconditions[j[k]]
        assert np.all(tileids == obsconditions['TILEID'])

    #- Trim truth down to just ones that have already been observed
    log.info('{} QC Trimming truth to just observed targets'.format(asctime()))
    obs_targetids = np.array(list(nobs.keys()))
    iiobs = np.in1d(truth['TARGETID'], obs_targetids)
    truth = truth[iiobs]
    targets = targets[iiobs]

    #- Construct initial new z catalog
    log.info('{} QC Constructing new redshift catalog'.format(asctime()))
    newzcat = Table()
    newzcat['TARGETID'] = truth['TARGETID']
    if 'BRICKNAME' in truth.dtype.names:
        newzcat['BRICKNAME'] = truth['BRICKNAME']
    else:
        newzcat['BRICKNAME'] = np.zeros(len(truth), dtype=(str, 8))

    #- Copy TRUETYPE -> SPECTYPE so that we can change without altering original
    newzcat['SPECTYPE'] = truth['TRUESPECTYPE'].copy()

    #- Add ZERR and ZWARN
    log.info('{} QC Adding ZERR and ZWARN'.format(asctime()))
    nz = len(newzcat)
    if perfect:
        #- Perfect pipeline: redshifts equal truth with zero error/warnings.
        newzcat['Z'] = truth['TRUEZ'].copy()
        newzcat['ZERR'] = np.zeros(nz, dtype=np.float32)
        newzcat['ZWARN'] = np.zeros(nz, dtype=np.int32)
    else:
        # get the observational conditions for the current tilefiles
        if obsconditions is None:
            obsconditions = get_median_obsconditions(tileids)

        # get the redshifts
        z, zerr, zwarn = get_observed_redshifts(targets, truth, targets_in_tile, obsconditions)
        newzcat['Z'] = z  #- update with noisy redshift
        newzcat['ZERR'] = zerr
        newzcat['ZWARN'] = zwarn

    #- Add numobs column
    log.info('{} QC Adding NUMOBS column'.format(asctime()))
    newzcat.add_column(Column(name='NUMOBS', length=nz, dtype=np.int32))
    for i in range(nz):
        newzcat['NUMOBS'][i] = nobs[newzcat['TARGETID'][i]]

    #- Merge previous zcat with newzcat
    log.info('{} QC Merging previous zcat'.format(asctime()))
    if zcat is not None:
        #- don't modify original
        #- Note: this uses copy on write for the columns to be memory
        #- efficient while still letting us modify a column if needed
        zcat = zcat.copy()

        # needed to have the same ordering both in zcat and newzcat
        # to ensure consistent use of masks from np.in1d()
        zcat.sort(keys='TARGETID')
        newzcat.sort(keys='TARGETID')

        #- targets that are in both zcat and newzcat
        repeats = np.in1d(zcat['TARGETID'], newzcat['TARGETID'])

        #- update numobs in both zcat and newzcat
        #- (both masks select the same targets; row alignment relies on
        #- both tables being sorted by TARGETID above)
        ii = np.in1d(newzcat['TARGETID'], zcat['TARGETID'][repeats])
        orig_numobs = zcat['NUMOBS'][repeats].copy()
        new_numobs = newzcat['NUMOBS'][ii].copy()
        zcat['NUMOBS'][repeats] += new_numobs
        newzcat['NUMOBS'][ii] += orig_numobs

        #- replace only repeats that had ZWARN flags in original zcat
        #- replace in new
        replace = repeats & (zcat['ZWARN'] != 0)
        jj = np.in1d(newzcat['TARGETID'], zcat['TARGETID'][replace])
        zcat[replace] = newzcat[jj]

        #- trim newzcat to ones that shouldn't override original zcat
        discard = np.in1d(newzcat['TARGETID'], zcat['TARGETID'])
        newzcat = newzcat[~discard]

        #- Should be non-overlapping now
        assert np.all(np.in1d(zcat['TARGETID'], newzcat['TARGETID']) == False)

        #- merge them
        newzcat = vstack([zcat, newzcat])

    #- check for duplicates
    targetids = newzcat['TARGETID']
    assert len(np.unique(targetids)) == len(targetids)

    #- Metadata for header
    newzcat.meta['EXTNAME'] = 'ZCATALOG'

    #newzcat.sort(keys='TARGETID')

    log.info('{} QC done'.format(asctime()))
    return newzcat
| bsd-3-clause | 1e6382d1a3a3ab30ecd64b2224e4cd4b | 37.609354 | 153 | 0.600021 | 3.291393 | false | false | false | false |
desihub/desisim | py/desisim/qso_template/qso_pca.py | 1 | 4895 | """
desisim.qso_template.qso_pca
============================
Module for generate QSO PCA templates
24-Nov-2014 by JXP
"""
from __future__ import print_function, absolute_import, division
import numpy as np
import os
import multiprocessing as mp
from astropy.io import fits
flg_xdb = True
try:
from xastropy.xutils import xdebug as xdb
except ImportError:
flg_xdb = False
#
def read_qso_eigen(eigen_fil=None):
    '''
    Input the QSO Eigenspectra

    Parameters
    ----------
    eigen_fil : str, optional
        FITS file holding the eigenspectra.  Defaults to the
        spEigenQSO-55732 template under $IDLSPEC2D_DIR.

    Returns
    -------
    eigen : ndarray
        Eigenspectra array [neigen, npix].
    eigen_wave : ndarray
        Rest wavelength for each pixel, from the log-linear
        COEFF0/COEFF1 header keywords.
    '''
    # File
    if eigen_fil is None:
        eigen_fil = os.environ.get('IDLSPEC2D_DIR')+'/templates/spEigenQSO-55732.fits'
    print('Using these eigen spectra: {:s}'.format(eigen_fil))
    # fits.getdata closes the file handle; the previous fits.open call
    # never closed it and leaked the handle.
    eigen, head = fits.getdata(eigen_fil, 0, header=True)
    # Rest wavelength
    eigen_wave = 10.**(head['COEFF0'] + np.arange(head['NAXIS1'])*head['COEFF1'])
    # Return
    return eigen, eigen_wave
##
def fit_eigen(flux, ivar, eigen_flux):
    '''
    Fit the spectrum with the eigenvectors.
    Pass back the coefficients

    Weighted linear least squares: minimizes
    ``sum_i ivar[i] * (flux[i] - sum_j acoeff[j]*eigen_flux[j,i])**2``.

    Parameters
    ----------
    flux : ndarray
        Spectrum flux values [npix].
    ivar : ndarray
        Inverse variance of the flux [npix].
    eigen_flux : ndarray
        Eigenspectra evaluated on the same pixels [neigen, npix].

    Returns
    -------
    acoeff : ndarray
        Best-fit coefficients [neigen].
    '''
    A = eigen_flux.T
    # Weight the design matrix row-by-row instead of building the full
    # [npix, npix] diagonal matrix np.diag(ivar), which cost O(npix^2)
    # memory and time for no gain.
    alpha = np.dot(A.T, ivar[:, None] * A)   # normal matrix A^T C^-1 A
    beta = np.dot(A.T, ivar * flux)          # right-hand side A^T C^-1 flux
    # Solve the normal equations directly; more stable and cheaper than
    # forming the explicit inverse of alpha.
    acoeff = np.linalg.solve(alpha, beta)
    # Return
    return acoeff
##
def do_boss_lya_parallel(istart, iend, output, debug=False, cut_Lya=True):
    '''
    Generate PCA coeff for the BOSS Lya DR10 dataset, v2.1

    Parameters
    ----------
    istart : int
        First (inclusive) catalog row to process.
    iend : int
        Last (exclusive) catalog row to process.
    output : multiprocessing.Queue or None
        If not None, the tuple (istart, iend, pca_val) is put on the queue.
    debug : bool (False)
        Plot each fit and drop into the debugger (requires xastropy).
    cut_Lya: boolean (True)
        Avoid using the Lya forest in the analysis
    '''
    # Eigen
    eigen, eigen_wave = read_qso_eigen()

    # Open the BOSS catalog file
    boss_cat_fil = os.environ.get('BOSSPATH')+'/DR10/BOSSLyaDR10_cat_v2.1.fits.gz'
    t_boss = fits.getdata(boss_cat_fil, 1)

    pca_val = np.zeros((iend-istart, 4))

    # Loop us -- Should spawn on multiple CPU
    datdir = os.environ.get('BOSSPATH')+'/DR10/BOSSLyaDR10_spectra_v2.1/'
    jj = 0
    for ii in range(istart,iend):
        if (ii % 100) == 0:
            print('ii = {:d}'.format(ii))
        # Spectrum file
        pnm = str(t_boss['PLATE'][ii])
        fnm = str(t_boss['FIBERID'][ii]).rjust(4,str('0'))
        mjd = str(t_boss['MJD'][ii])
        sfil = datdir+pnm+'/speclya-'
        sfil = sfil+pnm+'-'+mjd+'-'+fnm+'.fits.gz'
        # Read spectrum.  fits.getdata closes the file; the previous
        # fits.open in this loop leaked one open handle per QSO.
        t = fits.getdata(sfil, 1)
        flux = t['flux']
        wave = 10.**t['loglam']
        ivar = t['ivar']
        zqso = t_boss['z_pipe'][ii]

        # Shift to the QSO rest frame.
        wrest = wave / (1+zqso)
        wlya = 1215.67

        # Cut Lya forest?
        if cut_Lya is True:
            Ly_imn = np.argmin(np.abs(wrest-wlya))
        else:
            Ly_imn = 0

        # Pack: align the eigenspectra pixels with the (possibly trimmed)
        # observed rest wavelengths.
        imn = np.argmin(np.abs(wrest[Ly_imn]-eigen_wave))
        npix = len(wrest[Ly_imn:])
        imx = npix+imn
        eigen_flux = eigen[:,imn:imx]

        # FIT
        acoeff = fit_eigen(flux[Ly_imn:], ivar[Ly_imn:], eigen_flux)
        pca_val[jj,:] = acoeff
        jj += 1

        # Check
        if debug is True:
            model = np.dot(eigen.T,acoeff)
            if flg_xdb is True:
                xdb.xplot(wrest, flux, xtwo=eigen_wave, ytwo=model)
                xdb.set_trace()

    if output is not None:
        output.put((istart,iend,pca_val))
## #################################
## #################################
## TESTING
## #################################
if __name__ == '__main__':
    # Script mode: fit PCA coefficients for a subset of the BOSS DR10
    # Lya QSO catalog, fanning the work out over several processes.

    # Run
    #do_boss_lya_parallel(0,10,None,debug=True,cut_Lya=True)
    #xdb.set_trace()

    ## ############################
    # Parallel
    boss_cat_fil = os.environ.get('BOSSPATH')+'/DR10/BOSSLyaDR10_cat_v2.1.fits.gz'
    bcat_hdu = fits.open(boss_cat_fil)
    t_boss = bcat_hdu[1].data
    nqso = len(t_boss)
    nqso = 45 # Testing

    output = mp.Queue()
    processes = []
    nproc = 4
    # Each worker handles a contiguous slice of ~nqso/nproc catalog rows.
    nsub = nqso // nproc

    # Setup the Processes
    for ii in range(nproc):
        # Generate
        istrt = ii * nsub
        # The last worker also absorbs the remainder rows.
        if ii == (nproc-1):
            iend = nqso
        else:
            iend = (ii+1)*nsub
        #xdb.set_trace()
        process = mp.Process(target=do_boss_lya_parallel,
                           args=(istrt,iend,output))
        processes.append(process)

    # Run processes
    for p in processes:
        p.start()

    # Exit the completed processes
    # NOTE(review): joining before draining the queue can deadlock if a
    # worker's result exceeds the pipe buffer; consider collecting the
    # queue items before joining.
    for p in processes:
        p.join()

    # Get process results from the output queue
    results = [output.get() for p in processes]

    # Bring together
    #sorted(results, key=lambda result: result[0])
    #all_is = [ir[0] for ir in results]
    # Each result is (istart, iend, coeffs); slot it into the full array.
    pca_val = np.zeros((nqso, 4))
    for ir in results:
        pca_val[ir[0]:ir[1],:] = ir[2]

    # NOTE(review): unconditional debugger entry; this raises NameError if
    # xastropy is not installed (flg_xdb False).
    xdb.set_trace()
desihub/desisim | py/desisim/transients.py | 1 | 24932 | """Module for defining interface to transient models.
"""
from abc import ABC, abstractmethod
from astropy import units as u
import numpy as np
from desiutil.log import get_logger, DEBUG
# Hide sncosmo import from the module.
try:
import sncosmo
log = get_logger(DEBUG)
log.info('Enabling sncosmo models.')
use_sncosmo = True
except ImportError as e:
log = get_logger(DEBUG)
log.warning('{}; disabling sncosmo models.'.format(e))
use_sncosmo = False
class Transient(ABC):
    """Abstract base class to enforce interface for transient flux models."""

    def __init__(self, modelname, modeltype):
        self.model = modelname   # model name, e.g. 'hsiao'
        self.type = modeltype    # transient type/class, e.g. 'Ia'
        # NOTE(review): presumably the transient-to-host flux ratio -- confirm.
        self.hostratio = 1.
        self.phase = 0.*u.day    # time relative to peak light

    @abstractmethod
    def minwave(self):
        """Minimum wavelength covered by the model."""
        pass

    @abstractmethod
    def maxwave(self):
        """Maximum wavelength covered by the model."""
        pass

    @abstractmethod
    def mintime(self):
        """Minimum time covered by the model (t=0 at peak light)."""
        pass

    @abstractmethod
    def maxtime(self):
        """Maximum time covered by the model (t=0 at peak light)."""
        pass

    @abstractmethod
    def set_model_pars(self, modelpars):
        """Set the internal model parameters.

        Fix: the abstract declaration was missing ``self``; every concrete
        implementation defines this as a bound instance method.
        """
        pass

    @abstractmethod
    def flux(self, t, wl):
        """Return normalized flux at time ``t`` over wavelengths ``wl``."""
        pass
if use_sncosmo:
    class Supernova(Transient):

        def __init__(self, modelname, modeltype, modelpars):
            """Initialize a built-in supernova model from the sncosmo package.

            Parameters
            ----------
            modelname : str
                Name of the model.
            modeltype : str
                Type or class of the model [Ia, IIP, ...].
            modelpars : dict
                Parameters used to initialize the model.
            """
            super().__init__(modelname, modeltype)

            # In sncosmo, some models have t0=tmax, and others have t0=0.
            # These lines ensure that for our purposes t0=tmax=0 for all models.
            self.t0 = modelpars['t0'] * u.day
            modelpars['t0'] = 0.

            self.snmodel = sncosmo.Model(self.model)
            self.set_model_pars(modelpars)

        def minwave(self):
            """Return minimum wavelength stored in model."""
            return self.snmodel.minwave() * u.Angstrom

        def maxwave(self):
            """Return maximum wavelength stored in model."""
            return self.snmodel.maxwave() * u.Angstrom

        def mintime(self):
            """Return minimum time used in model (peak light at t=0)."""
            return self.snmodel.mintime() * u.day - self.t0

        def maxtime(self):
            """Return maximum time used in model (peak light at t=0)."""
            return self.snmodel.maxtime() * u.day - self.t0

        def set_model_pars(self, modelpars):
            """Set sncosmo model parameters.

            Parameters
            ----------
            modelpars : dict
                Parameters used to initialize the internal model.
            """
            self.snmodel.set(**modelpars)

        def flux(self, t, wl):
            """Return flux vs wavelength at a given time t.

            Parameters
            ----------
            t : float or astropy.units.quantity.Quantity
                Time of observation, with t=0 representing max light.
            wl : list or ndarray
                Wavelength array to compute the flux.

            Returns
            -------
            flux : list or ndarray
                Normalized flux array as a function of wavelength.
            """
            # Time should be expressed w.r.t. maximum, in days.
            if type(t) is u.quantity.Quantity:
                self.phase = t
            else:
                self.phase = t * u.day

            time_ = (self.phase + self.t0).to('day').value

            # Convert wavelength to angstroms.
            wave_ = wl.to('Angstrom').value if type(wl) is u.quantity.Quantity else wl

            # Bug fix: evaluate on the converted wavelengths (wave_); the
            # original passed the raw `wl`, so Quantity input bypassed the
            # unit conversion computed on the line above.
            flux = self.snmodel.flux(time_, wave_)
            return flux / np.sum(flux)
class TabularModel(Transient):

    def __init__(self, modelname, modeltype, filename, filefmt):
        """Initialize a model from tabular data in an external file.

        Parameters
        ----------
        modelname : str
            Name of the model.
        modeltype : str
            Type or class of the model [TDE, AGN, ...].
        filename : str
            File with columns of wavelength and flux.
        filefmt : str
            File format (ascii, csv, fits, hdf5, ...).
        """
        super().__init__(modelname, modeltype)

        from astropy.table import Table
        from scipy.interpolate import PchipInterpolator

        table = Table.read(filename, format=filefmt, names=['wavelength','flux'])
        self.wave_ = table['wavelength'].data
        self.flux_ = table['flux'].data

        # Monotone (shape-preserving) interpolant of flux vs wavelength.
        self.fvsw_ = PchipInterpolator(self.wave_, self.flux_)

    def minwave(self):
        """Return minimum wavelength stored in model."""
        return self.wave_[0] * u.Angstrom

    def maxwave(self):
        """Return maximum wavelength stored in model."""
        return self.wave_[-1] * u.Angstrom

    def mintime(self):
        """Return minimum time used in model (peak light at t=0)."""
        return 0 * u.day

    def maxtime(self):
        """Return maximum time used in model (peak light at t=0)."""
        return 1 * u.day

    def set_model_pars(self, modelpars):
        """Set model parameters (no-op for a tabulated spectrum).

        Parameters
        ----------
        modelpars : dict
            Parameters used to initialize the internal model.
        """
        pass

    def flux(self, t, wl):
        """Return flux vs wavelength at a given time t.

        The tabulated spectrum is static, so ``t`` does not affect the
        returned shape.

        Parameters
        ----------
        t : float or astropy.units.quantity.Quantity
            Time of observation, with t=0 representing max light.
        wl : list or ndarray
            Wavelength array to compute the flux.

        Returns
        -------
        flux : list or ndarray
            Normalized flux array as a function of wavelength.
        """
        # Convert wavelength to angstroms.
        if type(wl) is u.quantity.Quantity:
            wave_ = wl.to('Angstrom').value
        else:
            wave_ = wl

        interpolated = self.fvsw_(wave_)
        return interpolated / np.sum(interpolated)
class ModelBuilder:
    """Lazily build a transient model on first request.

    The TransientModels registry stores one ModelBuilder per model so that
    no Transient is constructed until it is actually asked for; some models
    take noticeable time and memory to instantiate.  The first call builds
    and caches the model, later calls return the cached instance.
    """

    def __init__(self, modelclass):
        """Remember which Transient subclass to construct on demand.

        Parameters
        ----------
        modelclass : Transient
            A subclass of Transient, e.g., Supernova or TabularModel.
        """
        self._modclass = modelclass
        self._instance = None

    def __call__(self, modelpars):
        """Return the model, constructing it on first use.

        Parameters
        ----------
        modelpars : dict
            Keyword arguments handed to the model constructor.

        Returns
        -------
        instance : subclass of Transient (Supernova, TabularModel, etc.).
        """
        cached = self._instance
        if cached is None:
            cached = self._modclass(**modelpars)
            self._instance = cached
        return cached
class TransientModels:

    def __init__(self):
        """Create a registry of transient model builder classes, model types,
        and model parameters.
        """
        self._builders = {}
        self._modelpars = {}
        self._types = {}

    def register_builder(self, modelpars, builder):
        """Register a model builder.

        Parameters
        ----------
        modelpars : dict
            Dictionary of model parameters (type, name, params).
        builder :
            A Transient builder class which instantiates a Transient.
        """
        modtype = modelpars['modeltype']
        modname = modelpars['modelname']
        self._types.setdefault(modtype, []).append(modname)
        self._builders[modname] = builder
        self._modelpars[modname] = modelpars

    def get_model(self, modelname):
        """Given a model name, returns a Transient using its builder.

        Parameters
        ----------
        modelname : str
            Name of registered Transient model.

        Returns
        -------
        instance : Transient
            Instance of a registered transient.
        """
        builder = self._builders.get(modelname)
        if not builder:
            raise ValueError(modelname)
        return builder(self._modelpars.get(modelname))

    def get_type_dict(self):
        """Return a dictionary of registered model types.

        Returns
        -------
        types : dict
            Dictionary of types and models.
        """
        return self._types

    def get_type(self, modeltype):
        """Given a Transient type, randomly return a registered model of that
        type.

        Parameters
        ----------
        modeltype : str
            Transient type (Ia, Ib, IIP, ...).

        Returns
        -------
        instance : Transient
            A registered Transient of the requested type.
        """
        names = self._types.get(modeltype)
        if not names:
            raise ValueError(modeltype)
        return self.get_model(np.random.choice(names))

    def __str__(self):
        """A list of registered transient types and model names.

        Returns
        -------
        repr : str
            Representation of registered model types and names.
        """
        lines = []
        for modtype, models in self._types.items():
            lines.append('- {}'.format(modtype))
            lines.extend('  + {}'.format(m) for m in models)
        return '\n'.join(lines)
transients = TransientModels()
# Set up sncosmo models.
if use_sncosmo:
    # Every sncosmo-backed model is registered the same way; only the
    # transient type, the sncosmo source name and the initial model
    # parameters vary.  Tabulate them once and loop, instead of ~70
    # near-identical register_builder calls.

    def _amp_pars(t0):
        """Standard parameter dict for amplitude-scaled sncosmo sources."""
        return {'z': 0., 't0': t0, 'amplitude': 1.}

    _sncosmo_models = [
        # SN Ia
        ('Ia', 'hsiao', _amp_pars(0.)),
        ('Ia', 'nugent-sn1a', _amp_pars(20.)),
        ('Ia', 'nugent-sn91t', _amp_pars(20.)),
        ('Ia', 'nugent-sn91bg', _amp_pars(15.)),
        ('Ia', 'salt2-extended', {'z': 0., 't0': 0., 'x0': 1., 'x1': 0., 'c': 0.}),
        # SN Ib
        ('Ib', 's11-2005hl', _amp_pars(-5.)),
        ('Ib', 's11-2005hm', _amp_pars(5.)),
        ('Ib', 's11-2006jo', _amp_pars(0.)),
        ('Ib', 'snana-2004gv', _amp_pars(0.)),
        ('Ib', 'snana-2006ep', _amp_pars(0.)),
        ('Ib', 'snana-2007y', _amp_pars(0.)),
        ('Ib', 'snana-2004ib', _amp_pars(0.)),
        ('Ib', 'snana-2005hm', _amp_pars(0.)),
        ('Ib', 'snana-2007nc', _amp_pars(0.)),
        # SN Ib/c
        ('Ib/c', 'nugent-sn1bc', _amp_pars(20.)),
        # SN Ic
        ('Ic', 's11-2006fo', _amp_pars(0.)),
        ('Ic', 'snana-2004fe', _amp_pars(0.)),
        ('Ic', 'snana-2004gq', _amp_pars(0.)),
        ('Ic', 'snana-sdss004012', _amp_pars(0.)),
        ('Ic', 'snana-2006fo', _amp_pars(0.)),
        ('Ic', 'snana-sdss014475', _amp_pars(0.)),
        ('Ic', 'snana-2006lc', _amp_pars(0.)),
        ('Ic', 'snana-04d1la', _amp_pars(0.)),
        ('Ic', 'snana-04d4jv', _amp_pars(0.)),
        # SN IIn
        ('IIn', 'nugent-sn2n', _amp_pars(20.)),
        ('IIn', 'snana-2006ez', _amp_pars(0.)),
        ('IIn', 'snana-2006ix', _amp_pars(0.)),
        # SN IIP
        ('IIP', 'nugent-sn2p', _amp_pars(20.)),
        ('IIP', 's11-2005lc', _amp_pars(0.)),
        ('IIP', 's11-2005gi', _amp_pars(0.)),
        ('IIP', 's11-2006jl', _amp_pars(0.)),
        ('IIP', 'snana-2004hx', _amp_pars(0.)),
        ('IIP', 'snana-2005gi', _amp_pars(0.)),
        ('IIP', 'snana-2006gq', _amp_pars(0.)),
        ('IIP', 'snana-2006kn', _amp_pars(0.)),
        ('IIP', 'snana-2006jl', _amp_pars(0.)),
        ('IIP', 'snana-2006iw', _amp_pars(0.)),
        ('IIP', 'snana-2006kv', _amp_pars(0.)),
        ('IIP', 'snana-2006ns', _amp_pars(0.)),
        ('IIP', 'snana-2007iz', _amp_pars(0.)),
        ('IIP', 'snana-2007nr', _amp_pars(0.)),
        ('IIP', 'snana-2007kw', _amp_pars(0.)),
        ('IIP', 'snana-2007ky', _amp_pars(0.)),
        ('IIP', 'snana-2007lj', _amp_pars(0.)),
        ('IIP', 'snana-2007lb', _amp_pars(0.)),
        ('IIP', 'snana-2007ll', _amp_pars(0.)),
        ('IIP', 'snana-2007nw', _amp_pars(0.)),
        ('IIP', 'snana-2007ld', _amp_pars(0.)),
        ('IIP', 'snana-2007md', _amp_pars(0.)),
        ('IIP', 'snana-2007lz', _amp_pars(0.)),
        ('IIP', 'snana-2007lx', _amp_pars(0.)),
        ('IIP', 'snana-2007og', _amp_pars(0.)),
        ('IIP', 'snana-2007nv', _amp_pars(0.)),
        ('IIP', 'snana-2007pg', _amp_pars(0.)),
        # SN IIL
        ('IIL', 'nugent-sn2l', _amp_pars(12.)),
        # SN IIL/P
        ('IIL/P', 's11-2004hx', _amp_pars(0.)),
        # SN II-pec
        ('II-pec', 'snana-2007ms', _amp_pars(0.)),
    ]

    # One ModelBuilder per model: each builder caches its own instance.
    for _mtype, _mname, _mpars in _sncosmo_models:
        transients.register_builder(
            {'modelname': _mname, 'modeltype': _mtype, 'modelpars': _mpars},
            ModelBuilder(Supernova))
| bsd-3-clause | a477c4bb4d0d9670ff60265f53e26552 | 38.07837 | 97 | 0.460091 | 4.294178 | false | false | false | false |
desihub/desisim | py/desisim/archetypes.py | 1 | 5024 | """
desisim.archetypes
==================
Archetype routines for desisim.
"""
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from desiutil.log import get_logger
log = get_logger()
def compute_chi2(flux, ferr=None):
    """Compute the chi2 distance matrix.

    Parameters
    ----------
    flux : numpy.ndarray
        Array [Nspec, Npix] of spectra or templates where Nspec is the number of
        spectra and Npix is the number of pixels.
    ferr : numpy.ndarray
        Uncertainty spectra corresponding to flux (default None).

    Returns
    -------
    Tuple of (chi2, amp) where:
    chi2 : numpy.ndarray
        Chi^2 matrix [Nspec, Nspec] between all combinations of normalized spectra.
    amp : numpy.ndarray
        Amplitude matrix [Nspec, Nspec] between all combinations of spectra.
    """
    nspec, npix = flux.shape
    chi2 = np.zeros((nspec, nspec), dtype='f4')
    amp = np.zeros((nspec, nspec), dtype='f4')

    # Normalize each spectrum so that sum(flux**2) equals npix; this makes the
    # best-fit amplitude and chi2 below simple dot products.
    flux = flux.copy()
    rescale = np.sqrt(npix/np.sum(flux**2, axis=1))
    flux *= rescale[:, None]

    if ferr is None:
        for ii in range(nspec):
            if ii % 500 == 0:
                # Progress message for this chunk of (up to) 500 spectra.
                # Bug fix: the upper bound previously used
                # (ii+1) * int(ii/500), which reported wrong ranges.
                log.info('Computing chi2 matrix for spectra {}-{} out of {}.'.format(
                    ii, min(ii + 499, nspec - 1), nspec))
            # For unit-normalized spectra the best-fit amplitude is the dot
            # product / npix, and chi2 reduces to npix*(1 - amp**2).
            amp1 = np.sum(flux[ii]*flux, axis=1)/npix
            chi2[ii, :] = npix*(1.-amp1**2)
            amp[ii, :] = amp1
    else:
        from SetCoverPy import mathutils
        ferr = ferr.copy()
        ferr *= rescale[:, None]
        for ii in range(nspec):
            # Note: the previous "or ii == 0" test was redundant since
            # 0 % 500 == 0 already holds.
            if ii % 500 == 0:
                log.info('Computing chi2 matrix for spectra {}-{} out of {}.'.format(
                    ii, min(ii + 499, nspec - 1), nspec))
            xx = flux[ii, :].reshape(1, npix)
            xxerr = ferr[ii, :].reshape(1, npix)
            amp[ii, :], chi2[ii, :] = mathutils.quick_amplitude(xx, flux, xxerr, ferr, niter=1)
        # Undo the normalization so the amplitudes refer to the input spectra.
        amp *= rescale[:, None]/rescale[None, :]

    # A spectrum is trivially identical to itself.
    np.fill_diagonal(chi2, 0.)
    np.fill_diagonal(amp, 1.)

    return chi2, amp
class ArcheTypes(object):
    """Object for generating archetypes and determining their responsibility.

    Parameters
    ----------
    chi2 : numpy.ndarray
        Chi^2 matrix computed by desisim.archetypes.compute_chi2().
    """

    def __init__(self, chi2):
        self.chi2 = chi2

    def get_archetypes(self, chi2_thresh=0.1, responsibility=False):
        """Solve the set-cover problem (SCP) to obtain the final archetypes
        and, optionally, their responsibility.

        Note: every template is assumed to carry the same (uniform) cost,
        although a more general cost model could in principle be implemented.

        Parameters
        ----------
        chi2_thresh : float
            Threshold chi2 value to differentiate "different" templates.
        responsibility : bool
            If True, also compute and return the responsibility of each
            archetype.

        Returns
        -------
        If responsibility==True, a tuple of (iarch, resp, respindx) where:
        iarch : integer numpy.array
            Indices of the archetypes [N].
        resp : integer numpy.array
            Responsibility of each archetype [N].
        respindx : list of integer numpy.array
            Indices of the parent sample each archetype is responsible for [N].
        If responsibility==False, only iarch is returned.
        """
        from SetCoverPy import setcover

        nspec = self.chi2[0].shape
        uniform_cost = np.ones(nspec)
        # Two templates are interchangeable when their chi2 distance is at
        # or below the threshold; build the binary coverage matrix.
        a_matrix = (self.chi2 <= chi2_thresh) * 1

        solver = setcover.SetCover(a_matrix, uniform_cost)
        _solution, _elapsed = solver.SolveSCP()
        iarch = np.nonzero(solver.s)[0]

        if not responsibility:
            return iarch
        resp, respindx = self.responsibility(iarch, a_matrix)
        return iarch, resp, respindx

    def responsibility(self, iarch, a_matrix):
        """Determine the responsibility of each archetype.

        In essence, the responsibility is the number of templates described
        by each archetype.

        Parameters
        ----------
        iarch : indices of the archetypes
        a_matrix : distance (coverage) matrix

        Returns
        -------
        resp : responsibility of each archetype (number of objects represented
            by each archetype)
        respindx : list containing the indices of the parent objects
            represented by each archetype
        """
        respindx = [np.where(a_matrix[:, col] == 1)[0] for col in iarch]
        resp = np.array(
            [np.count_nonzero(a_matrix[:, col]) for col in iarch],
            dtype='int16')
        return resp, respindx
| bsd-3-clause | 9379f1fa247aa80fa433e036a22a052f | 31 | 100 | 0.57703 | 3.82344 | false | false | false | false |
desihub/desisim | py/desisim/bal.py | 1 | 5869 | """
desisim.bal
===========
Functions and methods for inserting BALs into QSO spectra.
"""
from __future__ import division, print_function
import numpy as np
class BAL(object):
    """Base class for inserting BALs into (input) QSO spectra."""

    def __init__(self):
        """Read and cache the BAL template set.

        Attributes:
            balflux (numpy.ndarray): Array [nbase,npix] of the rest-frame BAL
                templates.
            balwave (numpy.ndarray): Array [npix] of rest-frame wavelengths
                corresponding to BASEFLUX (Angstrom).
            balmeta (astropy.Table): Table of metadata [nbase] for each template.
        """
        from desisim.io import read_basis_templates

        balflux, balwave, balmeta = read_basis_templates(objtype='BAL')
        self.balflux = balflux
        self.balwave = balwave
        self.balmeta = balmeta

    def empty_balmeta(self, qsoredshift=None):
        """Initialize an empty metadata table for BALs.

        Args:
            qsoredshift (numpy.array or float, optional): QSO redshift(s);
                sets the table length and populates the Z column.

        Returns:
            astropy.Table: Table with BAL_TEMPLATEID (initialized to -1) and
                Z columns.
        """
        from astropy.table import Table, Column

        if qsoredshift is None:
            nqso = 1
        else:
            nqso = len(np.atleast_1d(qsoredshift))

        balmeta = Table()
        balmeta.add_column(Column(name='BAL_TEMPLATEID', length=nqso, dtype='i4', data=np.zeros(nqso)-1))
        balmeta.add_column(Column(name='Z', length=nqso, dtype='f4', data=np.zeros(nqso)))
        if qsoredshift is not None:
            balmeta['Z'] = qsoredshift

        return balmeta

    def template_balmeta(self, indx):
        """Build the metadata table for the selected BAL templates.

        Args:
            indx (numpy.ndarray): Indices of the chosen BAL templates.

        Returns:
            astropy.Table: Copy of the cached template metadata for the
                selected rows, with TARGETID, Z, BAL_PROB and BAL_TEMPLATEID
                columns prepended (TARGETID and BAL_TEMPLATEID initialized
                to -1, BAL_PROB to 1).
        """
        # Bug fix: removed unused imports (astropy Table and
        # desisim.io.find_basis_template) which were never referenced here.
        from astropy.table import Column

        balmeta = self.balmeta.copy()
        balmeta = balmeta[indx]
        nbal = len(balmeta)
        balmeta.add_column(Column(name='BAL_TEMPLATEID', length=nbal, dtype='i4', data=np.zeros(nbal)-1), index=0)
        balmeta.add_column(Column(name='BAL_PROB', length=nbal, dtype='f4', data=np.ones(nbal)), index=0)
        balmeta.add_column(Column(name='Z', length=nbal, dtype='f4', data=np.zeros(nbal)), index=0)
        balmeta.add_column(Column(name='TARGETID', length=nbal, dtype='i4', data=np.zeros(nbal)-1), index=0)
        return balmeta

    def insert_bals(self, qsowave, qsoflux, qsoredshift, balprob=0.12,
                    seed=None, verbose=False, qsoid=None):
        """Probabilistically inserts BALs into one or more QSO spectra.

        Args:
            qsowave (numpy.ndarray): observed-frame wavelength array [Angstrom]
            qsoflux (numpy.ndarray): array of observed frame flux values.
            qsoredshift (numpy.array or float): QSO redshift
            balprob (float, optional): Probability that a QSO is a BAL (default
                0.12). Only used if QSO(balqso=True) at instantiation.
            seed (int, optional): input seed for the random numbers.
            verbose (bool, optional): Be verbose!
            qsoid (numpy.ndarray, optional): TARGETID of each QSO; if not
                provided the TARGETID column of the output table stays at -1.

        Returns:
            bal_qsoflux (numpy.ndarray): QSO spectrum with the BAL included.
            balmeta (astropy.Table): metadata table for each BAL.
        """
        from desiutil.log import get_logger, DEBUG
        from desispec.interpolation import resample_flux
        from astropy.table import Table

        if verbose:
            log = get_logger(DEBUG)
        else:
            log = get_logger()

        rand = np.random.RandomState(seed)

        # Clamp the BAL probability onto the interval [0, 1].
        if balprob < 0:
            log.warning('Balprob {} is negative; setting to zero.'.format(balprob))
            balprob = 0.0
        if balprob > 1:
            log.warning('Balprob {} cannot exceed unity; setting to 1.0.'.format(balprob))
            balprob = 1.0

        # Bug fix: the docstring allows a scalar redshift but len() on a float
        # raises TypeError; promote scalars to a length-1 array first.
        qsoredshift = np.atleast_1d(qsoredshift)

        nqso, nwave = qsoflux.shape
        if len(qsoredshift) != nqso:
            log.fatal('Dimensions of qsoflux and qsoredshift do not agree!')
            raise ValueError

        if qsowave.ndim == 2:  # desisim.QSO(resample=True) returns a 2D wavelength array
            w_nqso, w_nwave = qsowave.shape
            if w_nwave != nwave or w_nqso != nqso:
                log.fatal('Dimensions of qsoflux and qsowave do not agree!')
                raise ValueError
        else:
            if len(qsowave) != nwave:
                log.fatal('Dimensions of qsoflux and qsowave do not agree!')
                raise ValueError

        # Determine which QSO spectrum has BAL(s) and then loop on each.
        hasbal = rand.random_sample(nqso) < balprob
        ihasbal = np.where(hasbal)[0]

        if len(ihasbal) == 0:
            # Return a fully empty balmeta table.
            balmeta = Table(names=('TARGETID', 'Z', 'BAL_PROB', 'BAL_TEMPLATEID'), dtype=('i4', 'f4', 'f4', 'i4'))
            return qsoflux, balmeta

        balindx = rand.choice(len(self.balmeta), len(ihasbal))

        # Build the metadata for the inserted BALs only.
        balmeta = self.template_balmeta(balindx)
        balmeta['Z'] = qsoredshift[ihasbal]
        balmeta['BAL_TEMPLATEID'] = balindx
        # Bug fix: guard against the default qsoid=None, which previously
        # raised TypeError on indexing; TARGETID then stays at -1.
        if qsoid is not None:
            balmeta['TARGETID'] = qsoid[ihasbal]

        # Multiply each affected QSO spectrum by its redshifted BAL template.
        bal_qsoflux = qsoflux.copy()
        if qsowave.ndim == 2:
            for ii, indx in zip(ihasbal, balindx):
                thisbalflux = resample_flux(qsowave[ii, :], self.balwave*(1 + qsoredshift[ii]),
                                            self.balflux[indx, :], extrapolate=True)
                bal_qsoflux[ii, :] *= thisbalflux
        else:
            for ii, indx in zip(ihasbal, balindx):
                thisbalflux = resample_flux(qsowave, self.balwave*(1 + qsoredshift[ii]),
                                            self.balflux[indx, :], extrapolate=True)
                bal_qsoflux[ii, :] *= thisbalflux

        return bal_qsoflux, balmeta
| bsd-3-clause | 4617a51014694ea9535a3eb33961a25e | 39.475862 | 155 | 0.59465 | 3.602824 | false | false | false | false |
metoppv/improver | improver/spotdata/spot_extraction.py | 3 | 16425 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Spot data extraction from diagnostic fields using neighbour cubes."""
from typing import List, Optional, Tuple, Union
import iris
import numpy as np
from iris.coords import AuxCoord, DimCoord
from iris.cube import Cube, CubeList
from numpy import ndarray
from improver import BasePlugin
from improver.metadata.constants.attributes import MANDATORY_ATTRIBUTE_DEFAULTS
from improver.metadata.constants.mo_attributes import MOSG_GRID_ATTRIBUTES
from improver.metadata.utilities import create_coordinate_hash
from improver.spotdata.build_spotdata_cube import build_spotdata_cube
from improver.utilities.cube_manipulation import enforce_coordinate_ordering
from . import UNIQUE_ID_ATTRIBUTE
class SpotExtraction(BasePlugin):
    """
    For the extraction of diagnostic data using neighbour cubes that contain
    spot-site information and the appropriate grid point from which to source
    data.
    """

    def __init__(self, neighbour_selection_method: str = "nearest") -> None:
        """
        Args:
            neighbour_selection_method:
                The neighbour cube may contain one or several sets of grid
                coordinates that match a spot site. These are determined by
                the neighbour finding method employed. This keyword is used to
                extract the desired set of coordinates from the neighbour cube.
        """
        self.neighbour_selection_method = neighbour_selection_method

    def __repr__(self) -> str:
        """Represent the configured plugin instance as a string."""
        return "<SpotExtraction: neighbour_selection_method: {}>".format(
            self.neighbour_selection_method
        )

    def extract_coordinates(self, neighbour_cube: Cube) -> Cube:
        """
        Extract the desired set of grid coordinates that correspond to spot
        sites from the neighbour cube.

        Args:
            neighbour_cube:
                A cube containing information about the spot data sites and
                their grid point neighbours.

        Returns:
            A cube containing only the x and y grid coordinates for the
            grid point neighbours given the chosen neighbour selection
            method. The neighbour cube contains the indices stored as
            floating point values, so they are converted to integers
            in this cube.

        Raises:
            ValueError if the neighbour_selection_method expected is not found
            in the neighbour cube.
        """
        method = iris.Constraint(
            neighbour_selection_method_name=self.neighbour_selection_method
        )
        index_constraint = iris.Constraint(grid_attributes_key=["x_index", "y_index"])
        coordinate_cube = neighbour_cube.extract(method & index_constraint)
        if coordinate_cube:
            # Indices are stored as floats; round and cast so they can be used
            # directly for array indexing.
            coordinate_cube.data = np.rint(coordinate_cube.data).astype(int)
            return coordinate_cube

        available_methods = neighbour_cube.coord(
            "neighbour_selection_method_name"
        ).points
        raise ValueError(
            'The requested neighbour_selection_method "{}" is not available in'
            " this neighbour_cube. Available methods are: {}.".format(
                self.neighbour_selection_method, available_methods
            )
        )

    @staticmethod
    def check_for_unique_id(neighbour_cube: Cube) -> Optional[Tuple[ndarray, str]]:
        """
        Determine if there is a unique ID coordinate, and if so return
        the values and name of that coordinate.

        Args:
            neighbour_cube:
                This cube on which to look for a unique site ID coordinate.

        Returns:
            - array of unique site IDs
            - name of unique site ID coordinate
            or None if no single unique ID coordinate is found.
        """
        try:
            (unique_id_coord,) = [
                crd
                for crd in neighbour_cube.coords()
                if UNIQUE_ID_ATTRIBUTE in crd.attributes
            ]
        except ValueError:
            # Zero or multiple candidate coordinates; treat as no unique ID.
            # Previously this fell through to an implicit None; make the
            # return explicit for clarity.
            return None
        return (unique_id_coord.points, unique_id_coord.name())

    def get_aux_coords(
        self, diagnostic_cube: Cube, x_indices: ndarray, y_indices: ndarray,
    ) -> Tuple[List[AuxCoord], List[AuxCoord]]:
        """
        Extract scalar and non-scalar auxiliary coordinates from the diagnostic
        cube. Multi-dimensional auxiliary coordinates have the relevant points
        and bounds extracted for each site and a 1-dimensional coordinate is
        constructed that can be associated with the spot-index coordinate.

        Args:
            diagnostic_cube:
                The cube from which auxiliary coordinates will be taken.
            x_indices, y_indices:
                The array indices that correspond to sites for which coordinate
                data is to be extracted.

        Returns:
            - list of scalar coordinates
            - list of non-scalar, multi-dimensional coordinates reshaped into
              1-dimensional coordinates.
        """
        scalar_coords = []
        nonscalar_coords = []
        for coord in diagnostic_cube.aux_coords:
            if coord.ndim > 1:
                coord_points, coord_bounds = self.get_coordinate_data(
                    diagnostic_cube, x_indices, y_indices, coord.name()
                )
                nonscalar_coords.append(
                    coord.copy(points=coord_points, bounds=coord_bounds)
                )
            elif coord.points.size == 1:
                scalar_coords.append(coord)
        return scalar_coords, nonscalar_coords

    @staticmethod
    def get_coordinate_data(
        diagnostic_cube: Cube, x_indices: ndarray, y_indices: ndarray, coordinate: str
    ) -> Union[ndarray, List[Union[ndarray, None]]]:
        """
        Extracts coordinate points from 2-dimensional coordinates for
        association with sites.

        Args:
            diagnostic_cube:
                The cube from which auxiliary coordinates will be taken.
            x_indices, y_indices:
                The array indices that correspond to sites for which coordinate
                data is to be extracted.
            coordinate:
                The name of the coordinate from which to extract data.

        Returns:
            A list containing an array of coordinate values and an array of
            bound values, with the latter instead being None if no bounds
            exist.
        """
        coord_data = []
        coord = diagnostic_cube.coord(coordinate)
        coord_data.append(coord.points[..., y_indices, x_indices])
        if coord.has_bounds():
            coord_data.append(coord.bounds[..., y_indices, x_indices, :])
        else:
            coord_data.append(None)
        return coord_data

    @staticmethod
    def build_diagnostic_cube(
        neighbour_cube: Cube,
        diagnostic_cube: Cube,
        spot_values: ndarray,
        additional_dims: Optional[List[DimCoord]] = None,
        scalar_coords: Optional[List[AuxCoord]] = None,
        auxiliary_coords: Optional[List[AuxCoord]] = None,
        unique_site_id: Optional[Union[List[str], ndarray]] = None,
        unique_site_id_key: Optional[str] = None,
    ) -> Cube:
        """
        Builds a spot data cube containing the extracted diagnostic values.

        Args:
            neighbour_cube:
                This cube is needed as a source for information about the spot
                sites which needs to be included in the spot diagnostic cube.
            diagnostic_cube:
                The cube is needed to provide the name and units of the
                diagnostic that is being processed.
            spot_values:
                An array containing the diagnostic values extracted for the
                required spot sites.
            additional_dims:
                Optional list containing iris.coord.DimCoords with any leading
                dimensions required before spot data. Defaults to None,
                meaning no leading dimensions.
            scalar_coords:
                Optional list containing iris.coord.AuxCoords with all scalar coordinates
                relevant for the spot sites.
            auxiliary_coords:
                Optional list containing iris.coords.AuxCoords which are non-scalar.
            unique_site_id:
                Optional list of 8-digit unique site identifiers.
            unique_site_id_key:
                String to name the unique_site_id coordinate. Required if
                unique_site_id is in use.

        Returns:
            A spot data cube containing the extracted diagnostic data.
        """
        # Bug fix: avoid a mutable default argument ([]); None is the default
        # and is normalized to an empty list here.
        if additional_dims is None:
            additional_dims = []

        # Find any AuxCoords associated with the additional_dims so these can be copied too
        additional_dims_aux = []
        for dim_coord in additional_dims:
            dim_coord_dim = diagnostic_cube.coord_dims(dim_coord)
            aux_coords = [
                aux_coord
                for aux_coord in diagnostic_cube.aux_coords
                if diagnostic_cube.coord_dims(aux_coord) == dim_coord_dim
            ]
            additional_dims_aux.append(aux_coords)

        spot_diagnostic_cube = build_spotdata_cube(
            spot_values,
            diagnostic_cube.name(),
            diagnostic_cube.units,
            neighbour_cube.coord("altitude").points,
            neighbour_cube.coord(axis="y").points,
            neighbour_cube.coord(axis="x").points,
            neighbour_cube.coord("wmo_id").points,
            unique_site_id=unique_site_id,
            unique_site_id_key=unique_site_id_key,
            scalar_coords=scalar_coords,
            auxiliary_coords=auxiliary_coords,
            additional_dims=additional_dims,
            additional_dims_aux=additional_dims_aux,
        )
        return spot_diagnostic_cube

    def process(
        self,
        neighbour_cube: Cube,
        diagnostic_cube: Cube,
        new_title: Optional[str] = None,
    ) -> Cube:
        """
        Create a spot data cube containing diagnostic data extracted at the
        coordinates provided by the neighbour cube.

        .. See the documentation for more details about the inputs and output.
        .. include:: /extended_documentation/spotdata/spot_extraction/
           spot_extraction_examples.rst

        Args:
            neighbour_cube:
                A cube containing information about the spot data sites and
                their grid point neighbours.
            diagnostic_cube:
                A cube of diagnostic data from which spot data is being taken.
            new_title:
                New title for spot-extracted data. If None, this attribute is
                reset to a default value, since it has no prescribed standard
                and may therefore contain grid information that is no longer
                correct after spot-extraction.

        Returns:
            A cube containing diagnostic data for each spot site, as well
            as information about the sites themselves.
        """
        # Check we are using a matched neighbour/diagnostic cube pair
        check_grid_match([neighbour_cube, diagnostic_cube])

        # Get the unique_site_id if it is present on the neighbour cube
        unique_site_id_data = self.check_for_unique_id(neighbour_cube)
        if unique_site_id_data:
            unique_site_id, unique_site_id_key = unique_site_id_data
        else:
            unique_site_id, unique_site_id_key = None, None

        # Ensure diagnostic cube is y-x order as neighbour cube expects.
        enforce_coordinate_ordering(
            diagnostic_cube,
            [
                diagnostic_cube.coord(axis="y").name(),
                diagnostic_cube.coord(axis="x").name(),
            ],
            anchor_start=False,
        )

        coordinate_cube = self.extract_coordinates(neighbour_cube)
        x_indices, y_indices = coordinate_cube.data
        spot_values = diagnostic_cube.data[..., y_indices, x_indices]

        # Any dimensions leading the y-x grid become leading dimensions of
        # the spot data.
        additional_dims = []
        if len(spot_values.shape) > 1:
            additional_dims = diagnostic_cube.dim_coords[:-2]
        scalar_coords, nonscalar_coords = self.get_aux_coords(
            diagnostic_cube, x_indices, y_indices
        )

        spotdata_cube = self.build_diagnostic_cube(
            neighbour_cube,
            diagnostic_cube,
            spot_values,
            scalar_coords=scalar_coords,
            auxiliary_coords=nonscalar_coords,
            additional_dims=additional_dims,
            unique_site_id=unique_site_id,
            unique_site_id_key=unique_site_id_key,
        )

        # Copy attributes from the diagnostic cube that describe the data's
        # provenance
        spotdata_cube.attributes = diagnostic_cube.attributes
        spotdata_cube.attributes["model_grid_hash"] = neighbour_cube.attributes[
            "model_grid_hash"
        ]

        # Remove the unique_site_id coordinate attribute as it is internal
        # metadata only
        if unique_site_id is not None:
            spotdata_cube.coord(unique_site_id_key).attributes.pop(UNIQUE_ID_ATTRIBUTE)

        # Remove grid attributes and update title
        for attr in MOSG_GRID_ATTRIBUTES:
            spotdata_cube.attributes.pop(attr, None)
        spotdata_cube.attributes["title"] = (
            MANDATORY_ATTRIBUTE_DEFAULTS["title"] if new_title is None else new_title
        )

        # Copy cell methods
        spotdata_cube.cell_methods = diagnostic_cube.cell_methods

        return spotdata_cube
def check_grid_match(cubes: Union[List[Cube], CubeList]) -> None:
    """
    Checks that cubes are on, or originate from, compatible coordinate grids.

    A cube may carry a 'model_grid_hash' attribute that encodes the grid it
    came from even when the cube itself holds no coordinate grid (e.g.
    spotdata cubes). Where that attribute is absent a hash is generated from
    the cube's coordinates. All cubes must yield the same hash; otherwise an
    exception is raised to prevent the use of unmatched cubes.

    Args:
        cubes:
            A list of cubes to check for grid compatibility.

    Raises:
        ValueError: Raised if the cubes are not on matching grids as
            identified by the model_grid_hash.
    """

    def _hash_of(cube):
        """Return the cube's stored grid hash, or compute one on the fly."""
        if "model_grid_hash" in cube.attributes:
            return cube.attributes["model_grid_hash"]
        return create_coordinate_hash(cube)

    cube_iterator = iter(cubes)
    expected_hash = _hash_of(next(cube_iterator))
    for cube in cube_iterator:
        if _hash_of(cube) != expected_hash:
            raise ValueError(
                "Cubes do not share or originate from the same "
                "grid, so cannot be used together."
            )
| bsd-3-clause | f283875d46a99785f67492bb583ad6db | 39.455665 | 91 | 0.62691 | 4.462103 | false | false | false | false |
metoppv/improver | improver/cli/construct_reliability_tables.py | 3 | 4678 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""CLI to construct reliability tables for use in reliability calibration."""
from improver import cli
@cli.clizefy
@cli.with_output
def process(
    *cubes: cli.inputcube,
    truth_attribute,
    n_probability_bins: int = 5,
    single_value_lower_limit: bool = False,
    single_value_upper_limit: bool = False,
    aggregate_coordinates: cli.comma_separated_list = None,
):
    """Populate reliability tables for use in reliability calibration.

    Loads historical forecasts and gridded truths that are compared to build
    reliability tables. Reliability tables are returned as a cube with a
    leading threshold dimension that matches that of the forecast probability
    cubes and the thresholded truth.

    Args:
        cubes (list of iris.cube.Cube):
            A list of cubes containing the historical probability forecasts and
            corresponding truths used for calibration. These cubes must include
            the same diagnostic name in their names, and must both have
            equivalent threshold coordinates. The cubes will be distinguished
            using the user provided truth attribute.
        truth_attribute (str):
            An attribute and its value in the format of "attribute=value",
            which must be present on truth cubes.
        n_probability_bins (int):
            The total number of probability bins required in the reliability
            tables. If single value limits are turned on, these are included in
            this total. If using single_value_limits this value must be at
            least 3.
        single_value_lower_limit (bool):
            Mandates that the lowest bin should be single valued, with a small
            precision tolerance, defined as 1.0E-6. The bin is thus 0 to 1.0E-6.
        single_value_upper_limit (bool):
            Mandates that the highest bin should be single valued, with a small
            precision tolerance, defined as 1.0E-6. The bin is thus (1 - 1.0E-6) to 1.
        aggregate_coordinates (List[str]):
            An optional list of coordinates over which to aggregate the reliability
            calibration table using summation. This is equivalent to constructing
            then using aggregate-reliability-tables but with reduced memory
            usage due to avoiding large intermediate data.

    Returns:
        iris.cube.Cube:
            Reliability tables for the forecast diagnostic with a leading
            threshold coordinate.
    """
    from improver.calibration import split_forecasts_and_truth
    from improver.calibration.reliability_calibration import (
        ConstructReliabilityCalibrationTables,
    )

    # Separate the forecast and truth cubes using the supplied attribute.
    forecast, truth, _ = split_forecasts_and_truth(cubes, truth_attribute)

    plugin_kwargs = {
        "n_probability_bins": n_probability_bins,
        "single_value_lower_limit": single_value_lower_limit,
        "single_value_upper_limit": single_value_upper_limit,
    }
    plugin = ConstructReliabilityCalibrationTables(**plugin_kwargs)
    return plugin(forecast, truth, aggregate_coordinates)
| bsd-3-clause | d13acb50b5c508add4e4d31ff3010783 | 47.226804 | 86 | 0.709491 | 4.546161 | false | false | false | false |
metoppv/improver | improver/psychrometric_calculations/cloud_condensation_level.py | 2 | 5584 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module to contain CloudCondensationLevel plugin."""
from typing import List, Tuple
import numpy as np
from iris.cube import Cube
from iris.exceptions import CoordinateNotFoundError
from scipy.optimize import newton
from improver import PostProcessingPlugin
from improver.metadata.utilities import (
create_new_diagnostic_cube,
generate_mandatory_attributes,
)
from improver.psychrometric_calculations.psychrometric_calculations import (
dry_adiabatic_temperature,
saturated_humidity,
)
class CloudCondensationLevel(PostProcessingPlugin):
    """
    Derives the temperature and pressure of the convective cloud condensation
    level from near-surface values of temperature, pressure and humidity mixing
    ratio.
    """

    def __init__(self, model_id_attr: str = None):
        """
        Set up class

        Args:
            model_id_attr:
                Name of model ID attribute to be copied from source cubes to output cube
        """
        self.model_id_attr = model_id_attr
        # Input cubes are stored on the instance by process().
        self.temperature = None
        self.pressure = None
        self.humidity = None

    def _make_ccl_cube(self, data: np.ndarray, is_temperature: bool) -> Cube:
        """Puts the data array into a CF-compliant cube"""
        if is_temperature:
            name, units = "air_temperature_at_condensation_level", "K"
        else:
            name, units = "air_pressure_at_condensation_level", "Pa"

        optional_attributes = {}
        if self.model_id_attr:
            optional_attributes[self.model_id_attr] = self.temperature.attributes[
                self.model_id_attr
            ]

        cube = create_new_diagnostic_cube(
            name,
            units,
            self.temperature,
            mandatory_attributes=generate_mandatory_attributes(
                [self.temperature, self.pressure, self.humidity]
            ),
            optional_attributes=optional_attributes,
            data=data,
        )
        # The template cube may have had a height coord describing it as
        # screen-level; remove it if present.
        try:
            cube.remove_coord("height")
        except CoordinateNotFoundError:
            pass
        return cube

    def _iterate_to_ccl(self) -> Tuple[np.ndarray, np.ndarray]:
        """Uses a Newton iterator to find the pressure level where the
        adiabatically-adjusted temperature equals the saturation temperature.
        Returns pressure and temperature arrays."""

        def _humidity_excess(trial_pressure, pressure, temperature, humidity):
            """Difference between the actual humidity and the saturation
            humidity at the dry-adiabatically adjusted temperature for a
            trial pressure."""
            trial_temperature = dry_adiabatic_temperature(
                temperature, pressure, trial_pressure
            )
            return humidity - saturated_humidity(trial_temperature, trial_pressure)

        ccl_pressure = newton(
            _humidity_excess,
            self.pressure.data.copy(),
            args=(self.pressure.data, self.temperature.data, self.humidity.data),
            tol=100,
            maxiter=20,
        ).astype(np.float32)
        ccl_temperature = dry_adiabatic_temperature(
            self.temperature.data, self.pressure.data, ccl_pressure
        )
        return ccl_pressure, ccl_temperature

    def process(self, cubes: List[Cube]) -> Tuple[Cube, Cube]:
        """
        Calculates the cloud condensation level from the near-surface inputs.

        Args:
            cubes:
                Cubes, in this order, of temperature (K), pressure (Pa)
                and humidity mixing ratio (kg kg-1)

        Returns:
            Cubes of air_temperature_at_cloud_condensation_level and
            air_pressure_at_cloud_condensation_level
        """
        self.temperature, self.pressure, self.humidity = cubes
        pressure_at_ccl, temperature_at_ccl = self._iterate_to_ccl()
        temperature_cube = self._make_ccl_cube(temperature_at_ccl, is_temperature=True)
        pressure_cube = self._make_ccl_cube(pressure_at_ccl, is_temperature=False)
        return temperature_cube, pressure_cube
| bsd-3-clause | 3916531b53a78af742c1dfa7187a0470 | 38.602837 | 88 | 0.653653 | 4.256098 | false | false | false | false |
metoppv/improver | improver/utilities/interpolation.py | 3 | 9823 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module to contain interpolation functions."""
import warnings
from typing import Optional
import iris
import numpy as np
from iris.cube import Cube
from numpy import ndarray
from scipy.interpolate import griddata
from scipy.spatial.qhull import QhullError
from improver import BasePlugin
def interpolate_missing_data(
    data: ndarray,
    method: str = "linear",
    limit: Optional[ndarray] = None,
    limit_as_maximum: bool = True,
    valid_points: Optional[ndarray] = None,
) -> ndarray:
    """
    Fill NaN gaps in a 2D field by interpolating from the valid points,
    modifying ``data`` in place and returning it.

    Args:
        data:
            The field of data to be interpolated across gaps.
        method:
            The method to use to fill in the data. This is usually "linear" for
            linear interpolation, and "nearest" for a nearest neighbour
            approach. It can take any method available to the method
            scipy.interpolate.griddata.
        limit:
            The array containing limits for each grid point that are
            imposed on any value in the region that has been interpolated.
        limit_as_maximum:
            If True the test against the limit array is that if the
            interpolated values exceed the limit they should be set to the
            limit value. If False, the test is whether the interpolated values
            fall below the limit value.
        valid_points:
            A boolean array that allows a subset of the unmasked data to be
            chosen as source data for the interpolation process. True values
            in this array mark points that can be used for interpolation if
            they are not otherwise invalid. False values mark points that
            should not be used, even if they are otherwise valid data points.

    Returns:
        The original data plus interpolated data in masked regions where it
        was possible to fill these in.
    """
    if valid_points is None:
        # The np.bool alias was removed in numpy 1.24; the builtin bool is
        # the supported spelling for a boolean dtype.
        valid_points = np.full_like(data, True, dtype=bool)

    # Source points are those that are finite AND flagged as usable.
    index = ~np.isnan(data)
    index_valid_data = valid_points[index]
    index[index] = index_valid_data

    data_filled = data
    if np.any(index):
        ynum, xnum = data.shape
        (y_points, x_points) = np.mgrid[0:ynum, 0:xnum]
        values = data[index]
        try:
            data_filled = griddata(
                np.where(index), values, (y_points, x_points), method=method
            )
        except QhullError:
            # Degenerate / insufficient source points for triangulation:
            # leave the data unfilled.
            data_filled = data

    if limit is not None:
        # Constrain only the points that have been newly filled in.
        index = ~np.isfinite(data) & np.isfinite(data_filled)
        if limit_as_maximum:
            data_filled[index] = np.clip(data_filled[index], None, limit[index])
        else:
            data_filled[index] = np.clip(data_filled[index], limit[index], None)

    # Copy interpolated values back into the (previously invalid) gaps.
    index = ~np.isfinite(data)
    data[index] = data_filled[index]
    return data
class InterpolateUsingDifference(BasePlugin):
    """
    Uses interpolation to fill masked regions in the data contained within the
    input cube. This is achieved by calculating the difference between the
    input cube and a complete (i.e. complete across the whole domain) reference
    cube. The difference between the data in regions where they overlap is
    calculated and this difference field is then interpolated across the
    domain. Any masked regions in the input cube data are then filled with data
    calculated as the reference cube data minus the interpolated difference
    field.
    """
    def __repr__(self) -> str:
        """String representation of plugin."""
        return "<InterpolateUsingDifference>"
    @staticmethod
    def _check_inputs(cube: Cube, reference_cube: Cube, limit: Optional[Cube]) -> None:
        """
        Check that the input cubes are compatible and the data is complete or
        masked as expected.

        Raises:
            ValueError: If the reference cube contains np.nan data, or if the
                units of the reference cube / limit cube cannot be converted
                to those of the input cube.
        """
        if np.isnan(reference_cube.data).any():
            raise ValueError(
                "The reference cube contains np.nan data indicating that it "
                "is not complete across the domain."
            )
        try:
            # Work throughout in the units of the input cube; the limit, if
            # provided, must also be convertible to these units.
            reference_cube.convert_units(cube.units)
            if limit is not None:
                limit.convert_units(cube.units)
        except ValueError as err:
            # Re-raise with the same exception type, prepending context.
            raise type(err)(
                "Reference cube and/or limit do not have units compatible with"
                " cube. " + str(err)
            )
    def process(
        self,
        cube: Cube,
        reference_cube: Cube,
        limit: Optional[Cube] = None,
        limit_as_maximum: bool = True,
    ) -> Cube:
        """
        Apply plugin to input data.
        Args:
            cube:
                Cube for which interpolation is required to fill masked
                regions.
            reference_cube:
                A cube that covers the entire domain that it shares with
                cube.
            limit:
                A cube of limiting values to apply to the cube that is being
                filled in. This can be used to ensure that the resulting values
                do not fall below / exceed the limiting values; whether the
                limit values should be used as a minima or maxima is
                determined by the limit_as_maximum option. These values should
                be on an x-y grid of the same size as an x-y slice of cube.
            limit_as_maximum:
                If True the test against the values allowed by the limit array
                is that if the interpolated values exceed the limit they should
                be set to the limit value. If False, the test is whether the
                interpolated values fall below the limit value.
        Return:
            A copy of the input cube in which the missing data has been
            populated with values obtained through interpolating the
            difference field and subtracting the result from the reference
            cube.
        Raises:
            ValueError: If the reference cube is not complete across the
                entire domain.
        """
        if not np.ma.is_masked(cube.data):
            # Nothing to fill in; warn rather than fail so callers can pass
            # data through unconditionally.
            warnings.warn(
                "Input cube unmasked, no data to fill in, returning " "unchanged."
            )
            return cube
        self._check_inputs(cube, reference_cube, limit)
        filled_cube = iris.cube.CubeList()
        xaxis, yaxis = cube.coord(axis="x"), cube.coord(axis="y")
        # Work on each x-y slice of the (possibly multi-dimensional) cube
        # alongside the matching slice of the reference cube.
        for cslice, rslice in zip(
            cube.slices([yaxis, xaxis]), reference_cube.slices([yaxis, xaxis])
        ):
            invalid_points = cslice.data.mask.copy()
            valid_points = ~invalid_points
            # reference - input differences at valid points; np.nan elsewhere
            # so interpolate_missing_data treats those points as gaps.
            difference_field = np.subtract(
                rslice.data,
                cslice.data,
                out=np.full(cslice.shape, np.nan),
                where=valid_points,
            )
            # NOTE: interpolate_missing_data fills its input array in place
            # and returns that same array, so after this call
            # difference_field already contains the linearly interpolated
            # values; the nearest-neighbour fallback below deliberately
            # reuses it.
            interpolated_difference = interpolate_missing_data(
                difference_field, valid_points=valid_points
            )
            # If any invalid points remain in the difference field, use nearest
            # neighbour interpolation to fill these with the nearest difference
            remain_invalid = np.isnan(interpolated_difference)
            if remain_invalid.any():
                interpolated_difference = interpolate_missing_data(
                    difference_field, valid_points=~remain_invalid, method="nearest"
                )
            result = cslice.copy()
            # Fill the masked points: input = reference - difference.
            result.data[invalid_points] = (
                rslice.data[invalid_points] - interpolated_difference[invalid_points]
            )
            if limit is not None:
                # Constrain only the newly filled points, as a maximum or
                # minimum according to limit_as_maximum.
                if limit_as_maximum:
                    result.data[invalid_points] = np.clip(
                        result.data[invalid_points], None, limit.data[invalid_points]
                    )
                else:
                    result.data[invalid_points] = np.clip(
                        result.data[invalid_points], limit.data[invalid_points], None
                    )
            filled_cube.append(result)
        return filled_cube.merge_cube()
| bsd-3-clause | e72512f43f14fe08be1024abd491ef77 | 39.258197 | 87 | 0.622213 | 4.664292 | false | false | false | false |
metoppv/improver | improver/psychrometric_calculations/cloud_top_temperature.py | 1 | 6910 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module containing the CloudTopTemperature plugin"""
import numpy as np
from iris.cube import Cube
from numpy import ndarray
from improver import PostProcessingPlugin
from improver.metadata.utilities import (
create_new_diagnostic_cube,
generate_mandatory_attributes,
)
from improver.psychrometric_calculations.psychrometric_calculations import (
adjust_for_latent_heat,
dry_adiabatic_temperature,
saturated_humidity,
)
from improver.utilities.cube_checker import assert_spatial_coords_match
class CloudTopTemperature(PostProcessingPlugin):
    """Plugin to calculate the convective cloud top temperature from the
    cloud condensation level temperature and pressure, and temperature
    on pressure levels data using saturated ascent.
    The temperature is that of the parcel after saturated ascent at the last pressure level
    where the parcel is buoyant. The interpolation required to get closer is deemed expensive.
    If the cloud top temperature is less than 4K colder than the cloud condensation level,
    the cloud top temperature is masked.
    """
    def __init__(self, model_id_attr: str = None):
        """
        Set up class
        Args:
            model_id_attr:
                Name of model ID attribute to be copied from source cubes to output cube
        """
        self.model_id_attr = model_id_attr
        # Placeholder cubes; replaced by the real inputs in process().
        self.t_at_ccl = Cube(None)
        self.p_at_ccl = Cube(None)
        self.temperature = Cube(None)
        # Cloud tops less than this many kelvin colder than the CCL
        # temperature are masked out (see class docstring).
        self.minimum_t_diff = 4
    def _calculate_cct(self) -> ndarray:
        """
        Ascends through the pressure levels (decreasing pressure) calculating the saturated
        ascent from the cloud condensation level until a level is reached where the
        atmosphere profile is warmer than the ascending parcel, signifying that buoyancy
        is negative and cloud growth has ceased.
        CCT is the saturated adiabatic temperature at the last atmosphere pressure level where
        the profile is buoyant.
        Temperature data are in Kelvin, Pressure data are in pascals, humidity data are in kg/kg.
        A mask is used to keep track of which columns have already been solved, so that they
        are not included in future iterations (x3 speed-up).
        """
        # Start from the CCL temperature; overwritten as the parcel ascends
        # while its column remains buoyant.
        cct = np.ma.masked_array(self.t_at_ccl.data.copy())
        # Saturation mixing ratio at the CCL; used for the latent-heat
        # adjustment at each level of the ascent.
        q_at_ccl = saturated_humidity(self.t_at_ccl.data, self.p_at_ccl.data)
        # Masked entries in ccl_with_mask mark columns whose cloud top has
        # already been found; mask holds the still-unsolved columns.
        ccl_with_mask = np.ma.masked_array(self.t_at_ccl.data, False)
        mask = ~ccl_with_mask.mask
        for t in self.temperature.slices_over("pressure"):
            # Environment temperature, evaluated only for unsolved columns.
            t_environment = np.full_like(t.data, np.nan)
            t_environment[mask] = t.data[mask]
            (pressure,) = t.coord("pressure").points
            # Lift the parcel dry-adiabatically from the CCL to this level...
            t_dry_parcel = dry_adiabatic_temperature(
                self.t_at_ccl.data[mask], self.p_at_ccl.data[mask], pressure,
            )
            t_parcel = np.full_like(t.data, np.nan)
            # ...then adjust for latent heat released by condensation to get
            # the saturated parcel temperature.
            t_parcel[mask], _ = adjust_for_latent_heat(
                t_dry_parcel, q_at_ccl[mask], pressure
            )
            # Mask out points where parcel temperature, t_parcel, is less than atmosphere
            # temperature, t, but only after the parcel pressure, becomes lower than the
            # cloud base pressure.
            ccl_with_mask[mask] = np.ma.masked_where(
                (t_parcel[mask] < t_environment[mask])
                & (pressure < self.p_at_ccl.data[mask]),
                ccl_with_mask[mask],
            )
            # Find mask with CCL points that are still not masked.
            mask = ~ccl_with_mask.mask
            # Record the parcel temperature at the last buoyant level so far.
            cct[mask] = t_parcel[mask]
            # Drop the reference to the completed slice (presumably to limit
            # memory use while iterating over levels).
            del t
            if mask.sum() == 0:
                # All columns solved; no need to ascend further.
                break
        # Mask shallow cloud: tops less than minimum_t_diff colder than the
        # CCL are not considered significant convective cloud.
        cct = np.ma.masked_where(self.t_at_ccl.data - cct < self.minimum_t_diff, cct)
        return cct
    def _make_cct_cube(self, data: ndarray) -> Cube:
        """Puts the data array into a CF-compliant cube"""
        attributes = {}
        if self.model_id_attr:
            attributes[self.model_id_attr] = self.t_at_ccl.attributes[
                self.model_id_attr
            ]
        cube = create_new_diagnostic_cube(
            "air_temperature_at_convective_cloud_top",
            "K",
            self.t_at_ccl,
            mandatory_attributes=generate_mandatory_attributes(
                [self.t_at_ccl, self.p_at_ccl]
            ),
            optional_attributes=attributes,
            data=data,
        )
        return cube
    def process(self, t_at_ccl: Cube, p_at_ccl: Cube, temperature: Cube) -> Cube:
        """
        Args:
            t_at_ccl:
                temperature at cloud condensation level
            p_at_ccl:
                pressure at cloud condensation level
            temperature:
                temperature on pressure levels
        Returns:
            Cube of cloud top temperature
        """
        self.t_at_ccl = t_at_ccl
        self.p_at_ccl = p_at_ccl
        self.temperature = temperature
        assert_spatial_coords_match([self.t_at_ccl, self.p_at_ccl, self.temperature])
        # Standardise units before the ascent calculation.
        self.temperature.convert_units("K")
        self.t_at_ccl.convert_units("K")
        self.p_at_ccl.convert_units("Pa")
        cct = self._make_cct_cube(self._calculate_cct())
        return cct
| bsd-3-clause | fd0b34ad36170a8c9bfec8423a6244fe | 41.392638 | 97 | 0.644863 | 3.890766 | false | false | false | false |
metoppv/improver | improver/metadata/utilities.py | 2 | 8372 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""General IMPROVER metadata utilities"""
import hashlib
import pprint
from typing import Any, Dict, List, Optional, Type, Union
import dask.array as da
import iris
import numpy as np
from cf_units import Unit
from iris.cube import Cube
from numpy import ndarray
from numpy.ma.core import MaskedArray
from improver.metadata.constants.attributes import (
MANDATORY_ATTRIBUTE_DEFAULTS,
MANDATORY_ATTRIBUTES,
)
def create_new_diagnostic_cube(
    name: str,
    units: Union[Unit, str],
    template_cube: Cube,
    mandatory_attributes: Union[Dict[str, str], Dict],
    optional_attributes: Optional[Union[Dict[str, str], Dict]] = None,
    data: Optional[Union[MaskedArray, ndarray]] = None,
    dtype: Type = np.float32,
) -> Cube:
    """
    Creates a new diagnostic cube with suitable metadata.

    Args:
        name:
            Standard or long name for output cube
        units:
            Units for output cube
        template_cube:
            Cube from which to copy dimensional and auxiliary coordinates
        mandatory_attributes:
            Dictionary containing values for the mandatory attributes
            "title", "source" and "institution". These are overridden by
            values in the optional_attributes dictionary, if specified.
        optional_attributes:
            Dictionary of optional attribute names and values. If values for
            mandatory attributes are included in this dictionary they override
            the values of mandatory_attributes.
        data:
            Data array. If not set, cube is filled with zeros using a lazy
            data object, as this will be overwritten later by the caller
            routine.
        dtype:
            Datatype for dummy cube data if "data" argument is None.

    Returns:
        Cube with correct metadata to accommodate new diagnostic field

    Raises:
        ValueError: If any mandatory attribute is missing from the combined
            attribute dictionaries.
    """
    # Copy so that merging optional attributes does not mutate the caller's
    # mandatory_attributes dictionary (previously this updated it in place).
    attributes = dict(mandatory_attributes)
    if optional_attributes is not None:
        attributes.update(optional_attributes)

    error_msg = ""
    for attr in MANDATORY_ATTRIBUTES:
        if attr not in attributes:
            error_msg += "{} attribute is required\n".format(attr)
    if error_msg:
        raise ValueError(error_msg)

    if data is None:
        # Lazy zeros: the caller is expected to overwrite this data.
        data = da.zeros_like(template_cube.core_data(), dtype=dtype)

    aux_coords_and_dims, dim_coords_and_dims = [
        [
            (coord.copy(), template_cube.coord_dims(coord))
            for coord in getattr(template_cube, coord_type)
        ]
        for coord_type in ("aux_coords", "dim_coords")
    ]
    cube = iris.cube.Cube(
        data,
        units=units,
        attributes=attributes,
        dim_coords_and_dims=dim_coords_and_dims,
        aux_coords_and_dims=aux_coords_and_dims,
    )
    cube.rename(name)
    return cube
def generate_mandatory_attributes(
    diagnostic_cubes: List[Cube], model_id_attr: Optional[str] = None
) -> Dict[str, str]:
    """
    Generate mandatory attributes for a new diagnostic calculated from
    several input diagnostics. Where every input carries the same value for
    an attribute that value is used; otherwise the default value is retained.

    Args:
        diagnostic_cubes:
            List of diagnostic cubes used in calculating the new diagnostic
        model_id_attr:
            Name of attribute used to identify source model for blending,
            if required

    Returns:
        Dictionary of mandatory attribute "key": "value" pairs.

    Raises:
        ValueError: If model_id_attr is specified but is missing from, or
            inconsistent across, the input cubes.
    """
    # Sentinel distinguishing "attribute absent" from any real value.
    _absent = object()
    source_attrs = [cube.attributes for cube in diagnostic_cubes]
    must_match = [model_id_attr] if model_id_attr else []
    result = MANDATORY_ATTRIBUTE_DEFAULTS.copy()
    for attr_name in MANDATORY_ATTRIBUTES + must_match:
        candidates = {attrs.get(attr_name, _absent) for attrs in source_attrs}
        if len(candidates) == 1 and _absent not in candidates:
            (result[attr_name],) = candidates
        elif attr_name in must_match:
            raise ValueError(
                'Required attribute "{}" is missing or '
                "not the same on all input cubes".format(attr_name)
            )
    return result
def generate_hash(data_in: Any) -> str:
    """
    Generate a hash from the data_in that can be used to uniquely identify
    equivalent data_in.

    Args:
        data_in:
            The data from which a hash is to be generated. This can be of any
            type that can be pretty printed.

    Returns:
        A hexadecimal string which is a hash hexdigest of the data as a
        string.
    """
    # pprint gives a repeatable textual rendering of arbitrary
    # pretty-printable objects, which is then hashed with SHA-256.
    rendered = pprint.pformat(data_in)
    digest = hashlib.sha256(rendered.encode("utf-8"))
    return digest.hexdigest()
def create_coordinate_hash(cube: Cube) -> str:
    """
    Generate a hash based on the input cube's x and y coordinates. This
    acts as a unique identifier for the grid which can be used to allow two
    grids to be compared.

    Args:
        cube:
            The cube from which x and y coordinates will be used to
            generate a hash.

    Returns:
        A hash created using the x and y coordinates of the input cube.
    """
    hash_input = []
    for coord in (cube.coord(axis="x"), cube.coord(axis="y")):
        # NOTE(review): coord.bounds is typically an ndarray, so this
        # isinstance check against list usually contributes None to the
        # hash - confirm whether bounds were intended to be included.
        bounds_part = list(coord.bounds) if isinstance(coord.bounds, list) else None
        hash_input.extend(
            [
                list(coord.points),
                bounds_part,
                coord.standard_name,
                coord.long_name,
                coord.coord_system,
                coord.units,
            ]
        )
    return generate_hash(hash_input)
def get_model_id_attr(cubes: List[Cube], model_id_attr: str) -> str:
    """
    Gets the specified model ID attribute from a list of input cubes, checking
    that the value is the same on all those cubes in the process.

    Args:
        cubes:
            List of cubes to get the attribute from
        model_id_attr:
            Attribute name

    Returns:
        The unique attribute value

    Raises:
        ValueError: If the attribute is absent from any cube, or takes more
            than one value across the inputs.
    """
    try:
        unique_values = {cube.attributes[model_id_attr] for cube in cubes}
    except KeyError as error:
        # Name the cubes on which the attribute is missing (or falsy) to
        # make the error actionable.
        cubes_missing_attr = [
            cube.name()
            for cube in cubes
            if not cube.attributes.get(model_id_attr, False)
        ]
        raise ValueError(
            f"Model ID attribute {model_id_attr} not present for {', '.join(cubes_missing_attr)}."
        ) from error
    if len(unique_values) != 1:
        raise ValueError(
            f"Attribute {model_id_attr} must be the same for all input cubes. "
            f"{' != '.join(unique_values)}"
        )
    (attribute_value,) = unique_values
    return attribute_value
| bsd-3-clause | 38bb958a46dc32ae986358c1c6e043e3 | 34.176471 | 93 | 0.647516 | 4.299949 | false | false | false | false |
metoppv/improver | improver_tests/acceptance/test_blend_adjacent_points.py | 3 | 4998 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for the blend-adjacent-points CLI"""
import pytest
from . import acceptance as acc
pytestmark = [pytest.mark.acc, acc.skip_if_kgo_missing]
CLI = acc.cli_name_with_dashes(__file__)
run_cli = acc.run_cli(CLI)
def test_basic_mean(tmp_path):
    """Blend adjacent forecast-period points and compare against the KGO."""
    kgo_dir = acc.kgo_root() / "blend-adjacent-points/basic_mean"
    kgo_path = kgo_dir / "kgo.nc"
    input_paths = sorted(kgo_dir.glob("multiple_probabilities_rain_*H.nc"))
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate", "forecast_period",
        "--central-point", "2",
        "--units", "hours",
        "--width", "3",
        *input_paths,
        "--output", output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path)
def test_time_bounds(tmp_path):
    """Triangular time blending where the input time bounds match."""
    kgo_dir = acc.kgo_root() / "blend-adjacent-points/time_bounds"
    kgo_path = kgo_dir / "kgo.nc"
    input_paths = sorted(kgo_dir.glob("*wind_gust*.nc"))
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate", "forecast_period",
        "--central-point", "4",
        "--units", "hours",
        "--width", "2",
        *input_paths,
        "--output", output_path,
    ]
    run_cli(args)
    acc.compare(output_path, kgo_path)
def test_mismatched_bounds_ranges(tmp_path):
    """Triangular time blending fails when input time bounds do not match."""
    kgo_dir = acc.kgo_root() / "blend-adjacent-points/basic_mean"
    input_paths = sorted(kgo_dir.glob("multiple_probabilities_rain_*H.nc"))
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate", "forecast_period",
        "--central-point", "2",
        "--units", "hours",
        "--width", "3",
        "--blend-time-using-forecast-period",
        *input_paths,
        "--output", output_path,
    ]
    with pytest.raises(ValueError, match=".*mismatching time bounds.*"):
        run_cli(args)
def test_mismatched_args(tmp_path):
    """Triangular time blending rejects inappropriate argument combinations."""
    kgo_dir = acc.kgo_root() / "blend-adjacent-points/basic_mean"
    input_paths = sorted(kgo_dir.glob("multiple_probabilities_rain_*H.nc"))
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate", "model",
        "--central-point", "2",
        "--units", "None",
        "--width", "3",
        "--blend-time-using-forecast-period",
        *input_paths,
        "--output", output_path,
    ]
    with pytest.raises(ValueError, match=".*blend-time-using-forecast.*"):
        run_cli(args)
def test_time(tmp_path):
    """Blending directly over the time coordinate is rejected."""
    kgo_dir = acc.kgo_root() / "blend-adjacent-points/time_bounds"
    input_paths = sorted(kgo_dir.glob("*wind_gust*.nc"))
    output_path = tmp_path / "output.nc"
    args = [
        "--coordinate", "time",
        "--central-point", "1536908400",
        "--units", "seconds since 1970-01-01 00:00:00",
        "--width", "7200",
        *input_paths,
        "--output", output_path,
    ]
    with pytest.raises(ValueError, match=".*Cannot blend over time.*"):
        run_cli(args)
| bsd-3-clause | 4af2c0d795e1bd50ad561cccbfdfe92f | 31.666667 | 79 | 0.614646 | 3.715985 | false | true | false | false |
metoppv/improver | improver/cli/orographic_enhancement.py | 3 | 5278 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Script to calculate orographic enhancement."""
from improver import cli
def extract_and_check(cube, height_value, units):
    """
    Extract a single height level from a cube, raising an error if the
    requested level is not available.

    Args:
        cube (cube):
            Cube to be extracted from and checked it worked.
        height_value (float):
            The boundary height to be extracted with the input units.
        units (str):
            The units of the height level to be extracted.

    Returns:
        iris.cube.Cube:
            A cube containing the extracted height level.

    Raises:
        ValueError: If height level is not found in the input cube.
    """
    from improver.utilities.cube_extraction import extract_subcube

    # A small +/- 0.1 tolerance window makes the constraint robust to the
    # floating point representation of the height coordinate.
    lower, upper = height_value - 0.1, height_value + 0.1
    height_constraint = ["height=[{}:{}]".format(lower, upper)]
    extracted = extract_subcube(cube, height_constraint, units=[units])
    if extracted is None:
        raise ValueError("No data available at height {}{}".format(height_value, units))
    return extracted
@cli.clizefy
@cli.with_output
def process(
    temperature: cli.inputcube,
    humidity: cli.inputcube,
    pressure: cli.inputcube,
    wind_speed: cli.inputcube,
    wind_direction: cli.inputcube,
    orography: cli.inputcube,
    *,
    boundary_height: float = 1000.0,
    boundary_height_units="m",
):
    """Calculate orographic enhancement
    Uses the ResolveWindComponents() and OrographicEnhancement() plugins.
    Outputs data on the high resolution orography grid.

    Args:
        temperature (iris.cube.Cube):
            Cube containing temperature at top of boundary layer.
        humidity (iris.cube.Cube):
            Cube containing relative humidity at top of boundary layer.
        pressure (iris.cube.Cube):
            Cube containing pressure at top of boundary layer.
        wind_speed (iris.cube.Cube):
            Cube containing wind speed values.
        wind_direction (iris.cube.Cube):
            Cube containing wind direction values relative to true north.
        orography (iris.cube.Cube):
            Cube containing height of orography above sea level on high
            resolution (1 km) UKPP domain grid.
        boundary_height (float):
            Model height level to extract variables for calculating orographic
            enhancement, as proxy for the boundary layer.
        boundary_height_units (str):
            Units of the boundary height specified for extracting model levels.

    Returns:
        iris.cube.Cube:
            Precipitation enhancement due to orography on the high resolution
            input orography grid.
    """
    from improver.orographic_enhancement import OrographicEnhancement
    from improver.wind_calculations.wind_components import ResolveWindComponents

    # Pull out the boundary-layer proxy level from each meteorological input.
    temperature, humidity, pressure, wind_speed, wind_direction = (
        extract_and_check(cube, boundary_height, boundary_height_units)
        for cube in (temperature, humidity, pressure, wind_speed, wind_direction)
    )
    # Resolve u and v wind components, then calculate the enhancement.
    u_wind, v_wind = ResolveWindComponents()(wind_speed, wind_direction)
    return OrographicEnhancement()(
        temperature, humidity, pressure, u_wind, v_wind, orography
    )
| bsd-3-clause | 9f7f2b87fbf367235a1980182d15b84b | 39.6 | 84 | 0.697423 | 4.391015 | false | false | false | false |
metoppv/improver | improver_tests/blending/weighted_blend/test_PercentileBlendingAggregator.py | 3 | 12267 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the weighted_blend.PercentileBlendingAggregator class."""
import unittest
import numpy as np
from iris.tests import IrisTest
from improver.blending.weighted_blend import PercentileBlendingAggregator
# The PERCENTILE_DATA below were generated using a call to np.random.rand
# The numbers were then scaled between 12 and 18, envisaged as Spring or
# Autumn temperatures in Celsius. These data have been reshaped and sorted so that
# the data are in ascending order along the first axis, suitable for a cube
# with a leading "percentile" dimension.
# Shape: (6 percentiles, 3 blend slices, 2, 2 spatial points).
PERCENTILE_DATA = np.array(
    [
        [
            [[14.501231, 13.732982], [13.354028, 13.747394]],
            [[13.009074, 12.984587], [12.560181, 12.503394]],
            [[12.968588, 13.497512], [12.824328, 12.66411]],
        ],
        [
            [[14.522574, 13.770423], [14.526242, 13.807901]],
            [[15.021733, 15.025502], [15.647017, 14.662473]],
            [[13.344669, 14.229357], [13.313879, 14.173083]],
        ],
        [
            [[17.247797, 14.454596], [14.744690, 13.966815]],
            [[15.108195, 15.125104], [15.867088, 15.497392]],
            [[13.505879, 14.879623], [14.2414055, 15.678431]],
        ],
        [
            [[17.298779, 15.792644], [15.138694, 14.043188]],
            [[16.187801, 15.579895], [16.051695, 16.435345]],
            [[14.028551, 15.433237], [16.645939, 16.288244]],
        ],
        [
            [[17.403114, 15.923066], [16.534174, 17.002329]],
            [[16.281300, 16.863739], [16.454231, 16.475237]],
            [[14.164387, 16.018044], [16.818222, 16.348572]],
        ],
        [
            [[17.458706, 17.408989], [17.443968, 17.03850]],
            [[17.330350, 16.923946], [16.620153, 16.48794]],
            [[15.292369, 16.490143], [17.481287, 16.97861]],
        ],
    ],
    dtype=np.float32,
)
# One weight per blend slice, replicated across the 2x2 spatial grid.
WEIGHTS = np.array(
    [[[0.8, 0.8], [0.8, 0.8]], [[0.5, 0.5], [0.5, 0.5]], [[0.2, 0.2], [0.2, 0.2]]],
    dtype=np.float32,
)
# Expected blended output for the unequal-weights aggregate test below.
BLENDED_PERCENTILE_DATA = np.array(
    [
        [[12.968588, 12.984587], [12.560181, 12.503394]],
        [[14.513496, 13.983413], [14.521752, 13.843809]],
        [[15.451672, 15.030349], [14.965419, 14.030121]],
        [[16.899292, 15.662327], [15.876251, 15.6937685]],
        [[17.333557, 16.205572], [16.516674, 16.478855]],
        [[17.458706, 17.408989], [17.481285, 17.0385]],
    ],
    dtype=np.float32,
)
# Expected blended output, presumably for an equal-weights case (not
# exercised in this visible portion of the module).
BLENDED_PERCENTILE_DATA_EQUAL_WEIGHTS = np.array(
    [
        [[12.968588, 12.984587], [12.560181, 12.503394]],
        [[13.818380, 14.144759], [14.107645, 13.893876]],
        [[14.521132, 15.050104], [15.0750885, 14.874123]],
        [[15.715300, 15.560184], [15.989533, 16.09052]],
        [[17.251623, 16.184650], [16.609045, 16.474388]],
        [[17.458706, 17.408987], [17.481287, 17.0385]],
    ],
    dtype=np.float32,
)
# Expected blended output, presumably for spatially-varying weights (not
# exercised in this visible portion of the module).
BLENDED_PERCENTILE_DATA_SPATIAL_WEIGHTS = np.array(
    [
        [[12.968588, 12.984587], [12.560181, 12.503394]],
        [[13.381246, 14.15497], [13.719661, 13.880822]],
        [[13.743379, 15.042325], [14.906357, 14.731093]],
        [[14.121047, 15.546338], [16.071285, 16.017628]],
        [[14.9885845, 16.15714], [16.74734, 16.47621]],
        [[17.458706, 17.408987], [17.481287, 17.0385]],
    ],
    dtype=np.float32,
)
# Three rows of eleven percentile values each, consumed by the
# blend_percentiles tests below.
PERCENTILE_VALUES = np.array(
    [
        [
            12.70237152,
            14.83664335,
            16.23242317,
            17.42014139,
            18.42036664,
            19.10276753,
            19.61048008,
            20.27459352,
            20.886425,
            21.41928051,
            22.60297787,
        ],
        [
            17.4934137,
            20.56739689,
            20.96798405,
            21.4865958,
            21.53586395,
            21.55643557,
            22.31650746,
            23.26993755,
            23.62817599,
            23.6783294,
            24.64542338,
        ],
        [
            16.24727652,
            17.57784376,
            17.9637658,
            18.52589225,
            18.99357526,
            20.50915582,
            21.82791334,
            21.90645982,
            21.95860878,
            23.52203933,
            23.71409191,
        ],
    ]
)
def generate_matching_weights_array(weights, other_dim_length):
    """Expand a 1D sequence of blend weights to the 2D float32 shape
    required to match the percentile cube.

    Args:
        weights (numpy.ndarray):
            A 1D array of weights varying along the blend dimension
        other_dim_length (int):
            Length of second dimension required to match percentile cube

    Returns:
        numpy.ndarray:
            Weights that vary along the first (blend) dimension, with second
            dimension of required length
    """
    # Replicate the weight vector once per required column, then transpose so
    # the blend dimension leads.
    replicated = np.array([list(weights)] * other_dim_length, dtype=np.float32)
    return replicated.T
class Test_aggregate(IrisTest):
    """Test the aggregate method"""
    def test_blend_percentile_aggregate(self):
        """Test blend_percentile_aggregate function works"""
        # Three blend slices, highest weight on the first slice.
        weights = generate_matching_weights_array([0.6, 0.3, 0.1], 4)
        percentiles = np.array([0, 20, 40, 60, 80, 100]).astype(np.float32)
        result = PercentileBlendingAggregator.aggregate(
            PERCENTILE_DATA, 1, percentiles, weights
        )
        self.assertArrayAlmostEqual(
            result, BLENDED_PERCENTILE_DATA,
        )
    def test_2D_simple_case(self):
        """ Test that for a simple case with only one point in the resulting
        array the function behaves as expected"""
        weights = generate_matching_weights_array([0.8, 0.2], 1)
        percentiles = np.array([0, 50, 100])
        # Two blend slices of three percentile values each.
        perc_data = np.array([[1.0, 2.0], [5.0, 5.0], [10.0, 9.0]])
        result = PercentileBlendingAggregator.aggregate(
            perc_data, 1, percentiles, weights
        )
        expected_result = np.array([1.0, 5.0, 10.0])
        self.assertArrayAlmostEqual(result, expected_result)
    def test_3D_simple_case(self):
        """ Test that for a simple case with only one point and an extra
        internal dimension behaves as expected"""
        weights = generate_matching_weights_array([0.5, 0.5], 1)
        percentiles = np.array([0, 50, 100])
        perc_data = np.array([[[1.0], [2.0]], [[5.0], [6.0]], [[10.0], [9.0]]])
        result = PercentileBlendingAggregator.aggregate(
            perc_data, 1, percentiles, weights
        )
        expected_result = np.array([[1.0], [5.555555], [10.0]])
        self.assertArrayAlmostEqual(result, expected_result)
    def test_4D_simple_case(self):
        """ Test that for a simple case with only one point and 4D input data
        it behaves as expected"""
        weights = generate_matching_weights_array([0.5, 0.5], 1)
        percentiles = np.array([0, 50, 100])
        perc_data = np.array([1.0, 3.0, 2.0, 4.0, 5.0, 6.0])
        # (percentile, blend, y, x) layout; aggregation collapses axis 1.
        input_shape = (3, 2, 1, 1)
        perc_data = perc_data.reshape(input_shape)
        result = PercentileBlendingAggregator.aggregate(
            perc_data, 1, percentiles, weights
        )
        expected_result = np.array([[[1.0]], [[3.5]], [[6.0]]])
        expected_result_shape = (3, 1, 1)
        self.assertArrayAlmostEqual(result, expected_result)
        self.assertEqual(result.shape, expected_result_shape)
    def test_error_unmatched_weights(self):
        """Test error when weights shape doesn't match length of blend dimension
        (in this case 3 weights for 2 blend slices)"""
        weights = generate_matching_weights_array([0.7, 0.1, 0.2], 1)
        percentiles = np.array([0, 50, 100])
        perc_data = np.array([[1.0, 2.0], [5.0, 5.0], [10.0, 9.0]])
        with self.assertRaisesRegex(ValueError, "Weights shape does not match data"):
            PercentileBlendingAggregator.aggregate(perc_data, 1, percentiles, weights)
class Test_blend_percentiles(IrisTest):
    """Test the blend_percentiles method"""
    def test_blend_percentiles(self):
        """Test blend_percentile function works"""
        weights = np.array([0.38872692, 0.33041788, 0.2808552])
        percentiles = np.array(
            [0.0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
        )
        result = PercentileBlendingAggregator.blend_percentiles(
            PERCENTILE_VALUES, percentiles, weights
        )
        expected_result_array = np.array(
            [
                12.70237152,
                16.65161847,
                17.97408712,
                18.86356829,
                19.84089805,
                20.77406153,
                21.39078426,
                21.73778353,
                22.22440125,
                23.53863876,
                24.64542338,
            ]
        )
        self.assertArrayAlmostEqual(result, expected_result_array)
    def test_two_percentiles(self):
        """Test that when two percentiles are provided, the extreme values in
        the set of thresholds we are blending are returned"""
        weights = np.array([0.5, 0.5])
        percentiles = np.array([30.0, 60.0])
        percentile_values = np.array([[5.0, 8.0], [6.0, 7.0]])
        result = PercentileBlendingAggregator.blend_percentiles(
            percentile_values, percentiles, weights
        )
        # Minimum of the lower bounds and maximum of the upper bounds.
        expected_result = np.array([5.0, 8.0])
        self.assertArrayAlmostEqual(result, expected_result)
    def test_three_percentiles_symmetric_case(self):
        """Test that when three percentiles are provided the correct values
        are returned, not a simple average"""
        weights = np.array([0.5, 0.5])
        percentiles = np.array([20.0, 50.0, 80.0])
        percentile_values = np.array([[5.0, 6.0, 7.0], [5.0, 6.5, 7.0]])
        result = PercentileBlendingAggregator.blend_percentiles(
            percentile_values, percentiles, weights
        )
        # Note the midpoint is 6.2, not the arithmetic mean 6.25.
        expected_result = np.array([5.0, 6.2, 7.0])
        self.assertArrayAlmostEqual(result, expected_result)
    def test_only_one_point_to_blend(self):
        """Test case where there is only one point in the coordinate we are
        blending over."""
        weights = np.array([1.0])
        percentiles = np.array([20.0, 50.0, 80.0])
        percentile_values = np.array([[5.0, 6.0, 7.0]])
        result = PercentileBlendingAggregator.blend_percentiles(
            percentile_values, percentiles, weights
        )
        # A single fully-weighted input should pass through unchanged.
        expected_result = np.array([5.0, 6.0, 7.0])
        self.assertArrayAlmostEqual(result, expected_result)
# Allow the test module to be run directly as a script.
if __name__ == "__main__":
    unittest.main()
| bsd-3-clause | cd37ecb28f05859ed28cdddfc439915b | 37.454545 | 86 | 0.59118 | 3.302908 | false | true | false | false |
metoppv/improver | improver/cli/__init__.py | 3 | 19033 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""init for cli and clize"""
import pathlib
import shlex
import time
from collections import OrderedDict
from functools import partial
import clize
from clize import parameters
from clize.help import ClizeHelp, HelpForAutodetectedDocstring
from clize.parser import value_converter
from clize.runner import Clize
from sigtools.wrappers import decorator
# Imports are done in their functions to make calls to -h quicker.
# selected clize imports/constants
# Re-export clize's parameter sentinels so CLI modules in this package can
# use them without importing clize directly.
IGNORE = clize.Parameter.IGNORE
LAST_OPTION = clize.Parameter.LAST_OPTION
REQUIRED = clize.Parameter.REQUIRED
UNDOCUMENTED = clize.Parameter.UNDOCUMENTED
# help helpers
def docutilize(obj):
    """Convert a Numpy or Google style docstring into reStructuredText.

    Args:
        obj (str or object):
            Either a docstring itself, or an object whose docstring should be
            rewritten in place.

    Returns:
        str or object:
            The converted string, or the object with its docstring replaced,
            depending on the type of the input.
    """
    from inspect import cleandoc, getdoc
    from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring

    is_plain_string = isinstance(obj, str)
    doc = cleandoc(obj) if is_plain_string else getdoc(obj)
    # Run Numpy-style conversion first, then Google-style, as either may
    # apply; finally strip/rename roles that clize's help does not render.
    doc = str(GoogleDocstring(str(NumpyDocstring(doc))))
    for needle, replacement in (
        (":exc:", ""),
        (":data:", ""),
        (":keyword", ":param"),
        (":kwtype", ":type"),
    ):
        doc = doc.replace(needle, replacement)
    if is_plain_string:
        return doc
    obj.__doc__ = doc
    return obj
class HelpForNapoleonDocstring(HelpForAutodetectedDocstring):
    """Subclass to add support for google style docstrings"""
    def add_docstring(self, docstring, *args, **kwargs):
        """Adds the updated docstring."""
        # Normalise Numpy/Google style to reST before clize parses it.
        docstring = docutilize(docstring)
        super().add_docstring(docstring, *args, **kwargs)
class DocutilizeClizeHelp(ClizeHelp):
    """Subclass to build Napoleon docstring from subject."""
    def __init__(self, subject, owner, builder=HelpForNapoleonDocstring.from_subject):
        # Only change from ClizeHelp is the docstring-aware help builder.
        super().__init__(subject, owner, builder)
# input handling
class ObjectAsStr(str):
    """Hide object under a string to pass it through Clize parser.

    Instances behave as the string representation of the wrapped object while
    keeping the original object accessible via ``original_object``.
    """

    __slots__ = ("original_object",)

    def __new__(cls, obj, name=None):
        """Wrap ``obj`` in a string subclass instance.

        Args:
            obj: The object to hide; passed through untouched if it is
                already an ``ObjectAsStr``.
            name (str, optional): Explicit string value to use; derived from
                the object when not supplied.
        """
        if isinstance(obj, cls):  # pass object through if already wrapped
            return obj
        if name is None:
            name = cls.obj_to_name(obj)
        self = str.__new__(cls, name)
        self.original_object = obj
        return self

    def __hash__(self):
        # make sure our hash doesn't clash with normal string hash.
        # BUG FIX: super().__hash__ is already bound to this instance, so it
        # must be called with no arguments; the previous
        # ``super().__hash__(self)`` raised TypeError whenever an instance
        # was hashed (e.g. used as a dict key).
        return super().__hash__() ^ hash(type(self))

    @staticmethod
    def obj_to_name(obj, cls=None):
        """Helper function to create the string.

        Uses the object's hash when available, falling back to ``id`` for
        unhashable objects, to make the name reasonably unique.
        """
        if isinstance(obj, str):
            return obj
        if cls is None:
            cls = type(obj)
        try:
            obj_id = hash(obj)
        except TypeError:
            obj_id = id(obj)
        return "<%s.%s@%i>" % (cls.__module__, cls.__name__, obj_id)
def maybe_coerce_with(converter, obj, **kwargs):
    """Apply ``converter`` when given a string, pass anything else through.

    If ``obj`` is an ``ObjectAsStr``-style wrapper, the wrapped
    ``original_object`` is unwrapped first.
    """
    target = getattr(obj, "original_object", obj)
    if isinstance(target, str):
        return converter(target, **kwargs)
    return target
@value_converter
def inputcube(to_convert):
    """Loads cube from file or returns passed object.
    Args:
        to_convert (string or iris.cube.Cube):
            File name or Cube object.
    Returns:
        Loaded cube or passed object.
    """
    # Deferred import keeps `improver -h` fast.
    from improver.utilities.load import load_cube
    return maybe_coerce_with(load_cube, to_convert)
@value_converter
def inputcube_nolazy(to_convert):
    """Loads cube from file or returns passed object.
    Where a load is performed, it will not have lazy data.
    Args:
        to_convert (string or iris.cube.Cube):
            File name or Cube object.
    Returns:
        Loaded cube or passed object.
    """
    from improver.utilities.load import load_cube
    # NOTE(review): `has_lazy_data` on an iris Cube is a method, so this
    # getattr yields a truthy bound method for any cube object (not the
    # method's result); the branch therefore realises data for every cube
    # passed in, which matches the "no lazy data" intent — confirm.
    if getattr(to_convert, "has_lazy_data", False):
        # Realise data if lazy
        to_convert.data
    return maybe_coerce_with(load_cube, to_convert, no_lazy_load=True)
@value_converter
def inputcubelist(to_convert):
    """Loads a cubelist from file or returns passed object.
    Args:
        to_convert (string or iris.cube.CubeList):
            File name or CubeList object.
    Returns:
        Loaded cubelist or passed object.
    """
    # Deferred import keeps `improver -h` fast.
    from improver.utilities.load import load_cubelist
    return maybe_coerce_with(load_cubelist, to_convert)
@value_converter
def inputjson(to_convert):
    """Loads json from file or returns passed object.
    Args:
        to_convert (string or dict):
            File name or json dictionary.
    Returns:
        Loaded json dictionary or passed object.
    """
    # Deferred import keeps `improver -h` fast.
    from improver.utilities.cli_utilities import load_json_or_none
    return maybe_coerce_with(load_json_or_none, to_convert)
@value_converter
def comma_separated_list(to_convert):
    """Converts comma separated string to list or returns passed object.
    Args:
        to_convert (string or list):
            comma separated string or list
    Returns:
        list
    """
    # Non-string input (already a list) is passed through unchanged.
    return maybe_coerce_with(lambda s: s.split(","), to_convert)
@value_converter
def comma_separated_list_of_float(to_convert):
    """Converts comma separated string to list of floats or returns passed object.
    Args:
        to_convert (string or list):
            comma separated string or list
    Returns:
        list
    """
    # Non-string input (already a list) is passed through unchanged.
    return maybe_coerce_with(
        lambda string: [float(s) for s in string.split(",")], to_convert
    )
@value_converter
def inputpath(to_convert):
    """Converts string paths to pathlib Path objects
    Args:
        to_convert (string or pathlib.Path):
            path represented as string
    Returns:
        (pathlib.Path): Path object
    """
    return maybe_coerce_with(pathlib.Path, to_convert)
@value_converter
def inputdatetime(to_convert):
    """Converts string to datetime or returns passed object.
    Args:
        to_convert (string or datetime):
            datetime represented as string of the format YYYYMMDDTHHMMZ
    Returns:
        (datetime): datetime object
    """
    # Deferred import keeps `improver -h` fast.
    from improver.utilities.temporal import cycletime_to_datetime
    return maybe_coerce_with(cycletime_to_datetime, to_convert)
def create_constrained_inputcubelist_converter(*constraints):
    """Makes function that the input constraints are used in a loop.
    The function is a @value_converter, this means it is used by clize to convert
    strings into objects.
    This is a way of not using the IMPROVER load_cube which will try to merge
    cubes. Iris load on the other hand won't deal with meta data properly.
    So an example is if you wanted to load an X cube and a Y cube from a cubelist
    of 2. You call this function with a list of constraints.
    These cubes get loaded and returned as a CubeList.
    Args:
        *constraints (tuple of str or callable or iris.Constraint):
            Constraints to be used in extracting the required cubes.
            Each constraint must match exactly one cube and extracted cubes
            will be sorted to match their order.
            A constraint can be an iris.Constraint object or a callable
            or cube name that can be used to construct one.
    Returns:
        callable:
            A function with the constraints used for a list comprehension.
    """
    @value_converter
    def constrained_inputcubelist_converter(to_convert):
        """Passes the cube and constraints onto maybe_coerce_with.
        Args:
            to_convert (str or iris.cube.CubeList):
                A CubeList or a filename to be loaded into a CubeList.
        Returns:
            iris.cube.CubeList:
                The loaded cubelist of constrained cubes.
        """
        from iris import Constraint
        from iris.cube import CubeList
        from improver.utilities.load import load_cubelist
        cubelist = maybe_coerce_with(load_cubelist, to_convert)
        # Callables are wrapped as cube_func constraints; strings and
        # Constraint objects are handed to extract_cube directly.
        return CubeList(
            cubelist.extract_cube(
                Constraint(cube_func=constr) if callable(constr) else constr
            )
            for constr in constraints
        )
    return constrained_inputcubelist_converter
# output handling
@decorator
def with_output(
    wrapped,
    *args,
    output=None,
    pass_through_output=False,
    compression_level=1,
    least_significant_digit: int = None,
    **kwargs,
):
    """Add `output` keyword only argument.
    Add `compression_level` option.
    Add `least_significant_digit` option.
    This is used to add extra `output`, `compression_level` and `least_significant_digit` CLI
    options. If `output` is provided, it saves the result of calling `wrapped` to file and returns
    None, otherwise it returns the result. If `compression_level` is provided, it compresses the
    data with the provided compression level (or not, if `compression_level` 0). If
    `least_significant_digit` provided, it will quantize the data to a certain number of
    significant figures.
    Args:
        wrapped (obj):
            The function to be wrapped.
        output (str, optional):
            Output file name. If not supplied, the output object will be
            printed instead.
        pass_through_output (bool):
            Pass through the output object even if saved to file.
            Used in pipelines of commands if intermediate output needs to be saved.
        compression_level (int):
            Will set the compression level (1 to 9), or disable compression (0).
        least_significant_digit (int):
            If specified will truncate the data to a precision given by
            10**(-least_significant_digit), e.g. if least_significant_digit=2, then the data will
            be quantized to a precision of 0.01 (10**(-2)). See
            http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml
            for details. When used with `compression level`, this will result in lossy
            compression.
    Returns:
        Result of calling `wrapped` or None if `output` is given.
    """
    # Deferred import keeps `improver -h` fast.
    from improver.utilities.save import save_netcdf
    result = wrapped(*args, **kwargs)
    # NOTE(review): saving is skipped when `result` is falsy, not only when
    # it is None — confirm a falsy-but-valid result cannot occur here.
    if output and result:
        save_netcdf(result, output, compression_level, least_significant_digit)
        if pass_through_output:
            # Wrap so the saved object can flow through a command pipeline.
            return ObjectAsStr(result, output)
        return
    return result
# cli object creation
def clizefy(obj=None, helper_class=DocutilizeClizeHelp, **kwargs):
    """Decorator for creating CLI objects.

    May be used bare (``@clizefy``) or with arguments
    (``@clizefy(helper_class=...)``); also accepts non-callable command
    tables, which are dispatched via ``Clize.get_cli``.
    """
    if obj is None:
        # Called with arguments only; return a decorator awaiting the target.
        return partial(clizefy, helper_class=helper_class, **kwargs)
    if hasattr(obj, "cli"):
        # Already clizefied; pass through unchanged.
        return obj
    if not callable(obj):
        return Clize.get_cli(obj, **kwargs)
    return Clize.keep(obj, helper_class=helper_class, **kwargs)
# help command
@clizefy(help_names=())
def improver_help(prog_name: parameters.pass_name, command=None, *, usage=False):
    """Show command help."""
    prog_name = prog_name.split()[0]
    # Build e.g. ["cmd", "--help", "--usage"], dropping falsy entries.
    args = filter(None, [command, "--help", usage and "--usage"])
    result = execute_command(SUBCOMMANDS_DISPATCHER, prog_name, *args)
    if not command and usage:
        # For top-level usage output, hide the dispatcher's own help line.
        result = "\n".join(
            line
            for line in result.splitlines()
            if not line.endswith("--help [--usage]")
        )
    return result
def command_executor(*argv, verbose=False, dry_run=False):
    """Common entry point for straight command execution."""
    # Thin wrapper binding the package-level dispatcher.
    return execute_command(
        SUBCOMMANDS_DISPATCHER, *argv, verbose=verbose, dry_run=dry_run
    )
def _cli_items():
    """Dynamically discover CLIs.

    Yields (command-name, clizefied-callable) pairs: the built-in ``help``
    command plus one entry per module in ``improver.cli`` exposing ``process``.
    """
    import importlib
    import pkgutil
    from improver.cli import __path__ as improver_cli_pkg_path
    yield ("help", improver_help)
    for minfo in pkgutil.iter_modules(improver_cli_pkg_path):
        mod_name = minfo.name
        if mod_name != "__main__":
            mcli = importlib.import_module("improver.cli." + mod_name)
            yield (mod_name, clizefy(mcli.process))
# Alphabetically ordered table of all discovered subcommands.
SUBCOMMANDS_TABLE = OrderedDict(sorted(_cli_items()))
# main CLI object with subcommands
SUBCOMMANDS_DISPATCHER = clizefy(
    SUBCOMMANDS_TABLE,
    description="""IMPROVER NWP post-processing toolbox""",
    footnotes="""See also improver --help for more information.""",
)
# IMPROVER top level main
def unbracket(args):
    """Convert input list with bracketed items into nested lists.

    >>> unbracket('foo [ bar a b ] [ baz c ] -o z'.split())
    ['foo', ['bar', 'a', 'b'], ['baz', 'c'], '-o', 'z']
    """
    mismatch_msg = "Mismatched bracket at position %i."
    current = []
    frames = []
    for position, token in enumerate(args):
        if token == "[":
            # Open a new nesting level, remembering the enclosing list.
            frames.append(current)
            current = []
        elif token == "]":
            if not frames:
                raise ValueError(mismatch_msg % position)
            # Attach the finished group to its parent and resume there.
            parent = frames.pop()
            parent.append(current)
            current = parent
        else:
            current.append(token)
    if frames:
        # At least one "[" was never closed.
        raise ValueError(mismatch_msg % len(args))
    return current
class TimeIt:
    """Context manager that records the wall-clock duration of its body,
    printing it on exit when verbose."""

    def __init__(self, verbose=False):
        self._t0 = None
        self._duration = None
        self._verbose = verbose

    def __enter__(self):
        self._t0 = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._duration = time.perf_counter() - self._t0
        if self._verbose:
            print(str(self))
        # Returning None lets any exception propagate.

    @property
    def elapsed(self):
        """Return elapsed time in seconds."""
        return self._duration

    def __str__(self):
        """Print elapsed time in seconds."""
        return f"Run-time: {self._duration}s"
def execute_command(dispatcher, prog_name, *args, verbose=False, dry_run=False):
    """Common entry point for command execution.

    Nested (bracketed) commands are executed depth-first; non-string
    arguments are wrapped so they can pass through the clize parser.
    """
    args = list(args)
    for i, arg in enumerate(args):
        if isinstance(arg, (list, tuple)):
            # process nested commands recursively
            arg = execute_command(
                dispatcher, prog_name, *arg, verbose=verbose, dry_run=dry_run
            )
        if isinstance(arg, pathlib.PurePath):
            arg = str(arg)
        elif not isinstance(arg, str):
            # Smuggle non-string objects through the string-based parser.
            arg = ObjectAsStr(arg)
        args[i] = arg
    # Shell-quoted command line, used for verbose reporting.
    msg = " ".join([shlex.quote(x) for x in (prog_name, *args)])
    if dry_run:
        if verbose:
            print(msg)
        return args
    with TimeIt() as timeit:
        result = dispatcher(prog_name, *args)
    if verbose:
        print(f"{timeit}; {msg}")
    if result is not None:
        print(ObjectAsStr.obj_to_name(result))
    return result
@clizefy()
def main(
    prog_name: parameters.pass_name,
    command: LAST_OPTION,
    *args,
    profile: value_converter(lambda _: _, name="FILENAME") = None,  # noqa: F821
    memprofile: value_converter(lambda _: _, name="FILENAME") = None,  # noqa: F821
    verbose=False,
    dry_run=False,
):
    """IMPROVER NWP post-processing toolbox
    Results from commands can be passed into file-like arguments
    of other commands by surrounding them by square brackets::
        improver command [ command ... ] ...
    Spaces around brackets are mandatory.
    Args:
        prog_name:
            The program name from argv[0].
        command (str):
            Command to execute
        args (tuple):
            Command arguments
        profile (str):
            If given, will write profiling to the file given.
            To write to stdout, use a hyphen (-)
        memprofile (str):
            Creates 2 files by adding a suffix to the provided arguemnt -
            a tracemalloc snapshot at the point of highest memory consumption
            of your program (suffixed with _SNAPSHOT)
            and a track of the maximum memory used by your program
            over time (suffixed with _MAX_TRACKER).
        verbose (bool):
            Print executed commands
        dry_run (bool):
            Print commands to be executed
    See improver help [--usage] [command] for more information
    on available command(s).
    """
    # Expand bracketed argument groups into nested lists for piping.
    args = unbracket(args)
    exec_cmd = execute_command
    if profile is not None:
        # Deferred import keeps `improver -h` fast.
        from improver.profile import profile_hook_enable
        profile_hook_enable(dump_filename=None if profile == "-" else profile)
    if memprofile is not None:
        from improver.memprofile import memory_profile_decorator
        exec_cmd = memory_profile_decorator(exec_cmd, memprofile)
    result = exec_cmd(
        SUBCOMMANDS_DISPATCHER,
        prog_name,
        command,
        *args,
        verbose=verbose,
        dry_run=dry_run,
    )
    return result
def run_main(argv=None):
    """Overrides argv[0] to be 'improver' then runs main.
    Args:
        argv (list of str):
            Arguments that were from the command line.
    """
    import sys
    from clize import run
    # clize help shows module execution as `python -m improver.cli`
    # override argv[0] and pass it explicitly in order to avoid this
    # so that the help command reflects the way that we call improver.
    if argv is None:
        # Copy so the caller's sys.argv is not mutated.
        argv = sys.argv[:]
    argv[0] = "improver"
    run(main, args=argv)
| bsd-3-clause | f9b011c417d1b87aa47488b280b9a62d | 29.599678 | 98 | 0.643146 | 4.09488 | false | false | false | false |
metoppv/improver | improver_tests/acceptance/test_cloud_top_temperature.py | 2 | 2643 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for the cloud-top-temperature CLI"""
import pytest
from . import acceptance as acc
# Mark the whole module as an acceptance test; skipped when the
# known-good-output (KGO) data set is unavailable.
pytestmark = [pytest.mark.acc, acc.skip_if_kgo_missing]
# CLI name is derived from this file's name (test_cloud_top_temperature ->
# cloud-top-temperature).
CLI = acc.cli_name_with_dashes(__file__)
run_cli = acc.run_cli(CLI)
@pytest.mark.parametrize("model_id_attr", (True, False))
def test_basic(tmp_path, model_id_attr):
    """Test cloud-top-temperature usage, with and without model_id_attr"""
    test_dir = acc.kgo_root() / "cloud-top-temperature"
    output_path = tmp_path / "output.nc"
    args = [
        test_dir / "cloud_condensation_level.nc",
        test_dir / "temperature_on_pressure_levels.nc",
        "--least-significant-digit",
        "2",
        "--output",
        output_path,
    ]
    # The model_id_attr variant selects a different known-good-output dir.
    if model_id_attr:
        args += ["--model-id-attr", "mosg__model_configuration"]
        kgo_dir = test_dir / "with_id_attr"
    else:
        kgo_dir = test_dir / "without_id_attr"
    kgo_path = kgo_dir / "kgo.nc"
    run_cli(args)
    # Compare produced NetCDF against the stored known good output.
    acc.compare(output_path, kgo_path)
| bsd-3-clause | 991951fcc172dc00a7bc0611366d767f | 41.629032 | 79 | 0.695044 | 3.933036 | false | true | false | false |
metoppv/improver | improver_tests/ensemble_copula_coupling/test_EnsembleReordering.py | 1 | 26371 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the
`ensemble_copula_coupling.EnsembleReordering` plugin.
"""
import itertools
import unittest
import numpy as np
from iris.cube import Cube
from iris.tests import IrisTest
from improver.ensemble_copula_coupling.ensemble_copula_coupling import (
EnsembleReordering as Plugin,
)
from improver.synthetic_data.set_up_test_cubes import (
set_up_percentile_cube,
set_up_variable_cube,
)
from improver.utilities.warnings_handler import ManageWarnings
from .ecc_test_data import ECC_TEMPERATURE_REALIZATIONS
class Test__recycle_raw_ensemble_realizations(IrisTest):
    """
    Test the _recycle_raw_ensemble_realizations
    method in the EnsembleReordering plugin.
    """
    def setUp(self):
        """
        Create a cube with a realization coordinate and a cube with a
        percentile coordinate with forecast_reference_time and
        forecast_period coordinates.
        """
        # Three 3x3 fields with distinct offsets so that each realization /
        # percentile slice is unique and recycling results are unambiguous.
        data = np.tile(np.linspace(5, 10, 9), 3).reshape(3, 3, 3)
        data[0] -= 1
        data[1] += 1
        data[2] += 3
        self.realization_cube = set_up_variable_cube(
            data.astype(np.float32), name="air_temperature", units="degC"
        )
        # Percentile data must ascend along the leading (percentile) axis,
        # hence the sort.
        self.percentile_cube = set_up_percentile_cube(
            np.sort(data.astype(np.float32), axis=0),
            np.array([10, 50, 90], dtype=np.float32),
            name="air_temperature",
            units="degC",
        )
        self.perc_coord = "percentile"
    def test_realization_for_equal(self):
        """
        Test to check the behaviour when the number of percentiles equals
        the number of realizations.
        """
        expected_data = np.array(
            [
                [[4.0, 4.625, 5.25], [5.875, 6.5, 7.125], [7.75, 8.375, 9.0]],
                [[6.0, 6.625, 7.25], [7.875, 8.5, 9.125], [9.75, 10.375, 11.0]],
                [[8.0, 8.625, 9.25], [9.875, 10.5, 11.125], [11.75, 12.375, 13.0]],
            ]
        )
        result = Plugin()._recycle_raw_ensemble_realizations(
            self.percentile_cube, self.realization_cube, self.perc_coord,
        )
        self.assertIsInstance(result, Cube)
        self.assertArrayEqual(result.coord("realization").points, [0, 1, 2])
        self.assertArrayAlmostEqual(result.data, expected_data)
    def test_realization_for_greater_than(self):
        """
        Test to check the behaviour when the number of percentiles is
        greater than the number of realizations.
        """
        # With only two raw realizations available, the first realization is
        # expected to be recycled to supply a third member, and realization
        # numbering continues from the highest input number (12, 13 -> 14).
        expected_data = np.array(
            [
                [[4.0, 4.625, 5.25], [5.875, 6.5, 7.125], [7.75, 8.375, 9.0]],
                [[6.0, 6.625, 7.25], [7.875, 8.5, 9.125], [9.75, 10.375, 11.0]],
                [[4.0, 4.625, 5.25], [5.875, 6.5, 7.125], [7.75, 8.375, 9.0]],
            ]
        )
        raw_forecast_realizations = self.realization_cube[:2, :, :]
        raw_forecast_realizations.coord("realization").points = [12, 13]
        result = Plugin()._recycle_raw_ensemble_realizations(
            self.percentile_cube, raw_forecast_realizations, self.perc_coord,
        )
        self.assertIsInstance(result, Cube)
        self.assertArrayEqual(result.coord("realization").points, [12, 13, 14])
        self.assertArrayAlmostEqual(result.data, expected_data)
    def test_realization_for_less_than(self):
        """
        Test to check the behaviour when the number of percentiles is
        less than the number of realizations.
        """
        # Only the first two raw realizations should be retained when there
        # are fewer percentiles than realizations.
        expected_data = np.array(
            [
                [[4.0, 4.625, 5.25], [5.875, 6.5, 7.125], [7.75, 8.375, 9.0]],
                [[6.0, 6.625, 7.25], [7.875, 8.5, 9.125], [9.75, 10.375, 11.0]],
            ]
        )
        post_processed_forecast_percentiles = self.percentile_cube[:2, :, :]
        result = Plugin()._recycle_raw_ensemble_realizations(
            post_processed_forecast_percentiles, self.realization_cube, self.perc_coord,
        )
        self.assertIsInstance(result, Cube)
        self.assertArrayEqual(result.coord("realization").points, [0, 1])
        self.assertArrayAlmostEqual(result.data, expected_data)
class Test_rank_ecc(IrisTest):
    """Test the rank_ecc method in the EnsembleReordering plugin."""
    def setUp(self):
        """
        Create a cube with forecast_reference_time and
        forecast_period coordinates.
        """
        self.cube = set_up_variable_cube(ECC_TEMPERATURE_REALIZATIONS.copy())
        # 2D (realization, y) sub-cube used by the lower-dimensional tests.
        self.cube_2d = self.cube[:, :2, 0].copy()
    def test_basic(self):
        """Test that the plugin returns an iris.cube.Cube."""
        raw_data = np.array(
            [
                [[1, 1, 1], [1, 1, 1], [1, 1, 1]],
                [[2, 2, 2], [2, 2, 2], [2, 2, 2]],
                [[3, 3, 3], [3, 3, 3], [3, 3, 3]],
            ]
        )
        calibrated_data = np.array(
            [
                [
                    [0.71844843, 0.71844843, 0.71844843],
                    [0.71844843, 0.71844843, 0.71844843],
                    [0.71844843, 0.71844843, 0.71844843],
                ],
                [[2.0, 2.0, 2.0], [2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
                [
                    [3.28155157, 3.28155157, 3.28155157],
                    [3.28155157, 3.28155157, 3.28155157],
                    [3.28155157, 3.28155157, 3.28155157],
                ],
            ]
        )
        raw_cube = self.cube.copy(data=raw_data)
        calibrated_cube = self.cube.copy(data=calibrated_data)
        result = Plugin().rank_ecc(calibrated_cube, raw_cube)
        self.assertIsInstance(result, Cube)
    def test_ordered_data(self):
        """
        Test that the plugin returns an Iris Cube where the cube data is an
        ordered numpy array for the calibrated data with the same ordering
        as the raw data.
        """
        raw_data = np.array(
            [
                [[1, 1, 1], [1, 1, 1], [1, 1, 1]],
                [[2, 2, 2], [2, 2, 2], [2, 2, 2]],
                [[3, 3, 3], [3, 3, 3], [3, 3, 3]],
            ]
        )
        raw_cube = self.cube.copy(data=raw_data)
        # Calibrated data identical to raw data: re-ordering should be a
        # no-op in this case.
        calibrated_cube = raw_cube.copy()
        result = Plugin().rank_ecc(calibrated_cube, raw_cube)
        self.assertArrayAlmostEqual(result.data, calibrated_cube.data)
    def test_unordered_data(self):
        """
        Test that the plugin returns an iris.cube.Cube with the correct data.
        ECC orders the calibrated data based on the ordering of the raw data.
        This could mean that the calibrated data appears out of order.
        ECC does not reorder the calibrated data in a monotonically-increasing
        order.
        """
        raw_data = np.array(
            [
                [[5, 5, 5], [7, 5, 5], [5, 5, 5]],
                [[4, 4, 4], [4, 4, 4], [4, 4, 4]],
                [[6, 6, 6], [6, 6, 6], [6, 6, 6]],
            ]
        )
        calibrated_data = np.array(
            [
                [[4, 5, 4], [4, 5, 4], [4, 5, 4]],
                [[5, 6, 5], [5, 6, 5], [5, 6, 5]],
                [[6, 7, 6], [6, 7, 6], [6, 7, 6]],
            ]
        )
        # This reordering does not pay attention to the values within the
        # calibrated data, the rankings created to perform the sorting are
        # taken exclusively from the raw_data.
        result_data = np.array(
            [
                [[5, 6, 5], [6, 6, 5], [5, 6, 5]],
                [[4, 5, 4], [4, 5, 4], [4, 5, 4]],
                [[6, 7, 6], [5, 7, 6], [6, 7, 6]],
            ]
        )
        raw_cube = self.cube.copy(data=raw_data)
        calibrated_cube = self.cube.copy(data=calibrated_data)
        result = Plugin().rank_ecc(calibrated_cube, raw_cube)
        self.assertArrayAlmostEqual(result.data, result_data)
    def test_2d_cube(self):
        """Test that the plugin returns the correct cube data for a
        2d input cube."""
        raw_data = np.array([[1, 1], [3, 2], [2, 3]])
        calibrated_data = np.array([[1, 1], [2, 2], [3, 3]])
        # The calibrated values already match the raw values, so reordering
        # should exactly reproduce the raw ranking.
        result_data = raw_data.copy()
        raw_cube = self.cube_2d.copy(data=raw_data)
        calibrated_cube = self.cube_2d.copy(data=calibrated_data)
        result = Plugin().rank_ecc(calibrated_cube, raw_cube)
        self.assertArrayAlmostEqual(result.data, result_data)
    def test_2d_cube_masked(self):
        """Test that the plugin returns the correct cube data for a
        2d input cube with a mask applied to each realization."""
        mask = np.array([[True, False], [True, False], [True, False]])
        raw_data = np.array([[1, 9], [3, 5], [2, 7]])
        calibrated_data = np.ma.MaskedArray(
            [[1, 6], [2, 8], [3, 10]], mask=mask, dtype=np.float32
        )
        # Masked positions are expected to come through as NaN in the
        # underlying data, with the mask preserved on the result.
        result_data = np.array(
            [[np.nan, 10], [np.nan, 6], [np.nan, 8]], dtype=np.float32
        )
        raw_cube = self.cube_2d.copy(data=raw_data)
        calibrated_cube = self.cube_2d.copy(data=calibrated_data)
        result = Plugin().rank_ecc(calibrated_cube, raw_cube)
        self.assertArrayAlmostEqual(result.data.data, result_data)
        self.assertArrayEqual(result.data.mask, mask)
        self.assertEqual(result.data.dtype, np.float32)
    def test_2d_cube_masked_nans(self):
        """Test that the plugin returns the correct cube data for a
        2d input cube with a mask applied to each realization, and there are
        nans under the mask."""
        mask = np.array([[True, False], [True, False], [True, False]])
        raw_data = np.array([[1, 9], [3, 5], [2, 7]])
        calibrated_data = np.ma.MaskedArray(
            [[np.nan, 6], [np.nan, 8], [np.nan, 10]], mask=mask, dtype=np.float32
        )
        result_data = np.array(
            [[np.nan, 10], [np.nan, 6], [np.nan, 8]], dtype=np.float32
        )
        raw_cube = self.cube_2d.copy(data=raw_data)
        calibrated_cube = self.cube_2d.copy(data=calibrated_data)
        result = Plugin().rank_ecc(calibrated_cube, raw_cube)
        self.assertArrayAlmostEqual(result.data.data, result_data)
        self.assertArrayEqual(result.data.mask, mask)
        self.assertEqual(result.data.dtype, np.float32)
    def test_2d_cube_tied_values(self):
        """
        Test that the plugin returns the correct cube data for a
        2d input cube, when there are tied values within the
        raw ensemble realizations. As there are two possible options for the
        result data, as the tie is decided randomly, both possible result
        data options are checked.
        """
        raw_data = np.array([[1, 1], [3, 2], [2, 2]])
        calibrated_data = np.array([[1, 1], [2, 2], [3, 3]])
        # Reordering of the calibrated_data array to match
        # the raw_data ordering
        result_data_first = np.array([[1, 1], [3, 2], [2, 3]])
        result_data_second = np.array([[1, 1], [3, 3], [2, 2]])
        raw_cube = self.cube_2d.copy(data=raw_data)
        calibrated_cube = self.cube_2d.copy(data=calibrated_data)
        result = Plugin().rank_ecc(calibrated_cube, raw_cube)
        permutations = [result_data_first, result_data_second]
        matches = [np.array_equal(aresult, result.data) for aresult in permutations]
        self.assertIn(True, matches)
    def test_2d_cube_tied_values_random_seed(self):
        """
        Test that the plugin returns the correct cube data for a
        2d input cube, when there are tied values within the
        raw ensemble realizations. The random seed is specified to ensure that
        only one option, out of the two possible options will be returned.
        """
        raw_data = np.array([[1, 1], [3, 2], [2, 2]])
        calibrated_data = np.array([[1, 1], [2, 2], [3, 3]])
        result_data = np.array([[1, 1], [3, 2], [2, 3]])
        raw_cube = self.cube_2d.copy(data=raw_data)
        calibrated_cube = self.cube_2d.copy(data=calibrated_data)
        result = Plugin().rank_ecc(calibrated_cube, raw_cube, random_seed=0)
        self.assertArrayAlmostEqual(result.data, result_data)
    def test_1d_cube(self):
        """
        Test that the plugin returns the correct cube data for a
        1d input cube.
        """
        raw_data = np.array([3, 2, 1])
        calibrated_data = np.array([1, 2, 3])
        result_data = raw_data.copy()
        cube = self.cube[:, 0, 0].copy()
        raw_cube = cube.copy(data=raw_data)
        calibrated_cube = cube.copy(data=calibrated_data)
        result = Plugin().rank_ecc(calibrated_cube, raw_cube)
        self.assertArrayAlmostEqual(result.data, result_data)
    def test_1d_cube_random_ordering(self):
        """
        Test that the plugin returns the correct cube data for a
        1d input cube, if random ordering is selected.
        Random ordering does not use the ordering from the raw ensemble,
        and instead just orders the input values randomly.
        """
        raw_data = np.array([3, 2, 1])
        calibrated_data = np.array([1, 2, 3])
        cube = self.cube[:, 0, 0].copy()
        raw_cube = cube.copy(data=raw_data)
        calibrated_cube = cube.copy(data=calibrated_data)
        result = Plugin().rank_ecc(calibrated_cube, raw_cube, random_ordering=True)
        # Any permutation of the input values is acceptable, as the ordering
        # is random; check the result matches at least one permutation.
        permutations = list(itertools.permutations(raw_data))
        permutations = [np.array(permutation) for permutation in permutations]
        matches = [np.array_equal(aresult, result.data) for aresult in permutations]
        self.assertIn(True, matches)
class Test__check_input_cube_masks(IrisTest):
    """Test the _check_input_cube_masks method in the EnsembleReordering plugin."""
    def setUp(self):
        """
        Create a raw realization forecast with a fixed set of realization
        numbers and a percentile cube following ensemble reordering.
        """
        self.raw_cube = set_up_variable_cube(
            ECC_TEMPERATURE_REALIZATIONS.copy(), realizations=[10, 11, 12]
        )
        self.post_processed_percentiles = set_up_percentile_cube(
            np.sort(ECC_TEMPERATURE_REALIZATIONS.copy(), axis=0),
            np.array([25, 50, 75], dtype=np.float32),
        )
    def test_unmasked_data(self):
        """Test unmasked data does not raise any errors."""
        Plugin._check_input_cube_masks(self.post_processed_percentiles, self.raw_cube)
    def test_only_one_post_processed_forecast_masked(self):
        """
        Test no error is raised if only the post_processed_forecast is masked.
        """
        # Mask a single grid point on all percentile slices via NaNs.
        self.post_processed_percentiles.data[:, 0, 0] = np.nan
        self.post_processed_percentiles.data = np.ma.masked_invalid(
            self.post_processed_percentiles.data
        )
        Plugin._check_input_cube_masks(self.post_processed_percentiles, self.raw_cube)
    def test_only_raw_cube_masked(self):
        """
        Test an error is raised if only the raw_cube is masked.
        """
        self.raw_cube.data[:, 0, 0] = np.nan
        self.raw_cube.data = np.ma.masked_invalid(self.raw_cube.data)
        message = (
            "The raw_forecast provided has a mask, but the post_processed_forecast "
            "isn't masked. The post_processed_forecast and the raw_forecast "
            "should have the same mask applied to them."
        )
        with self.assertRaisesRegex(ValueError, message):
            Plugin._check_input_cube_masks(
                self.post_processed_percentiles, self.raw_cube
            )
    def test_post_processed_forecast_inconsistent_mask(self):
        """Test an error is raised if the post_processed_forecast has an
        inconsistent mask.
        """
        # Mask a point on only one percentile slice, so the mask differs
        # between x-y slices of the post-processed forecast.
        self.post_processed_percentiles.data[2, 0, 0] = np.nan
        self.post_processed_percentiles.data = np.ma.masked_invalid(
            self.post_processed_percentiles.data
        )
        self.raw_cube.data[:, 0, 0] = np.nan
        self.raw_cube.data = np.ma.masked_invalid(self.raw_cube.data)
        message = (
            "The post_processed_forecast does not have same mask on all x-y slices"
        )
        with self.assertRaisesRegex(ValueError, message):
            Plugin._check_input_cube_masks(
                self.post_processed_percentiles, self.raw_cube
            )
    def test_raw_forecast_inconsistent_mask(self):
        """Test an error is raised if the raw_forecast has an
        inconsistent mask.
        """
        self.post_processed_percentiles.data[:, 0, 0] = np.nan
        self.post_processed_percentiles.data = np.ma.masked_invalid(
            self.post_processed_percentiles.data
        )
        # Mask a point on only one realization slice of the raw forecast.
        self.raw_cube.data[2, 0, 0] = np.nan
        self.raw_cube.data = np.ma.masked_invalid(self.raw_cube.data)
        message = (
            "The raw_forecast x-y slices do not all have the"
            " same mask as the post_processed_forecast."
        )
        with self.assertRaisesRegex(ValueError, message):
            Plugin._check_input_cube_masks(
                self.post_processed_percentiles, self.raw_cube
            )
    def test_consistent_masks(self):
        """Test no error is raised if the raw_forecast and
        post_processed_forecast have consistent masks.
        """
        self.post_processed_percentiles.data[:, 0, 0] = np.nan
        self.post_processed_percentiles.data = np.ma.masked_invalid(
            self.post_processed_percentiles.data
        )
        self.raw_cube.data[:, 0, 0] = np.nan
        self.raw_cube.data = np.ma.masked_invalid(self.raw_cube.data)
        Plugin._check_input_cube_masks(self.post_processed_percentiles, self.raw_cube)
class Test_process(IrisTest):
    """Test the EnsembleReordering plugin."""
    def setUp(self):
        """
        Create a raw realization forecast with a fixed set of realization
        numbers and a percentile cube following ensemble reordering.
        """
        self.raw_cube = set_up_variable_cube(
            ECC_TEMPERATURE_REALIZATIONS.copy(), realizations=[10, 11, 12]
        )
        self.post_processed_percentiles = set_up_percentile_cube(
            np.sort(ECC_TEMPERATURE_REALIZATIONS.copy(), axis=0),
            np.array([25, 50, 75], dtype=np.float32),
        )
    @ManageWarnings(ignored_messages=["Only a single cube so no differences"])
    def test_basic(self):
        """
        Test that the plugin returns an iris.cube.Cube, the cube has a
        realization coordinate with specific realization numbers and is
        correctly re-ordered to match the source realizations.
        """
        expected_data = self.raw_cube.data.copy()
        result = Plugin().process(self.post_processed_percentiles, self.raw_cube)
        self.assertIsInstance(result, Cube)
        self.assertTrue(result.coords("realization"))
        self.assertEqual(
            result.coord("realization"), self.raw_cube.coord("realization")
        )
        self.assertArrayAlmostEqual(result.data, expected_data)
    @ManageWarnings(ignored_messages=["Only a single cube so no differences"])
    def test_basic_masked_input_data(self):
        """
        Test that the plugin returns an iris.cube.Cube, the cube has a
        realization coordinate with specific realization numbers and is
        correctly re-ordered to match the source realizations, when the
        input data is masked.
        """
        # Assuming input data and raw ensemble are masked in the same way.
        self.raw_cube.data[:, 0, 0] = np.nan
        self.raw_cube.data = np.ma.masked_invalid(self.raw_cube.data)
        self.post_processed_percentiles.data[:, 0, 0] = np.nan
        self.post_processed_percentiles.data = np.ma.masked_invalid(
            self.post_processed_percentiles.data
        )
        expected_data = self.raw_cube.data.copy()
        result = Plugin().process(self.post_processed_percentiles, self.raw_cube)
        self.assertIsInstance(result, Cube)
        self.assertTrue(result.coords("realization"))
        self.assertEqual(
            result.coord("realization"), self.raw_cube.coord("realization")
        )
        self.assertArrayAlmostEqual(result.data, expected_data)
        self.assertArrayEqual(result.data.mask, expected_data.mask)
    @ManageWarnings(ignored_messages=["Only a single cube so no differences"])
    def test_basic_masked_input_data_not_nans(self):
        """
        Test that the plugin returns an iris.cube.Cube, the cube has a
        realization coordinate with specific realization numbers and is
        correctly re-ordered to match the source realizations, when the
        input data is masked and the masked data is not a nan.
        """
        # Assuming input data and raw ensemble are masked in the same way.
        # A sentinel value of 1000 is masked rather than NaN, to check the
        # mask rather than the data value drives the behaviour.
        self.raw_cube.data[:, 0, 0] = 1000
        self.raw_cube.data = np.ma.masked_equal(self.raw_cube.data, 1000)
        self.post_processed_percentiles.data[:, 0, 0] = 1000
        self.post_processed_percentiles.data = np.ma.masked_equal(
            self.post_processed_percentiles.data, 1000
        )
        expected_data = self.raw_cube.data.copy()
        result = Plugin().process(self.post_processed_percentiles, self.raw_cube)
        self.assertIsInstance(result, Cube)
        self.assertTrue(result.coords("realization"))
        self.assertEqual(
            result.coord("realization"), self.raw_cube.coord("realization")
        )
        self.assertArrayAlmostEqual(result.data, expected_data)
        self.assertArrayEqual(result.data.mask, expected_data.mask)
    @ManageWarnings(ignored_messages=["Only a single cube so no differences"])
    def test_1d_cube_random_ordering(self):
        """
        Test that the plugin returns the correct cube data for a
        1d input cube, if random ordering is selected.
        """
        raw_data = np.array([3, 2, 1])
        post_processed_percentiles_data = np.array([1, 2, 3])
        raw_cube = self.raw_cube[:, 0, 0]
        raw_cube.data = raw_data
        post_processed_percentiles = self.post_processed_percentiles[:, 0, 0]
        post_processed_percentiles.data = post_processed_percentiles_data
        result = Plugin().process(
            post_processed_percentiles, raw_cube, random_ordering=True
        )
        # Random ordering may return any permutation of the input values.
        permutations = list(itertools.permutations(raw_data))
        permutations = [np.array(permutation) for permutation in permutations]
        matches = [np.array_equal(aresult, result.data) for aresult in permutations]
        self.assertIn(True, matches)
    @ManageWarnings(ignored_messages=["Only a single cube so no differences"])
    def test_1d_cube_recycling_raw_ensemble_realizations(self):
        """
        Test that the plugin returns the correct cube data for a
        1d input cube, if the number of raw ensemble realizations is fewer
        than the number of percentiles required, and therefore, raw
        ensemble realization recycling is required.
        Case where two raw ensemble realizations are exactly the same,
        after the raw ensemble realizations have been recycled.
        The number of raw ensemble realizations are recycled in order to match
        the number of percentiles.
        After recycling the raw data will be
        raw_data = np.array([[1],
            [2],
            [1]])
        If there's a tie, the re-ordering randomly allocates the ordering
        for the data from the raw ensemble realizations, which is why there are
        two possible options for the resulting post-processed ensemble
        realizations.
        Raw ensemble realizations
        1, 2
        Post-processed percentiles
        1, 2, 3
        After recycling raw ensemble realizations
        1, 2, 1
        As the second ensemble realization (with a data value of 2), is the
        highest value, the highest value from the post-processed percentiles
        will be the second ensemble realization data value within the
        post-processed realizations. The data values of 1 and 2 from the
        post-processed percentiles will then be split between the first
        and third post-processed ensemble realizations.
        """
        raw_data = np.array([1, 2])
        post_processed_percentiles_data = np.array([1, 2, 3])
        expected_first = np.array([1, 3, 2])
        expected_second = np.array([2, 3, 1])
        raw_cube = self.raw_cube[:2, 0, 0]
        raw_cube.data = raw_data
        post_processed_percentiles = self.post_processed_percentiles[:, 0, 0]
        post_processed_percentiles.data = post_processed_percentiles_data
        result = Plugin().process(post_processed_percentiles, raw_cube)
        permutations = [expected_first, expected_second]
        matches = [np.array_equal(aresult, result.data) for aresult in permutations]
        self.assertIn(True, matches)
# Allow the test module to be executed directly, e.g. "python <this file>".
if __name__ == "__main__":
    unittest.main()
| bsd-3-clause | 96af916c76de30288aec7250de22486a | 39.821981 | 88 | 0.605589 | 3.670796 | false | true | false | false |
metoppv/improver | improver/cli/generate_topography_bands_weights.py | 3 | 4407 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Script to run topographic bands weights generation."""
from improver import cli
@cli.clizefy
@cli.with_output
def process(
    orography: cli.inputcube,
    land_sea_mask: cli.inputcube = None,
    *,
    bands_config: cli.inputjson = None,
):
    """Runs topographic weights generation.

    Reads the orography and land_sea_mask fields of a cube. Creates a series of
    topographic zone weights to indicate where an orography point sits within
    the defined topographic bands. If the orography point is in the centre of
    a topographic band, then a single band will have a weight 1.0.
    If the orography point is at the edge of a topographic band, then the
    upper band will have a 0.5 weight whilst the lower band will also have a
    0.5 weight. Otherwise the weight will vary linearly between the centre of
    a topographic band and the edge.

    Args:
        orography (iris.cube.Cube):
            The orography on a standard grid.
        land_sea_mask (iris.cube.Cube):
            Land mask on a standard grid, with land points set to one and
            sea points set to zero. If provided, sea points will be
            masked and set to the default fill value. If no land mask is
            provided, weights will be generated for sea points as well as land
            in the appropriate topographic band.
        bands_config (dict):
            Definition of orography bands required.
            The expected format of the dictionary is e.g
            {'bounds':[[0, 50], [50, 200]], 'units': 'm'}
            The default dictionary has the following form:
            {'bounds': [[-500., 50.], [50., 100.],
            [100., 150.],[150., 200.], [200., 250.],
            [250., 300.], [300., 400.], [400., 500.],
            [500., 650.],[650., 800.], [800., 950.],
            [950., 6000.]], 'units': 'm'}

    Returns:
        iris.cube.Cube:
            Cube containing the weights depending upon where the orography
            point is within the topographical zones.
    """
    from improver.generate_ancillaries.generate_ancillary import THRESHOLDS_DICT
    from improver.generate_ancillaries.generate_topographic_zone_weights import (
        GenerateTopographicZoneWeights,
    )

    # Fall back to the standard set of orography bands when none supplied.
    if bands_config is None:
        bands_config = THRESHOLDS_DICT

    def _first_xy_slice(cube):
        """Return the first horizontal (y-x) slice of the given cube."""
        horizontal_coords = [cube.coord(axis="y"), cube.coord(axis="x")]
        return next(cube.slices(horizontal_coords))

    # Reduce inputs to 2D horizontal fields before generating weights.
    orography = _first_xy_slice(orography)
    if land_sea_mask:
        land_sea_mask = _first_xy_slice(land_sea_mask)

    return GenerateTopographicZoneWeights()(
        orography, bands_config, landmask=land_sea_mask
    )
| bsd-3-clause | b0981943f75324dd8964136b057123e0 | 41.786408 | 81 | 0.671205 | 4.253861 | false | false | false | false |
metoppv/improver | improver/nowcasting/pysteps_advection.py | 3 | 11600 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Semi-Lagrangian backward advection plugin using pysteps"""
from datetime import timedelta
from typing import Dict, List, Optional
import numpy as np
from iris.coords import AuxCoord
from iris.cube import Cube, CubeList
from numpy import ndarray
from improver import BasePlugin
from improver.metadata.amend import amend_attributes, set_history_attribute
from improver.metadata.utilities import generate_mandatory_attributes
from improver.nowcasting.utilities import ApplyOrographicEnhancement
from improver.utilities.redirect_stdout import redirect_stdout
from improver.utilities.spatial import (
calculate_grid_spacing,
check_if_grid_is_equal_area,
)
from improver.utilities.temporal import datetime_to_iris_time, iris_time_to_datetime
class PystepsExtrapolate(BasePlugin):
"""Wrapper for the pysteps semi-Lagrangian extrapolation method
Reference:
https://pysteps.readthedocs.io/en/latest/generated/
pysteps.extrapolation.semilagrangian.extrapolate.html
"""
def __init__(self, interval: int, max_lead_time: int) -> None:
"""
Initialise the plugin
Args:
interval:
Lead time interval, in minutes
max_lead_time:
Maximum lead time required, in minutes
"""
self.interval = interval
self.num_timesteps = max_lead_time // interval
def _get_advectable_precip_rate(self) -> ndarray:
"""
From the initial cube, generate a precipitation rate array in mm h-1
with orographic enhancement subtracted, as required for advection
Returns:
2D precipitation rate array in mm h-1
"""
(self.analysis_cube,) = ApplyOrographicEnhancement("subtract").process(
self.analysis_cube, self.orogenh
)
self.analysis_cube.convert_units("mm h-1")
return np.ma.filled(self.analysis_cube.data, np.nan)
def _generate_displacement_array(self, ucube: Cube, vcube: Cube) -> ndarray:
"""
Create displacement array of shape (2 x m x n) required by pysteps
algorithm
Args:
ucube:
Cube of x-advection velocities
vcube:
Cube of y-advection velocities
Returns:
Array of shape (2, m, n) containing the x- and y-components
of the m*n displacement field (format required for pysteps
extrapolation algorithm)
"""
def _calculate_displacement(
cube: Cube, interval: int, gridlength: float
) -> ndarray:
"""
Calculate displacement for each time step using velocity cube and
time interval
Args:
cube:
Cube of velocities in the x or y direction
interval:
Lead time interval, in minutes
gridlength:
Size of grid square, in metres
Returns:
Array of displacements in grid squares per time step
"""
cube_ms = cube.copy()
cube_ms.convert_units("m s-1")
displacement = cube_ms.data * interval * 60.0 / gridlength
return np.ma.filled(displacement, np.nan)
gridlength = calculate_grid_spacing(self.analysis_cube, "metres")
udisp = _calculate_displacement(ucube, self.interval, gridlength)
vdisp = _calculate_displacement(vcube, self.interval, gridlength)
displacement = np.array([udisp, vdisp])
return displacement
def _reformat_analysis_cube(self, attribute_changes):
"""
Add forecast reference time and forecast period coordinates (if they do
not already exist) and nowcast attributes to analysis cube
"""
coords = [coord.name() for coord in self.analysis_cube.coords()]
if "forecast_reference_time" not in coords:
frt_coord = self.analysis_cube.coord("time").copy()
frt_coord.rename("forecast_reference_time")
self.analysis_cube.add_aux_coord(frt_coord)
if "forecast_period" not in coords:
self.analysis_cube.add_aux_coord(
AuxCoord(np.array([0], dtype=np.int32), "forecast_period", "seconds")
)
self.analysis_cube.attributes = generate_mandatory_attributes(
[self.analysis_cube]
)
self.analysis_cube.attributes["source"] = "MONOW"
self.analysis_cube.attributes[
"title"
] = "MONOW Extrapolation Nowcast on UK 2 km Standard Grid"
set_history_attribute(self.analysis_cube, "Nowcast")
if attribute_changes is not None:
amend_attributes(self.analysis_cube, attribute_changes)
def _set_up_output_cubes(self, all_forecasts: ndarray) -> CubeList:
"""
Convert 3D numpy array into list of cubes with correct time metadata.
All other metadata are inherited from self.analysis_cube.
Args:
all_forecasts:
Array of 2D forecast fields returned by extrapolation function
Returns:
List of extrapolated cubes with correct time coordinates
"""
current_datetime = iris_time_to_datetime(self.analysis_cube.coord("time"))[0]
forecast_cubes = [self.analysis_cube.copy()]
for i in range(len(all_forecasts)):
# copy forecast data into template cube
new_cube = self.analysis_cube.copy(
data=all_forecasts[i, :, :].astype(np.float32)
)
# update time and forecast period coordinates
current_datetime += timedelta(seconds=self.interval * 60)
current_time = datetime_to_iris_time(current_datetime)
new_cube.coord("time").points = np.array([current_time], dtype=np.int64)
new_cube.coord("forecast_period").points = np.array(
[(i + 1) * self.interval * 60], dtype=np.int32
)
forecast_cubes.append(new_cube)
return forecast_cubes
def _generate_forecast_cubes(
self, all_forecasts: ndarray, attributes_dict: Optional[Dict]
) -> List[Cube]:
"""
Convert forecast arrays into IMPROVER output cubes with re-added
orographic enhancement
Args:
all_forecasts:
Array of 2D forecast fields returned by extrapolation function
attributes_dict:
Dictionary containing information for amending the attributes
of the output cube.
Returns:
List of iris.cube.Cube instances containing forecasts at all
required lead times, and conforming to the IMPROVER metadata
standard.
"""
# re-mask forecast data
all_forecasts = np.ma.masked_invalid(all_forecasts)
# put forecast data arrays into cubes
self._reformat_analysis_cube(attributes_dict)
timestamped_cubes = self._set_up_output_cubes(all_forecasts)
# re-convert cubes to original units and add orographic enhancement
forecast_cubes = []
for cube in timestamped_cubes:
cube.convert_units(self.required_units)
if self.orogenh:
(cube,) = ApplyOrographicEnhancement("add").process(cube, self.orogenh)
forecast_cubes.append(cube)
return forecast_cubes
def process(
    self,
    initial_cube: Cube,
    ucube: Cube,
    vcube: Cube,
    orographic_enhancement: Cube,
    attributes_dict: Optional[Dict] = None,
) -> List[Cube]:
    """
    Extrapolate the initial precipitation field using the velocities
    provided to the required forecast lead times

    Args:
        initial_cube:
            Cube of precipitation at initial time
        ucube:
            x-advection velocities
        vcube:
            y-advection velocities
        orographic_enhancement:
            Cube containing orographic enhancement fields at all required
            lead times
        attributes_dict:
            Dictionary containing information for amending the attributes
            of the output cube.

    Returns:
        List of extrapolated iris.cube.Cube instances at the required
        lead times (including T+0 / analysis time)
    """
    # reject inputs that are not precipitation rates, and require an
    # equal-area grid for advection
    if "rate" not in initial_cube.name():
        msg = "{} is not a precipitation rate cube"
        raise ValueError(msg.format(initial_cube.name()))
    check_if_grid_is_equal_area(initial_cube)

    self.analysis_cube = initial_cube.copy()
    self.required_units = initial_cube.units
    self.orogenh = orographic_enhancement

    # unmasked precipitation rates with orographic enhancement subtracted,
    # ready for input into advection
    advectable_rate = self._get_advectable_precip_rate()

    # displacement in grid squares per time step
    grid_displacement = self._generate_displacement_array(ucube, vcube)

    # PySteps prints a message on import to stdout - trap this.
    # This should be removed for PySteps v1.1.0 which has a configuration
    # setting for this. Import here to minimise dependencies.
    with redirect_stdout():
        from pysteps.extrapolation.semilagrangian import extrapolate

    # call the pysteps extrapolation method; interp_order=0 selects
    # nearest-neighbour interpolation
    extrapolated_fields = extrapolate(
        advectable_rate,
        grid_displacement,
        self.num_timesteps,
        allow_nonfinite_values=True,
        interp_order=0,
    )

    # repackage data as IMPROVER masked cubes
    return self._generate_forecast_cubes(extrapolated_fields, attributes_dict)
| bsd-3-clause | e22a0dd6dc2f271dafce664524c4e50c | 39 | 87 | 0.637414 | 4.270987 | false | false | false | false |
metoppv/improver | improver_tests/utilities/test_pad_spatial.py | 3 | 25212 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for spatial padding utilities"""
import unittest
import iris
import numpy as np
from iris.coords import DimCoord
from iris.tests import IrisTest
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
from improver.utilities.pad_spatial import (
_create_cube_with_padded_data,
create_cube_with_halo,
pad_coord,
pad_cube_with_halo,
remove_cube_halo,
remove_halo_from_cube,
)
class Test_pad_coord(IrisTest):
    """Tests for padding and trimming a cube coordinate."""

    def setUp(self):
        """Build a 5x5 equal-area cube with explicitly bounded x and y
        coordinates, plus a copy whose y coordinate runs in descending
        order."""
        self.cube = set_up_variable_cube(
            np.ones((5, 5), dtype=np.float32), spatial_grid="equalarea"
        )
        x_points = np.linspace(10.0, 50.0, 5)
        y_points = np.linspace(5.0, 85.0, 5)
        self.cube.coord("projection_x_coordinate").points = x_points
        self.cube.coord("projection_x_coordinate").bounds = np.array(
            [x_points - 5, x_points + 5]
        ).T
        self.cube.coord("projection_y_coordinate").points = y_points
        self.cube.coord("projection_y_coordinate").bounds = np.array(
            [y_points - 10, y_points + 10]
        ).T

        self.cube_y_reorder = self.cube.copy()
        descending_y = np.flip(y_points)
        self.cube_y_reorder.coord("projection_y_coordinate").points = descending_y
        self.cube_y_reorder.coord("projection_y_coordinate").bounds = np.array(
            [descending_y + 10, descending_y - 10]
        ).T

    def test_add(self):
        """Padding the x coordinate by one cell at each end extends both the
        points and the bounds arrays to match."""
        expected_points = np.linspace(0.0, 60.0, 7)
        expected_bounds = np.array([expected_points - 5, expected_points + 5]).T
        result = pad_coord(self.cube.coord("projection_x_coordinate"), 1, "add")
        self.assertIsInstance(result, DimCoord)
        self.assertArrayAlmostEqual(result.points, expected_points)
        self.assertArrayEqual(result.bounds, expected_bounds)

    def test_add_y_reorder(self):
        """Padding also works when the coordinate runs in descending order."""
        expected_points = np.linspace(105.0, -15.0, 7)
        expected_bounds = np.array([expected_points + 10, expected_points - 10]).T
        result = pad_coord(
            self.cube_y_reorder.coord("projection_y_coordinate"), 1, "add"
        )
        self.assertIsInstance(result, DimCoord)
        self.assertArrayAlmostEqual(result.points, expected_points)
        self.assertArrayEqual(result.bounds, expected_bounds)

    def test_exception(self):
        """A coordinate with non-uniform point spacing cannot be padded."""
        irregular_points = np.arange(10.0, 60.0, 10.0)
        irregular_points[0] = -200.0
        self.cube.coord("projection_x_coordinate").points = irregular_points
        msg = "Non-uniform increments between grid points"
        with self.assertRaisesRegex(ValueError, msg):
            pad_coord(self.cube.coord("projection_x_coordinate"), 1, "add")

    def test_remove(self):
        """Removing one cell from each end trims points and bounds alike."""
        expected_points = np.array([20.0, 30.0, 40.0])
        expected_bounds = np.array([expected_points - 5, expected_points + 5]).T
        result = pad_coord(self.cube.coord("projection_x_coordinate"), 1, "remove")
        self.assertIsInstance(result, DimCoord)
        self.assertArrayAlmostEqual(result.points, expected_points)
        self.assertArrayEqual(result.bounds, expected_bounds)
class Test_create_cube_with_halo(IrisTest):
    """Tests for the create_cube_with_halo function"""

    def setUp(self):
        """Create an 11x11 input cube on a UK standard equal-area grid,
        carrying realistic attributes so their removal can be checked."""
        attrs = {
            "history": "2018-12-10Z: StaGE Decoupler",
            "title": "Temperature on UK 2 km Standard Grid",
            "source": "Met Office Unified Model",
        }
        self.cube = set_up_variable_cube(
            np.ones((11, 11), dtype=np.float32),
            spatial_grid="equalarea",
            standard_grid_metadata="uk_det",
            attributes=attrs,
        )
        self.grid_spacing = np.diff(self.cube.coord(axis="x").points)[0]
        self.halo_size_m = 2000.0

    def test_basic(self):
        """The output is a renamed template cube carrying no attributes."""
        result = create_cube_with_halo(self.cube, self.halo_size_m)
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertEqual(result.name(), "grid_with_halo")
        self.assertFalse(result.attributes)

    def test_values(self):
        """A 2000 m halo on this grid rounds down to one extra grid cell on
        every edge, and the original grid sits unshifted inside the output."""
        x_points = self.cube.coord(axis="x").points
        y_points = self.cube.coord(axis="y").points
        expected_x_points = np.arange(
            x_points[0] - self.grid_spacing,
            x_points[-1] + self.grid_spacing + 1,
            self.grid_spacing,
        )
        expected_y_points = np.arange(
            y_points[0] - self.grid_spacing,
            y_points[-1] + self.grid_spacing + 1,
            self.grid_spacing,
        )
        result = create_cube_with_halo(self.cube, self.halo_size_m)
        self.assertSequenceEqual(result.data.shape, (13, 13))
        self.assertArrayAlmostEqual(result.coord(axis="x").points, expected_x_points)
        self.assertArrayAlmostEqual(result.coord(axis="y").points, expected_y_points)
        # check explicitly that the original grid remains an exact subset of
        # the output cube (ie that padding hasn't shifted the existing grid)
        self.assertArrayAlmostEqual(result.coord(axis="x").points[1:-1], x_points)
        self.assertArrayAlmostEqual(result.coord(axis="y").points[1:-1], y_points)
class Test__create_cube_with_padded_data(IrisTest):
    """Test creating a new cube using a template cube."""

    def setUp(self):
        """Create a (1, 5, 5) cube of ones with a zero marker at the
        centre of the spatial grid."""
        self.cube = set_up_variable_cube(
            np.ones((1, 5, 5), dtype=np.float32), spatial_grid="equalarea"
        )
        self.cube.data[0, 2, 2] = 0

    @staticmethod
    def _padded_from(sliced_cube):
        """Build an output cube from the slice's own data and spatial
        coordinates, which is the operation every test below exercises."""
        return _create_cube_with_padded_data(
            sliced_cube,
            sliced_cube.data,
            sliced_cube.coord("projection_x_coordinate"),
            sliced_cube.coord("projection_y_coordinate"),
        )

    def test_yx_order(self):
        """An input slice ordered (projection_y_coordinate,
        projection_x_coordinate) keeps that dimension order on output."""
        sliced_cube = next(
            self.cube.slices(["projection_y_coordinate", "projection_x_coordinate"])
        )
        new_cube = self._padded_from(sliced_cube)
        self.assertIsInstance(new_cube, iris.cube.Cube)
        self.assertArrayAlmostEqual(new_cube.data, sliced_cube.data)
        self.assertEqual(new_cube.coord_dims("projection_y_coordinate")[0], 0)
        self.assertEqual(new_cube.coord_dims("projection_x_coordinate")[0], 1)

    def test_xy_order(self):
        """An input slice ordered (projection_x_coordinate,
        projection_y_coordinate) keeps that dimension order on output."""
        sliced_cube = next(
            self.cube.slices(["projection_x_coordinate", "projection_y_coordinate"])
        )
        new_cube = self._padded_from(sliced_cube)
        self.assertIsInstance(new_cube, iris.cube.Cube)
        self.assertArrayAlmostEqual(new_cube.data, sliced_cube.data)
        self.assertEqual(new_cube.coord_dims("projection_y_coordinate")[0], 1)
        self.assertEqual(new_cube.coord_dims("projection_x_coordinate")[0], 0)

    def test_realization(self):
        """A leading realization dimension is preserved ahead of the y and x
        dimensions on the output cube."""
        sliced_cube = next(
            self.cube.slices(
                ["realization", "projection_y_coordinate", "projection_x_coordinate"]
            )
        )
        new_cube = self._padded_from(sliced_cube)
        self.assertIsInstance(new_cube, iris.cube.Cube)
        self.assertArrayAlmostEqual(new_cube.data, sliced_cube.data)
        self.assertEqual(new_cube.coord_dims("realization")[0], 0)
        self.assertEqual(new_cube.coord_dims("projection_y_coordinate")[0], 1)
        self.assertEqual(new_cube.coord_dims("projection_x_coordinate")[0], 2)

    def test_no_y_dimension_coordinate(self):
        """When the input slice only has an x dimension coordinate, y is
        carried on the output as an auxiliary (non-dimension) coordinate."""
        sliced_cube = next(self.cube.slices(["projection_x_coordinate"]))
        new_cube = self._padded_from(sliced_cube)
        self.assertIsInstance(new_cube, iris.cube.Cube)
        self.assertArrayAlmostEqual(new_cube.data, sliced_cube.data)
        self.assertEqual(new_cube.coord_dims("projection_x_coordinate")[0], 0)
        self.assertTrue(new_cube.coords("projection_y_coordinate", dim_coords=False))
class Test_pad_cube_with_halo(IrisTest):
    """Test for padding a cube with a halo."""

    def setUp(self):
        """Set up a cube."""
        # 5x5 field of ones with a zero at the centre; the zero acts as a
        # marker so the position of the original data can be verified in
        # the padded output
        self.cube = set_up_variable_cube(
            np.ones((5, 5), dtype=np.float32), spatial_grid="equalarea"
        )
        self.cube.data[2, 2] = 0

        # smaller 3x3 cube with a symmetric central peak, used by the
        # statistical pad methods (symmetric / minimum / maximum)
        data = np.array(
            [[0.0, 0.1, 0.0], [0.1, 0.5, 0.1], [0.0, 0.1, 0.0]], dtype=np.float32,
        )
        self.alternative_cube = self.cube[1:-1, 1:-1].copy(data=data)

    def test_basic(self):
        """Test that padding the data in a cube with a halo has worked as
        intended when using the default option, which is to use zeroes."""
        # two rings of zeroes around the original 5x5 field
        expected = np.array(
            [
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            ]
        )
        width_x = width_y = 2
        padded_cube = pad_cube_with_halo(self.cube, width_x, width_y)
        self.assertIsInstance(padded_cube, iris.cube.Cube)
        self.assertArrayAlmostEqual(padded_cube.data, expected)

    def test_different_widths(self):
        """Test that padding the data in a cube with different widths has
        worked as intended."""
        # 2 cells of padding in x, 4 cells in y
        expected = np.array(
            [
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            ]
        )
        width_x = 2
        width_y = 4
        padded_cube = pad_cube_with_halo(self.cube, width_x, width_y)
        self.assertIsInstance(padded_cube, iris.cube.Cube)
        self.assertArrayAlmostEqual(padded_cube.data, expected)

    def test_zero_width(self):
        """Test that padding the data in a cube with a width of zero has
        worked as intended."""
        # zero x-width: only the y dimension gains padding
        expected = np.array(
            [
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [1.0, 1.0, 1.0, 1.0, 1.0],
                [1.0, 1.0, 1.0, 1.0, 1.0],
                [1.0, 1.0, 0.0, 1.0, 1.0],
                [1.0, 1.0, 1.0, 1.0, 1.0],
                [1.0, 1.0, 1.0, 1.0, 1.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0],
            ]
        )
        width_x = 0
        width_y = 4
        padded_cube = pad_cube_with_halo(self.cube, width_x, width_y)
        self.assertIsInstance(padded_cube, iris.cube.Cube)
        self.assertArrayAlmostEqual(padded_cube.data, expected)

    def test_halo_using_mean_method(self):
        """Test values in halo are correctly smoothed when using pad_method='mean'.
        This impacts recursive filter outputs."""
        # cross-shaped input whose arm values should be extended into the halo
        data = np.array(
            [
                [0.0, 0.0, 0.1, 0.0, 0.0],
                [0.0, 0.0, 0.25, 0.0, 0.0],
                [0.1, 0.25, 0.5, 0.25, 0.1],
                [0.0, 0.0, 0.25, 0.0, 0.0],
                [0.0, 0.0, 0.1, 0.0, 0.0],
            ],
            dtype=np.float32,
        )
        self.cube.data = data

        expected_data = np.array(
            [
                [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.25, 0.0, 0.0, 0.0, 0.0],
                [0.1, 0.1, 0.1, 0.25, 0.5, 0.25, 0.1, 0.1, 0.1],
                [0.0, 0.0, 0.0, 0.0, 0.25, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0],
            ],
            dtype=np.float32,
        )

        padded_cube = pad_cube_with_halo(self.cube, 2, 2, pad_method="mean")
        self.assertArrayAlmostEqual(padded_cube.data, expected_data)

    def test_halo_using_symmetric_method(self):
        """Test values in halo are correct when using pad_method='symmetric'."""
        # halo values mirror the field about each edge
        expected_data = np.array(
            [
                [0.5, 0.1, 0.1, 0.5, 0.1, 0.1, 0.5],
                [0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.1],
                [0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.1],
                [0.5, 0.1, 0.1, 0.5, 0.1, 0.1, 0.5],
                [0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.1],
                [0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.1],
                [0.5, 0.1, 0.1, 0.5, 0.1, 0.1, 0.5],
            ],
            dtype=np.float32,
        )

        padded_cube = pad_cube_with_halo(
            self.alternative_cube, 2, 2, pad_method="symmetric"
        )
        self.assertArrayAlmostEqual(padded_cube.data, expected_data)

    def test_halo_using_minimum_method(self):
        """Test values in halo are correct when using pad_method='minimum'.
        Note that a larger halo is used as the stat_length used for np.pad
        is half the halo width, thus to include the 0.5 within stat_length the
        halo size must be at least 4."""
        expected_data = np.array(
            [
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.1, 0.1, 0.1, 0.1, 0.1, 0.5, 0.1, 0.1, 0.1, 0.1, 0.1],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0],
            ],
            dtype=np.float32,
        )

        padded_cube = pad_cube_with_halo(
            self.alternative_cube, 4, 4, pad_method="minimum"
        )
        self.assertArrayAlmostEqual(padded_cube.data, expected_data)

    def test_halo_using_maximum_method(self):
        """Test values in halo are correct when using pad_method='maximum'.
        Note that a larger halo is used as the stat_length used for np.pad
        is half the halo width, thus to include the 0.5 within stat_length the
        halo size must be at least 4."""
        expected_data = np.array(
            [
                [0.5, 0.5, 0.5, 0.5, 0.1, 0.5, 0.1, 0.5, 0.5, 0.5, 0.5],
                [0.5, 0.5, 0.5, 0.5, 0.1, 0.5, 0.1, 0.5, 0.5, 0.5, 0.5],
                [0.5, 0.5, 0.5, 0.5, 0.1, 0.5, 0.1, 0.5, 0.5, 0.5, 0.5],
                [0.5, 0.5, 0.5, 0.5, 0.1, 0.5, 0.1, 0.5, 0.5, 0.5, 0.5],
                [0.1, 0.1, 0.1, 0.1, 0.0, 0.1, 0.0, 0.1, 0.1, 0.1, 0.1],
                [0.5, 0.5, 0.5, 0.5, 0.1, 0.5, 0.1, 0.5, 0.5, 0.5, 0.5],
                [0.1, 0.1, 0.1, 0.1, 0.0, 0.1, 0.0, 0.1, 0.1, 0.1, 0.1],
                [0.5, 0.5, 0.5, 0.5, 0.1, 0.5, 0.1, 0.5, 0.5, 0.5, 0.5],
                [0.5, 0.5, 0.5, 0.5, 0.1, 0.5, 0.1, 0.5, 0.5, 0.5, 0.5],
                [0.5, 0.5, 0.5, 0.5, 0.1, 0.5, 0.1, 0.5, 0.5, 0.5, 0.5],
                [0.5, 0.5, 0.5, 0.5, 0.1, 0.5, 0.1, 0.5, 0.5, 0.5, 0.5],
            ],
            dtype=np.float32,
        )

        padded_cube = pad_cube_with_halo(
            self.alternative_cube, 4, 4, pad_method="maximum"
        )
        self.assertArrayAlmostEqual(padded_cube.data, expected_data)
class Test_remove_cube_halo(IrisTest):
    """Tests for the remove_cube_halo function"""

    def setUp(self):
        """Create 11x11 multi-realization cubes on a UK standard equal-area
        grid, keeping the attributes so they can be checked on the output."""
        self.attrs = {
            "history": "2018-12-10Z: StaGE Decoupler",
            "title": "Temperature on UK 2 km Standard Grid",
            "source": "Met Office Unified Model",
        }
        grid_kwargs = {
            "spatial_grid": "equalarea",
            "standard_grid_metadata": "uk_det",
            "attributes": self.attrs,
        }
        self.cube = set_up_variable_cube(
            np.ones((3, 11, 11), dtype=np.float32), **grid_kwargs
        )
        self.cube_1d = set_up_variable_cube(
            np.ones((1, 11, 11), dtype=np.float32), **grid_kwargs
        )
        self.grid_spacing = np.diff(self.cube.coord(axis="x").points)[0]
        self.halo_size_m = 2000.0

    def test_basic(self):
        """The halo is stripped while the metadata attributes survive."""
        result = remove_cube_halo(self.cube, self.halo_size_m)
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertEqual(result.attributes["history"], self.attrs["history"])
        self.assertEqual(result.attributes["title"], self.attrs["title"])
        self.assertEqual(result.attributes["source"], self.attrs["source"])
        self.assertSequenceEqual(result.data.shape, (3, 9, 9))

    def test_values(self):
        """Cross-shaped marker values survive halo removal, offset by one
        cell in each spatial dimension."""
        self.cube_1d.data[0, 2, :] = np.arange(0, 11)
        self.cube_1d.data[0, :, 2] = np.arange(0, 11)
        result = remove_cube_halo(self.cube_1d, self.halo_size_m)
        self.assertIsInstance(result, iris.cube.Cube)
        self.assertSequenceEqual(result.data.shape, (1, 9, 9))
        self.assertArrayEqual(result.data[0, 1, :], np.arange(1, 10))
        self.assertArrayEqual(result.data[0, :, 1], np.arange(1, 10))
class Test_remove_halo_from_cube(IrisTest):
    """Test a halo is removed from the cube data."""

    def setUp(self):
        """Create a 5x5 cube and a larger 10x10 cube, each of ones with a
        zero marker at the centre of the grid."""
        self.cube = set_up_variable_cube(
            np.ones((5, 5), dtype=np.float32), spatial_grid="equalarea"
        )
        self.cube.data[2, 2] = 0

        self.large_cube = set_up_variable_cube(
            np.ones((10, 10), dtype=np.float32), spatial_grid="equalarea"
        )
        # set equally-spaced coordinate points
        self.large_cube.coord(axis="y").points = np.linspace(
            0, 900000, 10, dtype=np.float32
        )
        self.large_cube.coord(axis="x").points = np.linspace(
            -300000, 600000, 10, dtype=np.float32
        )
        self.large_cube.data[5, 5] = 0

    def test_basic(self):
        """Removing a 2-cell halo from the 5x5 grid leaves only the central
        (zero) point."""
        trimmed_cube = remove_halo_from_cube(self.cube, 2, 2)
        self.assertIsInstance(trimmed_cube, iris.cube.Cube)
        self.assertArrayAlmostEqual(trimmed_cube.data, np.array([[0.0]]))

    def test_different_widths(self):
        """Different x and y halo widths are removed independently."""
        expected = np.array(
            [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 0.0, 1.0, 1.0]]
        )
        trimmed_cube = remove_halo_from_cube(self.large_cube, 2, 4)
        self.assertIsInstance(trimmed_cube, iris.cube.Cube)
        self.assertArrayAlmostEqual(trimmed_cube.data, expected)

    def test_zero_width(self):
        """A width of zero leaves the corresponding dimension untouched."""
        expected = np.array(
            [
                [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
                [1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0],
            ]
        )
        trimmed_cube = remove_halo_from_cube(self.large_cube, 0, 4)
        self.assertIsInstance(trimmed_cube, iris.cube.Cube)
        self.assertArrayAlmostEqual(trimmed_cube.data, expected)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| bsd-3-clause | e58abcb59aae624597997054739cbb95 | 42.846957 | 86 | 0.559059 | 3.017233 | false | true | false | false |
metoppv/improver | improver/cli/nowcast_optical_flow.py | 3 | 3156 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Script to calculate optical flow advection velocities with option to
extrapolate."""
from improver import cli
@cli.clizefy
@cli.with_output
def process(
    orographic_enhancement: cli.inputcube, *cubes: cli.inputcube,
):
    """Calculate optical flow components from input fields.

    Args:
        orographic_enhancement (iris.cube.Cube):
            Cube containing the orographic enhancement fields.
        cubes (iris.cube.CubeList):
            Cubes from which to calculate optical flow velocities.
            These three cubes will be sorted by their time coords.

    Returns:
        iris.cube.CubeList:
            List of the umean and vmean cubes.
    """
    from iris.cube import CubeList

    from improver.nowcasting.optical_flow import generate_optical_flow_components
    from improver.nowcasting.utilities import ApplyOrographicEnhancement

    # order the input fields by validity time before differencing
    time_ordered = CubeList(
        sorted(cubes, key=lambda cube: cube.coord("time").points[0])
    )

    # subtract orographic enhancement so it does not contribute to the
    # derived motion
    deorographed = ApplyOrographicEnhancement("subtract")(
        time_ordered, orographic_enhancement
    )

    # calculate optical flow velocities from T-1 to T and T-2 to T-1, and
    # average to produce the velocities for use in advection
    u_mean, v_mean = generate_optical_flow_components(
        deorographed, ofc_box_size=30, smart_smoothing_iterations=100
    )
    return CubeList([u_mean, v_mean])
| bsd-3-clause | 4c7758f2df825360fe4bfedb2adcc627 | 39.987013 | 81 | 0.72275 | 4.120104 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.