| body (stringlengths 26–98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (stringlengths 1–16.8k) | path (stringlengths 5–230) | name (stringlengths 1–96) | repository_name (stringlengths 7–89) | lang (stringclasses 1) | body_without_docstring (stringlengths 20–98.2k) |
|---|---|---|---|---|---|---|---|
def plot_filter(h, sfreq, freq=None, gain=None, title=None, color='#1f77b4', flim=None, fscale='log', alim=_DEFAULT_ALIM, show=True, compensate=False, plot=('time', 'magnitude', 'delay'), axes=None):
'Plot properties of a filter.\n\n Parameters\n ----------\n h : dict or ndarray\n An IIR dict or 1D ndarray of coefficients (for FIR filter).\n sfreq : float\n Sample rate of the data (Hz).\n freq : array-like or None\n The ideal response frequencies to plot (must be in ascending order).\n If None (default), do not plot the ideal response.\n gain : array-like or None\n The ideal response gains to plot.\n If None (default), do not plot the ideal response.\n title : str | None\n The title to use. If None (default), determine the title based\n on the type of the system.\n color : color object\n The color to use (default \'#1f77b4\').\n flim : tuple or None\n If not None, the x-axis frequency limits (Hz) to use.\n If None, freq will be used. If None (default) and freq is None,\n ``(0.1, sfreq / 2.)`` will be used.\n fscale : str\n Frequency scaling to use, can be "log" (default) or "linear".\n alim : tuple\n The y-axis amplitude limits (dB) to use (default: (-60, 10)).\n show : bool\n Show figure if True (default).\n compensate : bool\n If True, compensate for the filter delay (phase will not be shown).\n\n - For linear-phase FIR filters, this visualizes the filter coefficients\n assuming that the output will be shifted by ``N // 2``.\n - For IIR filters, this changes the filter coefficient display\n by filtering backward and forward, and the frequency response\n by squaring it.\n\n .. versionadded:: 0.18\n plot : list | tuple | str\n A list of the requested plots from ``time``, ``magnitude`` and\n ``delay``. Default is to plot all three filter properties\n (\'time\', \'magnitude\', \'delay\').\n\n .. versionadded:: 0.21.0\n axes : instance of Axes | list | None\n The axes to plot to. If list, the list must be a list of Axes of\n the same length as the number of requested plot types. If instance of\n Axes, there must be only one filter property plotted.\n Defaults to ``None``.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n The figure containing the plots.\n\n See Also\n --------\n mne.filter.create_filter\n plot_ideal_filter\n\n Notes\n -----\n .. versionadded:: 0.14\n '
from scipy.signal import freqz, group_delay, lfilter, filtfilt, sosfilt, sosfiltfilt
import matplotlib.pyplot as plt
sfreq = float(sfreq)
_check_option('fscale', fscale, ['log', 'linear'])
if isinstance(plot, str):
plot = [plot]
for (xi, x) in enumerate(plot):
_check_option(('plot[%d]' % xi), x, ('magnitude', 'delay', 'time'))
flim = _get_flim(flim, fscale, freq, sfreq)
if (fscale == 'log'):
omega = np.logspace(np.log10(flim[0]), np.log10(flim[1]), 1000)
else:
omega = np.linspace(flim[0], flim[1], 1000)
(xticks, xticklabels) = _filter_ticks(flim, fscale)
omega /= (sfreq / (2 * np.pi))
if isinstance(h, dict):
if ('sos' in h):
H = np.ones(len(omega), np.complex128)
gd = np.zeros(len(omega))
for section in h['sos']:
this_H = freqz(section[:3], section[3:], omega)[1]
H *= this_H
if compensate:
H *= this_H.conj()
else:
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
gd += group_delay((section[:3], section[3:]), omega)[1]
n = estimate_ringing_samples(h['sos'])
delta = np.zeros(n)
delta[0] = 1
if compensate:
delta = np.pad(delta, [((n - 1), 0)], 'constant')
func = sosfiltfilt
gd += ((len(delta) - 1) // 2)
else:
func = sosfilt
h = func(h['sos'], delta)
else:
H = freqz(h['b'], h['a'], omega)[1]
if compensate:
H *= H.conj()
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
gd = group_delay((h['b'], h['a']), omega)[1]
if compensate:
gd += group_delay((h['b'].conj(), h['a'].conj()), omega)[1]
n = estimate_ringing_samples((h['b'], h['a']))
delta = np.zeros(n)
delta[0] = 1
if compensate:
delta = np.pad(delta, [((n - 1), 0)], 'constant')
func = filtfilt
else:
func = lfilter
h = func(h['b'], h['a'], delta)
if (title is None):
title = 'SOS (IIR) filter'
if compensate:
title += ' (forward-backward)'
else:
H = freqz(h, worN=omega)[1]
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
gd = group_delay((h, [1.0]), omega)[1]
title = ('FIR filter' if (title is None) else title)
if compensate:
title += ' (delay-compensated)'
fig = None
if (axes is None):
(fig, axes) = plt.subplots(len(plot), 1)
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if (fig is None):
fig = axes[0].get_figure()
if (len(axes) != len(plot)):
raise ValueError(('Length of axes (%d) must be the same as number of requested filter properties (%d)' % (len(axes), len(plot))))
t = np.arange(len(h))
dlim = (np.abs(t).max() / 2.0)
dlim = [(- dlim), dlim]
if compensate:
n_shift = ((len(h) - 1) // 2)
t -= n_shift
assert (t[0] == (- t[(- 1)]))
gd -= n_shift
t = (t / sfreq)
gd = (gd / sfreq)
f = ((omega * sfreq) / (2 * np.pi))
sl = slice((0 if (fscale == 'linear') else 1), None, None)
mag = (10 * np.log10(np.maximum((H * H.conj()).real, 1e-20)))
if ('time' in plot):
ax_time_idx = np.where([(p == 'time') for p in plot])[0][0]
axes[ax_time_idx].plot(t, h, color=color)
axes[ax_time_idx].set(xlim=t[[0, (- 1)]], xlabel='Time (s)', ylabel='Amplitude', title=title)
if ('magnitude' in plot):
ax_mag_idx = np.where([(p == 'magnitude') for p in plot])[0][0]
axes[ax_mag_idx].plot(f[sl], mag[sl], color=color, linewidth=2, zorder=4)
if ((freq is not None) and (gain is not None)):
plot_ideal_filter(freq, gain, axes[ax_mag_idx], fscale=fscale, show=False)
axes[ax_mag_idx].set(ylabel='Magnitude (dB)', xlabel='', xscale=fscale)
if (xticks is not None):
axes[ax_mag_idx].set(xticks=xticks)
axes[ax_mag_idx].set(xticklabels=xticklabels)
axes[ax_mag_idx].set(xlim=flim, ylim=alim, xlabel='Frequency (Hz)', ylabel='Amplitude (dB)')
if ('delay' in plot):
ax_delay_idx = np.where([(p == 'delay') for p in plot])[0][0]
axes[ax_delay_idx].plot(f[sl], gd[sl], color=color, linewidth=2, zorder=4)
for (start, stop) in zip(*_mask_to_onsets_offsets((mag <= (- 39.9)))):
axes[ax_delay_idx].axvspan(f[start], f[(stop - 1)], facecolor='k', alpha=0.05, zorder=5)
axes[ax_delay_idx].set(xlim=flim, ylabel='Group delay (s)', xlabel='Frequency (Hz)', xscale=fscale)
if (xticks is not None):
axes[ax_delay_idx].set(xticks=xticks)
axes[ax_delay_idx].set(xticklabels=xticklabels)
axes[ax_delay_idx].set(xlim=flim, ylim=dlim, xlabel='Frequency (Hz)', ylabel='Delay (s)')
adjust_axes(axes)
tight_layout()
plt_show(show)
return fig
| -8,341,377,506,035,980,000
|
Plot properties of a filter.
Parameters
----------
h : dict or ndarray
An IIR dict or 1D ndarray of coefficients (for FIR filter).
sfreq : float
Sample rate of the data (Hz).
freq : array-like or None
The ideal response frequencies to plot (must be in ascending order).
If None (default), do not plot the ideal response.
gain : array-like or None
The ideal response gains to plot.
If None (default), do not plot the ideal response.
title : str | None
The title to use. If None (default), determine the title based
on the type of the system.
color : color object
The color to use (default '#1f77b4').
flim : tuple or None
If not None, the x-axis frequency limits (Hz) to use.
If None, freq will be used. If None (default) and freq is None,
``(0.1, sfreq / 2.)`` will be used.
fscale : str
Frequency scaling to use, can be "log" (default) or "linear".
alim : tuple
The y-axis amplitude limits (dB) to use (default: (-60, 10)).
show : bool
Show figure if True (default).
compensate : bool
If True, compensate for the filter delay (phase will not be shown).
- For linear-phase FIR filters, this visualizes the filter coefficients
assuming that the output will be shifted by ``N // 2``.
- For IIR filters, this changes the filter coefficient display
by filtering backward and forward, and the frequency response
by squaring it.
.. versionadded:: 0.18
plot : list | tuple | str
A list of the requested plots from ``time``, ``magnitude`` and
``delay``. Default is to plot all three filter properties
('time', 'magnitude', 'delay').
.. versionadded:: 0.21.0
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of requested plot types. If instance of
Axes, there must be only one filter property plotted.
Defaults to ``None``.
.. versionadded:: 0.21.0
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the plots.
See Also
--------
mne.filter.create_filter
plot_ideal_filter
Notes
-----
.. versionadded:: 0.14
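A minimal usage sketch (assuming MNE-Python and matplotlib are installed; the sample rate and band edges below are illustrative): design a band-pass FIR filter with ``mne.filter.create_filter`` (listed under See Also) and plot its properties:

```python
import mne

sfreq = 1000.0  # sample rate (Hz), illustrative
# data=None skips the data-length sanity check during filter design.
h = mne.filter.create_filter(None, sfreq, l_freq=1.0, h_freq=40.0,
                             fir_design='firwin')
# Impulse response, magnitude, and group delay on a log frequency axis,
# with the linear-phase delay compensated away.
fig = mne.viz.plot_filter(h, sfreq, flim=(0.1, 100.0), compensate=True)
```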
|
mne/viz/misc.py
|
plot_filter
|
Aniket-Pradhan/mne-python
|
python
|
def plot_filter(h, sfreq, freq=None, gain=None, title=None, color='#1f77b4', flim=None, fscale='log', alim=_DEFAULT_ALIM, show=True, compensate=False, plot=('time', 'magnitude', 'delay'), axes=None):
'Plot properties of a filter.\n\n Parameters\n ----------\n h : dict or ndarray\n An IIR dict or 1D ndarray of coefficients (for FIR filter).\n sfreq : float\n Sample rate of the data (Hz).\n freq : array-like or None\n The ideal response frequencies to plot (must be in ascending order).\n If None (default), do not plot the ideal response.\n gain : array-like or None\n The ideal response gains to plot.\n If None (default), do not plot the ideal response.\n title : str | None\n The title to use. If None (default), determine the title based\n on the type of the system.\n color : color object\n The color to use (default \'#1f77b4\').\n flim : tuple or None\n If not None, the x-axis frequency limits (Hz) to use.\n If None, freq will be used. If None (default) and freq is None,\n ``(0.1, sfreq / 2.)`` will be used.\n fscale : str\n Frequency scaling to use, can be "log" (default) or "linear".\n alim : tuple\n The y-axis amplitude limits (dB) to use (default: (-60, 10)).\n show : bool\n Show figure if True (default).\n compensate : bool\n If True, compensate for the filter delay (phase will not be shown).\n\n - For linear-phase FIR filters, this visualizes the filter coefficients\n assuming that the output will be shifted by ``N // 2``.\n - For IIR filters, this changes the filter coefficient display\n by filtering backward and forward, and the frequency response\n by squaring it.\n\n .. versionadded:: 0.18\n plot : list | tuple | str\n A list of the requested plots from ``time``, ``magnitude`` and\n ``delay``. Default is to plot all three filter properties\n (\'time\', \'magnitude\', \'delay\').\n\n .. versionadded:: 0.21.0\n axes : instance of Axes | list | None\n The axes to plot to. If list, the list must be a list of Axes of\n the same length as the number of requested plot types. If instance of\n Axes, there must be only one filter property plotted.\n Defaults to ``None``.\n\n .. versionadded:: 0.21.0\n\n Returns\n -------\n fig : matplotlib.figure.Figure\n The figure containing the plots.\n\n See Also\n --------\n mne.filter.create_filter\n plot_ideal_filter\n\n Notes\n -----\n .. versionadded:: 0.14\n '
from scipy.signal import freqz, group_delay, lfilter, filtfilt, sosfilt, sosfiltfilt
import matplotlib.pyplot as plt
sfreq = float(sfreq)
_check_option('fscale', fscale, ['log', 'linear'])
if isinstance(plot, str):
plot = [plot]
for (xi, x) in enumerate(plot):
_check_option(('plot[%d]' % xi), x, ('magnitude', 'delay', 'time'))
flim = _get_flim(flim, fscale, freq, sfreq)
if (fscale == 'log'):
omega = np.logspace(np.log10(flim[0]), np.log10(flim[1]), 1000)
else:
omega = np.linspace(flim[0], flim[1], 1000)
(xticks, xticklabels) = _filter_ticks(flim, fscale)
omega /= (sfreq / (2 * np.pi))
if isinstance(h, dict):
if ('sos' in h):
H = np.ones(len(omega), np.complex128)
gd = np.zeros(len(omega))
for section in h['sos']:
this_H = freqz(section[:3], section[3:], omega)[1]
H *= this_H
if compensate:
H *= this_H.conj()
else:
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
gd += group_delay((section[:3], section[3:]), omega)[1]
n = estimate_ringing_samples(h['sos'])
delta = np.zeros(n)
delta[0] = 1
if compensate:
delta = np.pad(delta, [((n - 1), 0)], 'constant')
func = sosfiltfilt
gd += ((len(delta) - 1) // 2)
else:
func = sosfilt
h = func(h['sos'], delta)
else:
H = freqz(h['b'], h['a'], omega)[1]
if compensate:
H *= H.conj()
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
gd = group_delay((h['b'], h['a']), omega)[1]
if compensate:
gd += group_delay((h['b'].conj(), h['a'].conj()), omega)[1]
n = estimate_ringing_samples((h['b'], h['a']))
delta = np.zeros(n)
delta[0] = 1
if compensate:
delta = np.pad(delta, [((n - 1), 0)], 'constant')
func = filtfilt
else:
func = lfilter
h = func(h['b'], h['a'], delta)
if (title is None):
title = 'SOS (IIR) filter'
if compensate:
title += ' (forward-backward)'
else:
H = freqz(h, worN=omega)[1]
with warnings.catch_warnings(record=True):
warnings.simplefilter('ignore')
gd = group_delay((h, [1.0]), omega)[1]
title = ('FIR filter' if (title is None) else title)
if compensate:
title += ' (delay-compensated)'
fig = None
if (axes is None):
(fig, axes) = plt.subplots(len(plot), 1)
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if (fig is None):
fig = axes[0].get_figure()
if (len(axes) != len(plot)):
raise ValueError(('Length of axes (%d) must be the same as number of requested filter properties (%d)' % (len(axes), len(plot))))
t = np.arange(len(h))
dlim = (np.abs(t).max() / 2.0)
dlim = [(- dlim), dlim]
if compensate:
n_shift = ((len(h) - 1) // 2)
t -= n_shift
assert (t[0] == (- t[(- 1)]))
gd -= n_shift
t = (t / sfreq)
gd = (gd / sfreq)
f = ((omega * sfreq) / (2 * np.pi))
sl = slice((0 if (fscale == 'linear') else 1), None, None)
mag = (10 * np.log10(np.maximum((H * H.conj()).real, 1e-20)))
if ('time' in plot):
ax_time_idx = np.where([(p == 'time') for p in plot])[0][0]
axes[ax_time_idx].plot(t, h, color=color)
axes[ax_time_idx].set(xlim=t[[0, (- 1)]], xlabel='Time (s)', ylabel='Amplitude', title=title)
if ('magnitude' in plot):
ax_mag_idx = np.where([(p == 'magnitude') for p in plot])[0][0]
axes[ax_mag_idx].plot(f[sl], mag[sl], color=color, linewidth=2, zorder=4)
if ((freq is not None) and (gain is not None)):
plot_ideal_filter(freq, gain, axes[ax_mag_idx], fscale=fscale, show=False)
axes[ax_mag_idx].set(ylabel='Magnitude (dB)', xlabel='', xscale=fscale)
if (xticks is not None):
axes[ax_mag_idx].set(xticks=xticks)
axes[ax_mag_idx].set(xticklabels=xticklabels)
axes[ax_mag_idx].set(xlim=flim, ylim=alim, xlabel='Frequency (Hz)', ylabel='Amplitude (dB)')
if ('delay' in plot):
ax_delay_idx = np.where([(p == 'delay') for p in plot])[0][0]
axes[ax_delay_idx].plot(f[sl], gd[sl], color=color, linewidth=2, zorder=4)
for (start, stop) in zip(*_mask_to_onsets_offsets((mag <= (- 39.9)))):
axes[ax_delay_idx].axvspan(f[start], f[(stop - 1)], facecolor='k', alpha=0.05, zorder=5)
axes[ax_delay_idx].set(xlim=flim, ylabel='Group delay (s)', xlabel='Frequency (Hz)', xscale=fscale)
if (xticks is not None):
axes[ax_delay_idx].set(xticks=xticks)
axes[ax_delay_idx].set(xticklabels=xticklabels)
axes[ax_delay_idx].set(xlim=flim, ylim=dlim, xlabel='Frequency (Hz)', ylabel='Delay (s)')
adjust_axes(axes)
tight_layout()
plt_show(show)
return fig
|
def plot_ideal_filter(freq, gain, axes=None, title='', flim=None, fscale='log', alim=_DEFAULT_ALIM, color='r', alpha=0.5, linestyle='--', show=True):
'Plot an ideal filter response.\n\n Parameters\n ----------\n freq : array-like\n The ideal response frequencies to plot (must be in ascending order).\n gain : array-like or None\n The ideal response gains to plot.\n axes : instance of Axes | None\n The subplot handle. With None (default), axes are created.\n title : str\n The title to use, (default: \'\').\n flim : tuple or None\n If not None, the x-axis frequency limits (Hz) to use.\n If None (default), freq used.\n fscale : str\n Frequency scaling to use, can be "log" (default) or "linear".\n alim : tuple\n If not None (default), the y-axis limits (dB) to use.\n color : color object\n The color to use (default: \'r\').\n alpha : float\n The alpha to use (default: 0.5).\n linestyle : str\n The line style to use (default: \'--\').\n show : bool\n Show figure if True (default).\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n The figure.\n\n See Also\n --------\n plot_filter\n\n Notes\n -----\n .. versionadded:: 0.14\n\n Examples\n --------\n Plot a simple ideal band-pass filter::\n\n >>> from mne.viz import plot_ideal_filter\n >>> freq = [0, 1, 40, 50]\n >>> gain = [0, 1, 1, 0]\n >>> plot_ideal_filter(freq, gain, flim=(0.1, 100)) #doctest: +ELLIPSIS\n <...Figure...>\n '
import matplotlib.pyplot as plt
(my_freq, my_gain) = (list(), list())
if (freq[0] != 0):
raise ValueError(('freq should start with DC (zero) and end with Nyquist, but got %s for DC' % (freq[0],)))
freq = np.array(freq)
_check_option('fscale', fscale, ['log', 'linear'])
if (fscale == 'log'):
freq[0] = ((0.1 * freq[1]) if (flim is None) else min(flim[0], freq[1]))
flim = _get_flim(flim, fscale, freq)
transitions = list()
for ii in range(len(freq)):
if ((ii < (len(freq) - 1)) and (gain[ii] != gain[(ii + 1)])):
transitions += [[freq[ii], freq[(ii + 1)]]]
my_freq += np.linspace(freq[ii], freq[(ii + 1)], 20, endpoint=False).tolist()
my_gain += np.linspace(gain[ii], gain[(ii + 1)], 20, endpoint=False).tolist()
else:
my_freq.append(freq[ii])
my_gain.append(gain[ii])
my_gain = (10 * np.log10(np.maximum(my_gain, (10 ** (alim[0] / 10.0)))))
if (axes is None):
axes = plt.subplots(1)[1]
for transition in transitions:
axes.axvspan(*transition, color=color, alpha=0.1)
axes.plot(my_freq, my_gain, color=color, linestyle=linestyle, alpha=0.5, linewidth=4, zorder=3)
(xticks, xticklabels) = _filter_ticks(flim, fscale)
axes.set(ylim=alim, xlabel='Frequency (Hz)', ylabel='Amplitude (dB)', xscale=fscale)
if (xticks is not None):
axes.set(xticks=xticks)
axes.set(xticklabels=xticklabels)
axes.set(xlim=flim)
if title:
axes.set(title=title)
adjust_axes(axes)
tight_layout()
plt_show(show)
return axes.figure
| 1,679,440,645,691,839,700
|
Plot an ideal filter response.
Parameters
----------
freq : array-like
The ideal response frequencies to plot (must be in ascending order).
gain : array-like or None
The ideal response gains to plot.
axes : instance of Axes | None
The subplot handle. With None (default), axes are created.
title : str
The title to use, (default: '').
flim : tuple or None
If not None, the x-axis frequency limits (Hz) to use.
If None (default), freq is used.
fscale : str
Frequency scaling to use, can be "log" (default) or "linear".
alim : tuple
If not None (default), the y-axis limits (dB) to use.
color : color object
The color to use (default: 'r').
alpha : float
The alpha to use (default: 0.5).
linestyle : str
The line style to use (default: '--').
show : bool
Show figure if True (default).
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
See Also
--------
plot_filter
Notes
-----
.. versionadded:: 0.14
Examples
--------
Plot a simple ideal band-pass filter::
>>> from mne.viz import plot_ideal_filter
>>> freq = [0, 1, 40, 50]
>>> gain = [0, 1, 1, 0]
>>> plot_ideal_filter(freq, gain, flim=(0.1, 100)) #doctest: +ELLIPSIS
<...Figure...>
|
mne/viz/misc.py
|
plot_ideal_filter
|
Aniket-Pradhan/mne-python
|
python
|
def plot_ideal_filter(freq, gain, axes=None, title='', flim=None, fscale='log', alim=_DEFAULT_ALIM, color='r', alpha=0.5, linestyle='--', show=True):
'Plot an ideal filter response.\n\n Parameters\n ----------\n freq : array-like\n The ideal response frequencies to plot (must be in ascending order).\n gain : array-like or None\n The ideal response gains to plot.\n axes : instance of Axes | None\n The subplot handle. With None (default), axes are created.\n title : str\n The title to use, (default: \'\').\n flim : tuple or None\n If not None, the x-axis frequency limits (Hz) to use.\n If None (default), freq used.\n fscale : str\n Frequency scaling to use, can be "log" (default) or "linear".\n alim : tuple\n If not None (default), the y-axis limits (dB) to use.\n color : color object\n The color to use (default: \'r\').\n alpha : float\n The alpha to use (default: 0.5).\n linestyle : str\n The line style to use (default: \'--\').\n show : bool\n Show figure if True (default).\n\n Returns\n -------\n fig : instance of matplotlib.figure.Figure\n The figure.\n\n See Also\n --------\n plot_filter\n\n Notes\n -----\n .. versionadded:: 0.14\n\n Examples\n --------\n Plot a simple ideal band-pass filter::\n\n >>> from mne.viz import plot_ideal_filter\n >>> freq = [0, 1, 40, 50]\n >>> gain = [0, 1, 1, 0]\n >>> plot_ideal_filter(freq, gain, flim=(0.1, 100)) #doctest: +ELLIPSIS\n <...Figure...>\n '
import matplotlib.pyplot as plt
(my_freq, my_gain) = (list(), list())
if (freq[0] != 0):
raise ValueError(('freq should start with DC (zero) and end with Nyquist, but got %s for DC' % (freq[0],)))
freq = np.array(freq)
_check_option('fscale', fscale, ['log', 'linear'])
if (fscale == 'log'):
freq[0] = ((0.1 * freq[1]) if (flim is None) else min(flim[0], freq[1]))
flim = _get_flim(flim, fscale, freq)
transitions = list()
for ii in range(len(freq)):
if ((ii < (len(freq) - 1)) and (gain[ii] != gain[(ii + 1)])):
transitions += [[freq[ii], freq[(ii + 1)]]]
my_freq += np.linspace(freq[ii], freq[(ii + 1)], 20, endpoint=False).tolist()
my_gain += np.linspace(gain[ii], gain[(ii + 1)], 20, endpoint=False).tolist()
else:
my_freq.append(freq[ii])
my_gain.append(gain[ii])
my_gain = (10 * np.log10(np.maximum(my_gain, (10 ** (alim[0] / 10.0)))))
if (axes is None):
axes = plt.subplots(1)[1]
for transition in transitions:
axes.axvspan(*transition, color=color, alpha=0.1)
axes.plot(my_freq, my_gain, color=color, linestyle=linestyle, alpha=0.5, linewidth=4, zorder=3)
(xticks, xticklabels) = _filter_ticks(flim, fscale)
axes.set(ylim=alim, xlabel='Frequency (Hz)', ylabel='Amplitude (dB)', xscale=fscale)
if (xticks is not None):
axes.set(xticks=xticks)
axes.set(xticklabels=xticklabels)
axes.set(xlim=flim)
if title:
axes.set(title=title)
adjust_axes(axes)
tight_layout()
plt_show(show)
return axes.figure
|
def _handle_event_colors(color_dict, unique_events, event_id):
'Create event-integer-to-color mapping, assigning defaults as needed.'
default_colors = dict(zip(sorted(unique_events), cycle(_get_color_list())))
if (color_dict is None):
if (len(unique_events) > len(_get_color_list())):
warn('More events than default colors available. You should pass a list of unique colors.')
else:
custom_colors = dict()
for (key, color) in color_dict.items():
if (key in unique_events):
custom_colors[key] = color
elif (key in event_id):
custom_colors[event_id[key]] = color
else:
warn(('Event ID %s is in the color dict but is not present in events or event_id.' % str(key)))
unassigned = sorted((set(unique_events) - set(custom_colors)))
if len(unassigned):
unassigned_str = ', '.join((str(e) for e in unassigned))
warn(('Color was not assigned for event%s %s. Default colors will be used.' % (_pl(unassigned), unassigned_str)))
default_colors.update(custom_colors)
return default_colors
| 2,246,880,496,342,512,400
|
Create event-integer-to-color mapping, assigning defaults as needed.
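A small illustration of this private helper's contract (a sketch; the inputs are hypothetical). Keys of ``color_dict`` may be event integers or ``event_id`` names, and unlisted events fall back to the default color cycle:

```python
unique_events = [1, 2, 3]
event_id = {'auditory': 1, 'visual': 2}
colors = _handle_event_colors({'auditory': 'r', 3: 'g'}, unique_events, event_id)
# colors maps 1 -> 'r' and 3 -> 'g'; event 2 gets a default cycle color
# (and a warning is emitted because no color was assigned to it).
```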
|
mne/viz/misc.py
|
_handle_event_colors
|
Aniket-Pradhan/mne-python
|
python
|
def _handle_event_colors(color_dict, unique_events, event_id):
default_colors = dict(zip(sorted(unique_events), cycle(_get_color_list())))
if (color_dict is None):
if (len(unique_events) > len(_get_color_list())):
warn('More events than default colors available. You should pass a list of unique colors.')
else:
custom_colors = dict()
for (key, color) in color_dict.items():
if (key in unique_events):
custom_colors[key] = color
elif (key in event_id):
custom_colors[event_id[key]] = color
else:
warn(('Event ID %s is in the color dict but is not present in events or event_id.' % str(key)))
unassigned = sorted((set(unique_events) - set(custom_colors)))
if len(unassigned):
unassigned_str = ', '.join((str(e) for e in unassigned))
warn(('Color was not assigned for event%s %s. Default colors will be used.' % (_pl(unassigned), unassigned_str)))
default_colors.update(custom_colors)
return default_colors
|
def plot_csd(csd, info=None, mode='csd', colorbar=True, cmap=None, n_cols=None, show=True):
"Plot CSD matrices.\n\n A sub-plot is created for each frequency. If an info object is passed to\n the function, different channel types are plotted in different figures.\n\n Parameters\n ----------\n csd : instance of CrossSpectralDensity\n The CSD matrix to plot.\n info : instance of Info | None\n To split the figure by channel-type, provide the measurement info.\n By default, the CSD matrix is plotted as a whole.\n mode : 'csd' | 'coh'\n Whether to plot the cross-spectral density ('csd', the default), or\n the coherence ('coh') between the channels.\n colorbar : bool\n Whether to show a colorbar. Defaults to ``True``.\n cmap : str | None\n The matplotlib colormap to use. Defaults to None, which means the\n colormap will default to matplotlib's default.\n n_cols : int | None\n CSD matrices are plotted in a grid. This parameter controls how\n many matrix to plot side by side before starting a new row. By\n default, a number will be chosen to make the grid as square as\n possible.\n show : bool\n Whether to show the figure. Defaults to ``True``.\n\n Returns\n -------\n fig : list of Figure\n The figures created by this function.\n "
import matplotlib.pyplot as plt
if (mode not in ['csd', 'coh']):
raise ValueError('"mode" should be either "csd" or "coh".')
if (info is not None):
info_ch_names = info['ch_names']
sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False, exclude=[])
sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False, exclude=[])
sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False, exclude=[])
idx_eeg = [csd.ch_names.index(info_ch_names[c]) for c in sel_eeg if (info_ch_names[c] in csd.ch_names)]
idx_mag = [csd.ch_names.index(info_ch_names[c]) for c in sel_mag if (info_ch_names[c] in csd.ch_names)]
idx_grad = [csd.ch_names.index(info_ch_names[c]) for c in sel_grad if (info_ch_names[c] in csd.ch_names)]
indices = [idx_eeg, idx_mag, idx_grad]
titles = ['EEG', 'Magnetometers', 'Gradiometers']
if (mode == 'csd'):
units = dict(eeg='µV²', grad='fT²/cm²', mag='fT²')
scalings = dict(eeg=1000000000000.0, grad=1e+26, mag=1e+30)
else:
indices = [np.arange(len(csd.ch_names))]
if (mode == 'csd'):
titles = ['Cross-spectral density']
units = dict()
scalings = dict()
elif (mode == 'coh'):
titles = ['Coherence']
n_freqs = len(csd.frequencies)
if (n_cols is None):
n_cols = int(np.ceil(np.sqrt(n_freqs)))
n_rows = int(np.ceil((n_freqs / float(n_cols))))
figs = []
for (ind, title, ch_type) in zip(indices, titles, ['eeg', 'mag', 'grad']):
if (len(ind) == 0):
continue
(fig, axes) = plt.subplots(n_rows, n_cols, squeeze=False, figsize=(((2 * n_cols) + 1), (2.2 * n_rows)))
csd_mats = []
for i in range(len(csd.frequencies)):
cm = csd.get_data(index=i)[ind][:, ind]
if (mode == 'csd'):
cm = (np.abs(cm) * scalings.get(ch_type, 1))
elif (mode == 'coh'):
psd = np.diag(cm).real
cm = (((np.abs(cm) ** 2) / psd[np.newaxis, :]) / psd[:, np.newaxis])
csd_mats.append(cm)
vmax = np.max(csd_mats)
for (i, (freq, mat)) in enumerate(zip(csd.frequencies, csd_mats)):
ax = axes[(i // n_cols)][(i % n_cols)]
im = ax.imshow(mat, interpolation='nearest', cmap=cmap, vmin=0, vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
if csd._is_sum:
ax.set_title(('%.1f-%.1f Hz.' % (np.min(freq), np.max(freq))))
else:
ax.set_title(('%.1f Hz.' % freq))
plt.suptitle(title)
plt.subplots_adjust(top=0.8)
if colorbar:
cb = plt.colorbar(im, ax=[a for ax_ in axes for a in ax_])
if (mode == 'csd'):
label = u'CSD'
if (ch_type in units):
label += (u' (%s)' % units[ch_type])
cb.set_label(label)
elif (mode == 'coh'):
cb.set_label('Coherence')
figs.append(fig)
plt_show(show)
return figs
| -1,834,918,576,187,389,200
|
Plot CSD matrices.
A sub-plot is created for each frequency. If an info object is passed to
the function, different channel types are plotted in different figures.
Parameters
----------
csd : instance of CrossSpectralDensity
The CSD matrix to plot.
info : instance of Info | None
To split the figure by channel-type, provide the measurement info.
By default, the CSD matrix is plotted as a whole.
mode : 'csd' | 'coh'
Whether to plot the cross-spectral density ('csd', the default), or
the coherence ('coh') between the channels.
colorbar : bool
Whether to show a colorbar. Defaults to ``True``.
cmap : str | None
The matplotlib colormap to use. Defaults to None, which means that
matplotlib's default colormap is used.
n_cols : int | None
CSD matrices are plotted in a grid. This parameter controls how
many matrices to plot side by side before starting a new row. By
default, a number will be chosen to make the grid as square as
possible.
show : bool
Whether to show the figure. Defaults to ``True``.
Returns
-------
fig : list of Figure
The figures created by this function.
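A minimal usage sketch (assuming MNE-Python; ``epochs`` is a hypothetical ``mne.Epochs`` instance):

```python
from mne.time_frequency import csd_fourier

# Estimate the CSD between 15 and 25 Hz, then plot coherence,
# split by channel type because measurement info is provided.
csd = csd_fourier(epochs, fmin=15, fmax=25)
figs = plot_csd(csd, info=epochs.info, mode='coh', n_cols=3)
```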
|
mne/viz/misc.py
|
plot_csd
|
Aniket-Pradhan/mne-python
|
python
|
def plot_csd(csd, info=None, mode='csd', colorbar=True, cmap=None, n_cols=None, show=True):
"Plot CSD matrices.\n\n A sub-plot is created for each frequency. If an info object is passed to\n the function, different channel types are plotted in different figures.\n\n Parameters\n ----------\n csd : instance of CrossSpectralDensity\n The CSD matrix to plot.\n info : instance of Info | None\n To split the figure by channel-type, provide the measurement info.\n By default, the CSD matrix is plotted as a whole.\n mode : 'csd' | 'coh'\n Whether to plot the cross-spectral density ('csd', the default), or\n the coherence ('coh') between the channels.\n colorbar : bool\n Whether to show a colorbar. Defaults to ``True``.\n cmap : str | None\n The matplotlib colormap to use. Defaults to None, which means the\n colormap will default to matplotlib's default.\n n_cols : int | None\n CSD matrices are plotted in a grid. This parameter controls how\n many matrix to plot side by side before starting a new row. By\n default, a number will be chosen to make the grid as square as\n possible.\n show : bool\n Whether to show the figure. Defaults to ``True``.\n\n Returns\n -------\n fig : list of Figure\n The figures created by this function.\n "
import matplotlib.pyplot as plt
if (mode not in ['csd', 'coh']):
raise ValueError('"mode" should be either "csd" or "coh".')
if (info is not None):
info_ch_names = info['ch_names']
sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False, exclude=[])
sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False, exclude=[])
sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False, exclude=[])
idx_eeg = [csd.ch_names.index(info_ch_names[c]) for c in sel_eeg if (info_ch_names[c] in csd.ch_names)]
idx_mag = [csd.ch_names.index(info_ch_names[c]) for c in sel_mag if (info_ch_names[c] in csd.ch_names)]
idx_grad = [csd.ch_names.index(info_ch_names[c]) for c in sel_grad if (info_ch_names[c] in csd.ch_names)]
indices = [idx_eeg, idx_mag, idx_grad]
titles = ['EEG', 'Magnetometers', 'Gradiometers']
if (mode == 'csd'):
units = dict(eeg='µV²', grad='fT²/cm²', mag='fT²')
scalings = dict(eeg=1000000000000.0, grad=1e+26, mag=1e+30)
else:
indices = [np.arange(len(csd.ch_names))]
if (mode == 'csd'):
titles = ['Cross-spectral density']
units = dict()
scalings = dict()
elif (mode == 'coh'):
titles = ['Coherence']
n_freqs = len(csd.frequencies)
if (n_cols is None):
n_cols = int(np.ceil(np.sqrt(n_freqs)))
n_rows = int(np.ceil((n_freqs / float(n_cols))))
figs = []
for (ind, title, ch_type) in zip(indices, titles, ['eeg', 'mag', 'grad']):
if (len(ind) == 0):
continue
(fig, axes) = plt.subplots(n_rows, n_cols, squeeze=False, figsize=(((2 * n_cols) + 1), (2.2 * n_rows)))
csd_mats = []
for i in range(len(csd.frequencies)):
cm = csd.get_data(index=i)[ind][:, ind]
if (mode == 'csd'):
cm = (np.abs(cm) * scalings.get(ch_type, 1))
elif (mode == 'coh'):
psd = np.diag(cm).real
cm = (((np.abs(cm) ** 2) / psd[np.newaxis, :]) / psd[:, np.newaxis])
csd_mats.append(cm)
vmax = np.max(csd_mats)
for (i, (freq, mat)) in enumerate(zip(csd.frequencies, csd_mats)):
ax = axes[(i // n_cols)][(i % n_cols)]
im = ax.imshow(mat, interpolation='nearest', cmap=cmap, vmin=0, vmax=vmax)
ax.set_xticks([])
ax.set_yticks([])
if csd._is_sum:
ax.set_title(('%.1f-%.1f Hz.' % (np.min(freq), np.max(freq))))
else:
ax.set_title(('%.1f Hz.' % freq))
plt.suptitle(title)
plt.subplots_adjust(top=0.8)
if colorbar:
cb = plt.colorbar(im, ax=[a for ax_ in axes for a in ax_])
if (mode == 'csd'):
label = u'CSD'
if (ch_type in units):
label += (u' (%s)' % units[ch_type])
cb.set_label(label)
elif (mode == 'coh'):
cb.set_label('Coherence')
figs.append(fig)
plt_show(show)
return figs
|
def close(self):
'Stops collecting replies from its task.'
self.set_exception(TaskClosed)
self.collector.remove_result(self)
| 576,972,578,691,620,700
|
Stops collecting replies from its task.
|
zeronimo/results.py
|
close
|
sublee/zeronimo
|
python
|
def close(self):
self.set_exception(TaskClosed)
self.collector.remove_result(self)
|
def set_remote_exception(self, remote_exc_info):
'Raises an exception as a :exc:`RemoteException`.'
(exc_type, exc_str, filename, lineno) = remote_exc_info[:4]
exc_type = RemoteException.compose(exc_type)
exc = exc_type(exc_str, filename, lineno, self.worker_info)
if (len(remote_exc_info) > 4):
state = remote_exc_info[4]
exc.__setstate__(state)
self.set_exception(exc)
| -8,435,482,650,142,885,000
|
Raises an exception as a :exc:`RemoteException`.
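The layout of ``remote_exc_info`` can be read off the method body; a sketch with placeholder values (``result`` is a hypothetical results instance):

```python
remote_exc_info = (
    ValueError,           # exc_type, composed into a RemoteException subclass
    'bad argument',       # stringified remote exception
    'worker/service.py',  # filename where it was raised remotely
    42,                   # line number
    # an optional fifth item is passed to exc.__setstate__()
)
result.set_remote_exception(remote_exc_info)
```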
|
zeronimo/results.py
|
set_remote_exception
|
sublee/zeronimo
|
python
|
def set_remote_exception(self, remote_exc_info):
(exc_type, exc_str, filename, lineno) = remote_exc_info[:4]
exc_type = RemoteException.compose(exc_type)
exc = exc_type(exc_str, filename, lineno, self.worker_info)
if (len(remote_exc_info) > 4):
state = remote_exc_info[4]
exc.__setstate__(state)
self.set_exception(exc)
|
def parseKeyValueData(astr):
"Parses a string of the form:\n 'keyword1=value11, value12,...; keyword2=value21, value22; keyword3=; keyword4; ...'\n returning an opscore.RO.Alg.OrderedDict of the form:\n {keyword1:(value11, value12,...), keyword2:(value21, value22, ...),\n keyword3: (), keyword4: (), ...}\n\n Inputs:\n - astr: the string to parse, of the form:\n keyword1=value11, value12,...; keyword2=value21, value22...\n where:\n - keyword is a keyword; it must start with a letter or underscore\n and may contain those characters or digits thereafter.\n - value is the value of the keyword, one of:\n an integer\n a floating point number\n a string delimited by a pair of single or double quotes\n any enclosed characters identical to the delimiter\n should be escaped by doubling or preceding with a backslash\n - Each keyword may have zero or more comma-separated values;\n if it has zero values then the equals sign may be omitted.\n\n Returns dataDict, an opscore.RO.Alg.OrderedDict of keyword: valueTuple entries,\n one for each keyword. Details:\n - The keywords are given in the order they were specified in the message.\n - If the keyword has no values, valueTuple is ()\n - If the keyword has one value, valueTuple is (value,)\n "
dataDict = opscore.RO.Alg.OrderedDict()
if (astr == ''):
return dataDict
nextInd = 0
while (nextInd is not None):
(keyword, nextInd) = getKeyword(astr, nextInd)
(valueTuple, nextInd) = getValues(astr, nextInd)
dataDict[keyword] = valueTuple
return dataDict
| -1,682,904,546,708,252,200
|
Parses a string of the form:
'keyword1=value11, value12,...; keyword2=value21, value22; keyword3=; keyword4; ...'
returning an opscore.RO.Alg.OrderedDict of the form:
{keyword1:(value11, value12,...), keyword2:(value21, value22, ...),
keyword3: (), keyword4: (), ...}
Inputs:
- astr: the string to parse, of the form:
keyword1=value11, value12,...; keyword2=value21, value22...
where:
- keyword is a keyword; it must start with a letter or underscore
and may contain those characters or digits thereafter.
- value is the value of the keyword, one of:
an integer
a floating point number
a string delimited by a pair of single or double quotes
any enclosed characters identical to the delimiter
should be escaped by doubling or preceding with a backslash
- Each keyword may have zero or more comma-separated values;
if it has zero values then the equals sign may be omitted.
Returns dataDict, an opscore.RO.Alg.OrderedDict of keyword: valueTuple entries,
one for each keyword. Details:
- The keywords are given in the order they were specified in the message.
- If the keyword has no values, valueTuple is ()
- If the keyword has one value, valueTuple is (value,)
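A worked example of the documented format (a sketch; tokenizing details are delegated to the ``getKeyword``/``getValues`` helpers):

```python
data = parseKeyValueData("msgType='alert'; count=1, 2; flag")
# data is an OrderedDict preserving keyword order:
#   data['msgType'] == ('alert',)
#   data['count'] == (1, 2)
#   data['flag'] == ()
```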
|
python/opscore/RO/ParseMsg/ParseData.py
|
parseKeyValueData
|
sdss/opscore
|
python
|
def parseKeyValueData(astr):
"Parses a string of the form:\n 'keyword1=value11, value12,...; keyword2=value21, value22; keyword3=; keyword4; ...'\n returning an opscore.RO.Alg.OrderedDict of the form:\n {keyword1:(value11, value12,...), keyword2:(value21, value22, ...),\n keyword3: (), keyword4: (), ...}\n\n Inputs:\n - astr: the string to parse, of the form:\n keyword1=value11, value12,...; keyword2=value21, value22...\n where:\n - keyword is a keyword; it must start with a letter or underscore\n and may contain those characters or digits thereafter.\n - value is the value of the keyword, one of:\n an integer\n a floating point number\n a string delimited by a pair of single or double quotes\n any enclosed characters identical to the delimiter\n should be escaped by doubling or preceding with a backslash\n - Each keyword may have zero or more comma-separated values;\n if it has zero values then the equals sign may be omitted.\n\n Returns dataDict, an opscore.RO.Alg.OrderedDict of keyword: valueTuple entries,\n one for each keyword. Details:\n - The keywords are given in the order they were specified in the message.\n - If the keyword has no values, valueTuple is ()\n - If the keyword has one value, valueTuple is (value,)\n "
dataDict = opscore.RO.Alg.OrderedDict()
if (astr == ''):
return dataDict
nextInd = 0
while (nextInd is not None):
(keyword, nextInd) = getKeyword(astr, nextInd)
(valueTuple, nextInd) = getValues(astr, nextInd)
dataDict[keyword] = valueTuple
return dataDict
|
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"Test model with multiple gpus.\n\n This method tests model with multiple gpus and collects the results\n under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'\n it encodes results to gpu tensors and use gpu communication for results\n collection. On cpu mode it saves the results on different gpus to 'tmpdir'\n and collects them by the rank 0 worker.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (nn.Dataloader): Pytorch data loader.\n tmpdir (str): Path of directory to save the temporary results from\n different gpus under cpu mode.\n gpu_collect (bool): Option to use either gpu or cpu to collect results.\n\n Returns:\n list: The prediction results.\n "
model.eval()
results = []
dataset = data_loader.dataset
(rank, world_size) = get_dist_info()
if (rank == 0):
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2)
for (i, data) in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results)) for (bbox_results, mask_results) in result]
results.extend(result)
if (rank == 0):
batch_size = len(result)
for _ in range((batch_size * world_size)):
prog_bar.update()
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
| -1,266,325,845,621,321,200
|
Test model with multiple gpus.
This method tests the model with multiple gpus and collects the results
under two different modes: gpu and cpu. By setting 'gpu_collect=True'
it encodes results to gpu tensors and uses gpu communication for results
collection. In cpu mode it saves the results on different gpus to 'tmpdir'
and the rank 0 worker collects them.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
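A distributed-testing sketch (assumes an initialized ``torch.distributed`` process group and MMDetection-style ``model`` and ``data_loader`` objects; the wrapping mirrors the usual mmdet test scripts):

```python
import torch
from mmcv.parallel import MMDistributedDataParallel

model = MMDistributedDataParallel(
    model.cuda(),
    device_ids=[torch.cuda.current_device()],
    broadcast_buffers=False)
# cpu-mode collection through a shared tmpdir; pass gpu_collect=True
# to gather results over gpu communication instead.
results = multi_gpu_test(model, data_loader, tmpdir='./eval_tmp',
                         gpu_collect=False)
```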
|
mmdetection/mmdet/apis/test.py
|
multi_gpu_test
|
lizhaoliu-Lec/Conformer
|
python
|
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"Test model with multiple gpus.\n\n This method tests model with multiple gpus and collects the results\n under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'\n it encodes results to gpu tensors and use gpu communication for results\n collection. On cpu mode it saves the results on different gpus to 'tmpdir'\n and collects them by the rank 0 worker.\n\n Args:\n model (nn.Module): Model to be tested.\n data_loader (nn.Dataloader): Pytorch data loader.\n tmpdir (str): Path of directory to save the temporary results from\n different gpus under cpu mode.\n gpu_collect (bool): Option to use either gpu or cpu to collect results.\n\n Returns:\n list: The prediction results.\n "
model.eval()
results = []
dataset = data_loader.dataset
(rank, world_size) = get_dist_info()
if (rank == 0):
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2)
for (i, data) in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results)) for (bbox_results, mask_results) in result]
results.extend(result)
if (rank == 0):
batch_size = len(result)
for _ in range((batch_size * world_size)):
prog_bar.update()
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
|
def dlcboxplot(file, variable, ylab, comparison, jitter=False, colors=False, title=False, save=False, output_dir=None):
'\n file is typically \'dlc_all_avgs_updated.csv\'\n variable is either \'cat_distance\' or \'vel\'\n ylab is the y-axis label\n colors is a list of two colors (e.g., ["#0062FF", "#DB62FF"])\n output_dir to save the plot in a specific dir when save is True\n '
df = pd.read_csv(file)
tls = trials()
new = ['FT', 'ALONE1', 'SALINE1', 'ALONE2', 'URINE1', 'ALONE3', 'SALINE2', 'ALONE4', 'URINE2', 'ALONE5']
if (variable == 'distance'):
df = df[df['trial'].isin(tls[0::2])]
d = {}
for (i, j) in zip(new, tls):
d[j] = i
df = df.replace(d)
df = df[(df['var'] == variable)]
sns.set(style='ticks', font_scale=1)
plt.figure(figsize=(13, 5), dpi=100)
if (comparison == 'infection_status'):
(test, control) = ('Infected', 'Control')
comparing = 'infection_status'
legend = 'Infection Status'
elif (comparison == 'indoor_outdoor_status'):
(test, control) = ('Indoor-outdoor', 'Indoor')
comparing = 'indoor_outdoor_status'
legend = 'Indoor-outdoor Status'
if (colors is False):
my_pal = {control: '#00FFFF', test: '#E60E3C'}
else:
my_pal = {control: colors[0], test: colors[1]}
ax = sns.boxplot(x='trial', y='value', data=df, hue=comparing, palette=my_pal)
if (jitter is True):
sns.stripplot(x='trial', y='value', data=df, color='black', size=3, jitter=1)
if (variable != 'distance'):
for i in range((len(df['trial'].unique()) - 1)):
if (variable == 'vel'):
plt.vlines((i + 0.5), 10, 45, linestyles='solid', colors='black', alpha=0.2)
elif (variable == 'cat_distance'):
plt.vlines((i + 0.5), 0, 1.3, linestyles='solid', colors='black', alpha=0.2)
if (title is not False):
plt.title(title, fontsize=12)
else:
pass
ax.set_xlabel('Trial', fontsize=12)
ax.set_ylabel(ylab, fontsize=12)
ax.legend(title=legend)
plt.legend(title=legend)
# add significance bars and asterisks between boxes.
# [first pair, second pair], ..., [|, –], ...
if (variable == 'vel'):
l = [[7.75, 5.75], [8.25, 6.25], [26, 28], [31, 33]]
elif (variable == 'cat_distance'):
l = [[7.75, 5.75], [8.25, 6.25], [0.85, 0.9], [0.95, 1]]
for (x1, x2, y1, y2) in zip(l[0], l[1], l[2], l[3]):
sig = plt.plot([x1, x1, x2, x2], [y1, y2, y2, y1], linewidth=1, color='k')
plt.text(((x1 + x2) * 0.5), (y2 + 0), '*', ha='center', va='bottom', fontsize=18)
plt.show()
fig = ax.get_figure()
if (save is True):
def sav(myString):
return fig.savefig(myString, bbox_inches='tight', dpi=100, pad_inches=0.1)
if (output_dir is not None):
sav(f'{output_dir}/{variable}.png')
else:
sav(f'{variable}.png')
| -8,660,963,386,661,336,000
|
file is typically 'dlc_all_avgs_updated.csv'
variable is either 'cat_distance' or 'vel'
ylab is the y-axis label
colors is a list of two colors (e.g., ["#0062FF", "#DB62FF"])
output_dir to save the plot in a specific dir when save is True
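An example call (a sketch; the CSV name comes from the docstring, while the axis label and output directory are illustrative):

```python
dlcboxplot('dlc_all_avgs_updated.csv', variable='vel', ylab='Velocity',
           comparison='infection_status', jitter=True,
           save=True, output_dir='plots')
```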
|
toxopy/dlcboxplot.py
|
dlcboxplot
|
bchaselab/Toxopy
|
python
|
def dlcboxplot(file, variable, ylab, comparison, jitter=False, colors=False, title=False, save=False, output_dir=None):
'\n file is typically \'dlc_all_avgs_updated.csv\'\n variable is either \'cat_distance\' or \'vel\'\n ylab is the y-axis label\n colors is a list of two colors (e.g., ["#0062FF", "#DB62FF"])\n output_dir to save the plot in a specific dir when save is True\n '
df = pd.read_csv(file)
tls = trials()
new = ['FT', 'ALONE1', 'SALINE1', 'ALONE2', 'URINE1', 'ALONE3', 'SALINE2', 'ALONE4', 'URINE2', 'ALONE5']
if (variable == 'distance'):
df = df[df['trial'].isin(tls[0::2])]
d = {}
for (i, j) in zip(new, tls):
d[j] = i
df = df.replace(d)
df = df[(df['var'] == variable)]
sns.set(style='ticks', font_scale=1)
plt.figure(figsize=(13, 5), dpi=100)
if (comparison == 'infection_status'):
(test, control) = ('Infected', 'Control')
comparing = 'infection_status'
legend = 'Infection Status'
elif (comparison == 'indoor_outdoor_status'):
(test, control) = ('Indoor-outdoor', 'Indoor')
comparing = 'indoor_outdoor_status'
legend = 'Indoor-outdoor Status'
if (colors is False):
my_pal = {control: '#00FFFF', test: '#E60E3C'}
else:
my_pal = {control: colors[0], test: colors[1]}
ax = sns.boxplot(x='trial', y='value', data=df, hue=comparing, palette=my_pal)
if (jitter is True):
sns.stripplot(x='trial', y='value', data=df, color='black', size=3, jitter=1)
if (variable != 'distance'):
for i in range((len(df['trial'].unique()) - 1)):
if (variable == 'vel'):
plt.vlines((i + 0.5), 10, 45, linestyles='solid', colors='black', alpha=0.2)
elif (variable == 'cat_distance'):
plt.vlines((i + 0.5), 0, 1.3, linestyles='solid', colors='black', alpha=0.2)
if (title is not False):
plt.title(title, fontsize=12)
else:
pass
ax.set_xlabel('Trial', fontsize=12)
ax.set_ylabel(ylab, fontsize=12)
ax.legend(title=legend)
plt.legend(title=legend)
# add significance bars and asterisks between boxes.
# [first pair, second pair], ..., [|, –], ...
if (variable == 'vel'):
l = [[7.75, 5.75], [8.25, 6.25], [26, 28], [31, 33]]
elif (variable == 'cat_distance'):
l = [[7.75, 5.75], [8.25, 6.25], [0.85, 0.9], [0.95, 1]]
for (x1, x2, y1, y2) in zip(l[0], l[1], l[2], l[3]):
sig = plt.plot([x1, x1, x2, x2], [y1, y2, y2, y1], linewidth=1, color='k')
plt.text(((x1 + x2) * 0.5), (y2 + 0), '*', ha='center', va='bottom', fontsize=18)
plt.show()
fig = ax.get_figure()
if (save is True):
def sav(myString):
return fig.savefig(myString, bbox_inches='tight', dpi=100, pad_inches=0.1)
if (output_dir is not None):
sav(f'{output_dir}/{variable}.png')
else:
sav(f'{variable}.png')
|
@task
def set_version(ctx, version):
'Set project version in ``src/robot/version.py`` file.\n\n Args:\n version: Project version to set or ``dev`` to set development version.\n\n Following PEP-440 compatible version numbers are supported:\n - Final version like 3.0 or 3.1.2.\n - Alpha, beta or release candidate with ``a``, ``b`` or ``rc`` postfix,\n respectively, and an incremented number like 3.0a1 or 3.0.1rc1.\n - Development version with ``.dev`` postfix and an incremented number like\n 3.0.dev1 or 3.1a1.dev2.\n\n When the given version is ``dev``, the existing version number is updated\n to the next suitable development version. For example, 3.0 -> 3.0.1.dev1,\n 3.1.1 -> 3.1.2.dev1, 3.2a1 -> 3.2a2.dev1, 3.2.dev1 -> 3.2.dev2.\n '
version = Version(version, VERSION_PATH, VERSION_PATTERN)
version.write()
pom = Version(str(version), POM_PATH, POM_VERSION_PATTERN)
pom.write()
print(version)
| 7,406,903,226,480,384,000
|
Set project version in ``src/robot/version.py`` file.
Args:
version: Project version to set or ``dev`` to set development version.
Following PEP-440 compatible version numbers are supported:
- Final version like 3.0 or 3.1.2.
- Alpha, beta or release candidate with ``a``, ``b`` or ``rc`` postfix,
respectively, and an incremented number like 3.0a1 or 3.0.1rc1.
- Development version with ``.dev`` postfix and an incremented number like
3.0.dev1 or 3.1a1.dev2.
When the given version is ``dev``, the existing version number is updated
to the next suitable development version. For example, 3.0 -> 3.0.1.dev1,
3.1.1 -> 3.1.2.dev1, 3.2a1 -> 3.2a2.dev1, 3.2.dev1 -> 3.2.dev2.
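These tasks are normally run through the Invoke CLI (for example ``invoke set-version dev``); a programmatic sketch, assuming this ``tasks.py`` is importable:

```python
from invoke import Context
from tasks import set_version

set_version(Context(), 'dev')    # e.g. 3.1.1 -> 3.1.2.dev1
set_version(Context(), '3.2a1')  # set an explicit pre-release version
```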
|
tasks.py
|
set_version
|
ConradDjedjebi/robotframework
|
python
|
@task
def set_version(ctx, version):
'Set project version in ``src/robot/version.py`` file.\n\n Args:\n version: Project version to set or ``dev`` to set development version.\n\n Following PEP-440 compatible version numbers are supported:\n - Final version like 3.0 or 3.1.2.\n - Alpha, beta or release candidate with ``a``, ``b`` or ``rc`` postfix,\n respectively, and an incremented number like 3.0a1 or 3.0.1rc1.\n - Development version with ``.dev`` postfix and an incremented number like\n 3.0.dev1 or 3.1a1.dev2.\n\n When the given version is ``dev``, the existing version number is updated\n to the next suitable development version. For example, 3.0 -> 3.0.1.dev1,\n 3.1.1 -> 3.1.2.dev1, 3.2a1 -> 3.2a2.dev1, 3.2.dev1 -> 3.2.dev2.\n '
version = Version(version, VERSION_PATH, VERSION_PATTERN)
version.write()
pom = Version(str(version), POM_PATH, POM_VERSION_PATTERN)
pom.write()
print(version)
|
@task
def print_version(ctx):
'Print the current project version.'
print(Version(path=VERSION_PATH, pattern=VERSION_PATTERN))
| -8,988,342,015,607,977,000
|
Print the current project version.
|
tasks.py
|
print_version
|
ConradDjedjebi/robotframework
|
python
|
@task
def print_version(ctx):
print(Version(path=VERSION_PATH, pattern=VERSION_PATTERN))
|
@task
def library_docs(ctx, name):
'Generate standard library documentation.\n\n Args:\n name: Name of the library or ``all`` to generate docs for all libs.\n Name is case-insensitive and can be shortened as long as it\n is a unique prefix. For example, ``b`` is equivalent to\n ``BuiltIn`` and ``di`` equivalent to ``Dialogs``.\n '
libraries = ['BuiltIn', 'Collections', 'DateTime', 'Dialogs', 'OperatingSystem', 'Process', 'Screenshot', 'String', 'Telnet', 'XML']
name = name.lower()
if (name != 'all'):
libraries = [lib for lib in libraries if lib.lower().startswith(name)]
if (len(libraries) != 1):
raise Exit(f"'{name}' is not a unique library prefix.")
for lib in libraries:
libdoc(lib, str(Path(f'doc/libraries/{lib}.html')))
| 1,785,087,739,899,642,000
|
Generate standard library documentation.
Args:
name: Name of the library or ``all`` to generate docs for all libs.
Name is case-insensitive and can be shortened as long as it
is a unique prefix. For example, ``b`` is equivalent to
``BuiltIn`` and ``di`` equivalent to ``Dialogs``.
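The prefix matching makes the following calls equivalent ways to build the BuiltIn docs (a sketch; an ambiguous prefix raises ``Exit``):

```python
from invoke import Context
from tasks import library_docs

library_docs(Context(), 'BuiltIn')  # full name
library_docs(Context(), 'builtin')  # case-insensitive
library_docs(Context(), 'b')        # unique prefix
# library_docs(Context(), 'd') would raise Exit:
# 'd' matches both DateTime and Dialogs.
```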
|
tasks.py
|
library_docs
|
ConradDjedjebi/robotframework
|
python
|
@task
def library_docs(ctx, name):
'Generate standard library documentation.\n\n Args:\n name: Name of the library or ``all`` to generate docs for all libs.\n Name is case-insensitive and can be shortened as long as it\n is a unique prefix. For example, ``b`` is equivalent to\n ``BuiltIn`` and ``di`` equivalent to ``Dialogs``.\n '
libraries = ['BuiltIn', 'Collections', 'DateTime', 'Dialogs', 'OperatingSystem', 'Process', 'Screenshot', 'String', 'Telnet', 'XML']
name = name.lower()
if (name != 'all'):
libraries = [lib for lib in libraries if lib.lower().startswith(name)]
if (len(libraries) != 1):
raise Exit(f"'{name}' is not a unique library prefix.")
for lib in libraries:
libdoc(lib, str(Path(f'doc/libraries/{lib}.html')))
|
@task
def release_notes(ctx, version=None, username=None, password=None, write=False):
"Generate release notes based on issues in the issue tracker.\n\n Args:\n version: Generate release notes for this version. If not given,\n generated them for the current version.\n username: GitHub username.\n password: GitHub password.\n write: When set to True, write release notes to a file overwriting\n possible existing file. Otherwise just print them to the\n terminal.\n\n Username and password can also be specified using ``GITHUB_USERNAME`` and\n ``GITHUB_PASSWORD`` environment variable, respectively. If they aren't\n specified at all, communication with GitHub is anonymous and typically\n pretty slow.\n "
version = Version(version, VERSION_PATH, VERSION_PATTERN)
file = (RELEASE_NOTES_PATH if write else sys.stdout)
generator = ReleaseNotesGenerator(REPOSITORY, RELEASE_NOTES_TITLE, RELEASE_NOTES_INTRO)
generator.generate(version, username, password, file)
| 196,812,704,242,137,700
|
Generate release notes based on issues in the issue tracker.
Args:
version: Generate release notes for this version. If not given,
generate them for the current version.
username: GitHub username.
password: GitHub password.
write: When set to True, write release notes to a file overwriting
possible existing file. Otherwise just print them to the
terminal.
Username and password can also be specified using ``GITHUB_USERNAME`` and
``GITHUB_PASSWORD`` environment variables, respectively. If they aren't
specified at all, communication with GitHub is anonymous and typically
pretty slow.
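A sketch of the environment-variable alternative (placeholder values; the password is deliberately elided):

```python
import os
from invoke import Context
from tasks import release_notes

os.environ['GITHUB_USERNAME'] = 'your-username'
os.environ['GITHUB_PASSWORD'] = '...'
release_notes(Context(), version='3.2a1', write=True)
```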
|
tasks.py
|
release_notes
|
ConradDjedjebi/robotframework
|
python
|
@task
def release_notes(ctx, version=None, username=None, password=None, write=False):
"Generate release notes based on issues in the issue tracker.\n\n Args:\n version: Generate release notes for this version. If not given,\n generated them for the current version.\n username: GitHub username.\n password: GitHub password.\n write: When set to True, write release notes to a file overwriting\n possible existing file. Otherwise just print them to the\n terminal.\n\n Username and password can also be specified using ``GITHUB_USERNAME`` and\n ``GITHUB_PASSWORD`` environment variable, respectively. If they aren't\n specified at all, communication with GitHub is anonymous and typically\n pretty slow.\n "
version = Version(version, VERSION_PATH, VERSION_PATTERN)
file = (RELEASE_NOTES_PATH if write else sys.stdout)
generator = ReleaseNotesGenerator(REPOSITORY, RELEASE_NOTES_TITLE, RELEASE_NOTES_INTRO)
generator.generate(version, username, password, file)
|
@task
def init_labels(ctx, username=None, password=None):
'Initialize project by setting labels in the issue tracker.\n\n Args:\n username: GitHub username.\n password: GitHub password.\n\n Username and password can also be specified using ``GITHUB_USERNAME`` and\n ``GITHUB_PASSWORD`` environment variable, respectively.\n\n Should only be executed once when taking ``rellu`` tooling to use or\n when labels it uses have changed.\n '
initialize_labels(REPOSITORY, username, password)
| -2,619,656,879,941,839,400
|
Initialize project by setting labels in the issue tracker.
Args:
username: GitHub username.
password: GitHub password.
Username and password can also be specified using ``GITHUB_USERNAME`` and
``GITHUB_PASSWORD`` environment variables, respectively.
Should only be executed once, when taking the ``rellu`` tooling into use or
when the labels it uses have changed.
|
tasks.py
|
init_labels
|
ConradDjedjebi/robotframework
|
python
|
@task
def init_labels(ctx, username=None, password=None):
'Initialize project by setting labels in the issue tracker.\n\n Args:\n username: GitHub username.\n password: GitHub password.\n\n Username and password can also be specified using ``GITHUB_USERNAME`` and\n ``GITHUB_PASSWORD`` environment variable, respectively.\n\n Should only be executed once when taking ``rellu`` tooling to use or\n when labels it uses have changed.\n '
initialize_labels(REPOSITORY, username, password)
|
@task
def jar(ctx, jython_version='2.7.0', pyyaml_version='3.11', remove_dist=False):
"Create JAR distribution.\n\n Downloads Jython JAR and PyYAML if needed.\n\n Args:\n jython_version: Jython version to use as a base. Must match version in\n `jython-standalone-<version>.jar` found from Maven central.\n pyyaml_version: Version of PyYAML that will be included in the\n standalone jar. The version must be available from PyPI.\n remove_dist: Control is 'dist' directory initially removed or not.\n "
clean(ctx, remove_dist, create_dirs=True)
jython_jar = get_jython_jar(jython_version)
print(f"Using '{jython_jar}'.")
compile_java_files(ctx, jython_jar)
unzip_jar(jython_jar)
copy_robot_files()
pyaml_archive = get_pyyaml(pyyaml_version)
extract_and_copy_pyyaml_files(pyyaml_version, pyaml_archive)
compile_python_files(ctx, jython_jar)
version = Version(path=VERSION_PATH, pattern=VERSION_PATTERN)
create_robot_jar(ctx, str(version))
| -4,522,507,211,498,932,000
|
Create JAR distribution.
Downloads Jython JAR and PyYAML if needed.
Args:
jython_version: Jython version to use as a base. Must match version in
`jython-standalone-<version>.jar` found in Maven Central.
pyyaml_version: Version of PyYAML that will be included in the
standalone jar. The version must be available from PyPI.
remove_dist: Controls whether the 'dist' directory is initially removed or not.
|
tasks.py
|
jar
|
ConradDjedjebi/robotframework
|
python
|
@task
def jar(ctx, jython_version='2.7.0', pyyaml_version='3.11', remove_dist=False):
"Create JAR distribution.\n\n Downloads Jython JAR and PyYAML if needed.\n\n Args:\n jython_version: Jython version to use as a base. Must match version in\n `jython-standalone-<version>.jar` found from Maven central.\n pyyaml_version: Version of PyYAML that will be included in the\n standalone jar. The version must be available from PyPI.\n remove_dist: Control is 'dist' directory initially removed or not.\n "
clean(ctx, remove_dist, create_dirs=True)
jython_jar = get_jython_jar(jython_version)
print(f"Using '{jython_jar}'.")
compile_java_files(ctx, jython_jar)
unzip_jar(jython_jar)
copy_robot_files()
pyaml_archive = get_pyyaml(pyyaml_version)
extract_and_copy_pyyaml_files(pyyaml_version, pyaml_archive)
compile_python_files(ctx, jython_jar)
version = Version(path=VERSION_PATH, pattern=VERSION_PATTERN)
create_robot_jar(ctx, str(version))
|
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
    'Set up the Time and Date sensor.'
if (hass.config.time_zone is None):
_LOGGER.error('Timezone is not set in Home Assistant configuration')
return False
devices = []
for variable in config[CONF_DISPLAY_OPTIONS]:
devices.append(TimeDateSensor(variable))
hass.loop.create_task(async_add_devices(devices, True))
return True
| -7,925,590,846,427,815,000
|
Set up the Time and Date sensor.
|
homeassistant/components/sensor/time_date.py
|
async_setup_platform
|
mweinelt/home-assistant
|
python
|
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
if (hass.config.time_zone is None):
_LOGGER.error('Timezone is not set in Home Assistant configuration')
return False
devices = []
for variable in config[CONF_DISPLAY_OPTIONS]:
devices.append(TimeDateSensor(variable))
hass.loop.create_task(async_add_devices(devices, True))
return True
|
def __init__(self, option_type):
'Initialize the sensor.'
self._name = OPTION_TYPES[option_type]
self.type = option_type
self._state = None
| -915,315,216,129,578,400
|
Initialize the sensor.
|
homeassistant/components/sensor/time_date.py
|
__init__
|
mweinelt/home-assistant
|
python
|
def __init__(self, option_type):
self._name = OPTION_TYPES[option_type]
self.type = option_type
self._state = None
|
@property
def name(self):
'Return the name of the sensor.'
return self._name
| 8,691,954,631,286,512,000
|
Return the name of the sensor.
|
homeassistant/components/sensor/time_date.py
|
name
|
mweinelt/home-assistant
|
python
|
@property
def name(self):
return self._name
|
@property
def state(self):
'Return the state of the sensor.'
return self._state
| -2,324,550,726,442,955,000
|
Return the state of the sensor.
|
homeassistant/components/sensor/time_date.py
|
state
|
mweinelt/home-assistant
|
python
|
@property
def state(self):
return self._state
|
@property
def icon(self):
'Icon to use in the frontend, if any.'
if (('date' in self.type) and ('time' in self.type)):
return 'mdi:calendar-clock'
elif ('date' in self.type):
return 'mdi:calendar'
else:
return 'mdi:clock'
| -2,937,875,691,628,948,000
|
Icon to use in the frontend, if any.
|
homeassistant/components/sensor/time_date.py
|
icon
|
mweinelt/home-assistant
|
python
|
@property
def icon(self):
if (('date' in self.type) and ('time' in self.type)):
return 'mdi:calendar-clock'
elif ('date' in self.type):
return 'mdi:calendar'
else:
return 'mdi:clock'
|
@asyncio.coroutine
def async_update(self):
    'Get the latest data and update the state.'
time_date = dt_util.utcnow()
time = dt_util.as_local(time_date).strftime(TIME_STR_FORMAT)
time_utc = time_date.strftime(TIME_STR_FORMAT)
date = dt_util.as_local(time_date).date().isoformat()
time_bmt = (time_date + timedelta(hours=1))
delta = timedelta(hours=time_bmt.hour, minutes=time_bmt.minute, seconds=time_bmt.second, microseconds=time_bmt.microsecond)
beat = int(((delta.seconds + (delta.microseconds / 1000000.0)) / 86.4))
if (self.type == 'time'):
self._state = time
elif (self.type == 'date'):
self._state = date
elif (self.type == 'date_time'):
self._state = '{}, {}'.format(date, time)
elif (self.type == 'time_date'):
self._state = '{}, {}'.format(time, date)
elif (self.type == 'time_utc'):
self._state = time_utc
elif (self.type == 'beat'):
self._state = '@{0:03d}'.format(beat)
| -2,005,476,636,452,129,800
|
Get the latest data and update the state.
|
homeassistant/components/sensor/time_date.py
|
async_update
|
mweinelt/home-assistant
|
python
|
@asyncio.coroutine
def async_update(self):
time_date = dt_util.utcnow()
time = dt_util.as_local(time_date).strftime(TIME_STR_FORMAT)
time_utc = time_date.strftime(TIME_STR_FORMAT)
date = dt_util.as_local(time_date).date().isoformat()
time_bmt = (time_date + timedelta(hours=1))
delta = timedelta(hours=time_bmt.hour, minutes=time_bmt.minute, seconds=time_bmt.second, microseconds=time_bmt.microsecond)
beat = int(((delta.seconds + (delta.microseconds / 1000000.0)) / 86.4))
if (self.type == 'time'):
self._state = time
elif (self.type == 'date'):
self._state = date
elif (self.type == 'date_time'):
self._state = '{}, {}'.format(date, time)
elif (self.type == 'time_date'):
self._state = '{}, {}'.format(time, date)
elif (self.type == 'time_utc'):
self._state = time_utc
elif (self.type == 'beat'):
self._state = '@{0:03d}'.format(beat)
|
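The ``beat`` branch of ``async_update`` above computes Swatch Internet Time: the timestamp is shifted one hour to Biel Mean Time (UTC+1) and the day is divided into 1000 beats of 86.4 seconds each. A minimal standalone sketch of that arithmetic (the helper name is illustrative, not part of the component; microseconds are omitted for brevity):

from datetime import datetime, timedelta, timezone

def swatch_beat(now_utc):
    # Shift to Biel Mean Time (UTC+1), then count whole seconds since midnight.
    bmt = now_utc + timedelta(hours=1)
    seconds = bmt.hour * 3600 + bmt.minute * 60 + bmt.second
    # One beat is 86.4 s, so a day holds exactly 1000 beats.
    return '@{0:03d}'.format(int(seconds / 86.4))

print(swatch_beat(datetime.now(timezone.utc)))  # e.g. '@595'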
def get_local_ip():
    'Rewrite this stub; it is used in code that is not checked in yet.'
return '127.0.0.1'
| -8,942,152,617,992,478,000
|
Rewrite this stub; it is used in code that is not checked in yet.
|
CMR/python/cmr/util/network.py
|
get_local_ip
|
nasa/eo-metadata-tools
|
python
|
def get_local_ip():
' '
return '127.0.0.1'
|
def value_to_param(key, value):
'\n Convert a key value pair into a URL parameter pair\n '
value = str(value)
encoded_key = urllib.parse.quote(key)
encoded_value = urllib.parse.quote(value)
result = ((encoded_key + '=') + encoded_value)
return result
| -646,888,271,832,497,000
|
Convert a key value pair into a URL parameter pair
|
CMR/python/cmr/util/network.py
|
value_to_param
|
nasa/eo-metadata-tools
|
python
|
def value_to_param(key, value):
'\n \n '
value = str(value)
encoded_key = urllib.parse.quote(key)
encoded_value = urllib.parse.quote(value)
result = ((encoded_key + '=') + encoded_value)
return result
|
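A quick usage sketch for ``value_to_param`` above: both key and value are percent-encoded before joining, so spaces and commas survive transport (the sample values are illustrative):

import urllib.parse

# Equivalent of value_to_param('bounding box', '10,20 30,40'):
encoded = urllib.parse.quote('bounding box') + '=' + urllib.parse.quote('10,20 30,40')
print(encoded)  # bounding%20box=10%2C20%2030%2C40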
def expand_parameter_to_parameters(key, parameter):
'\n Convert a list of values into a list of URL parameters\n '
result = []
if isinstance(parameter, list):
for item in parameter:
param = value_to_param(key, item)
result.append(param)
else:
value = str(parameter)
encoded_key = urllib.parse.quote(key)
encoded_value = urllib.parse.quote(value)
result.append(((encoded_key + '=') + encoded_value))
return result
| 7,476,469,016,591,322,000
|
Convert a list of values into a list of URL parameters
|
CMR/python/cmr/util/network.py
|
expand_parameter_to_parameters
|
nasa/eo-metadata-tools
|
python
|
def expand_parameter_to_parameters(key, parameter):
'\n \n '
result = []
if isinstance(parameter, list):
for item in parameter:
param = value_to_param(key, item)
result.append(param)
else:
value = str(parameter)
encoded_key = urllib.parse.quote(key)
encoded_value = urllib.parse.quote(value)
result.append(((encoded_key + '=') + encoded_value))
return result
|
def expand_query_to_parameters(query=None):
' Convert a dictionary to URL parameters '
params = []
if (query is None):
return ''
keys = sorted(query.keys())
for key in keys:
value = query[key]
params = (params + expand_parameter_to_parameters(key, value))
return '&'.join(params)
| 3,580,768,496,016,687,000
|
Convert a dictionary to URL parameters
|
CMR/python/cmr/util/network.py
|
expand_query_to_parameters
|
nasa/eo-metadata-tools
|
python
|
def expand_query_to_parameters(query=None):
' '
params = []
if (query is None):
        return ''
keys = sorted(query.keys())
for key in keys:
value = query[key]
params = (params + expand_parameter_to_parameters(key, value))
return '&'.join(params)
|
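Together, ``expand_parameter_to_parameters`` and ``expand_query_to_parameters`` turn a dictionary into a deterministic query string: keys are sorted and list values expand into repeated parameters. A condensed, self-contained restatement of that behavior (the sample keys are illustrative):

import urllib.parse

def expand_query(query):
    # Mirrors expand_query_to_parameters: sort keys, expand lists, join with '&'.
    params = []
    for key in sorted(query):
        values = query[key] if isinstance(query[key], list) else [query[key]]
        for value in values:
            params.append(urllib.parse.quote(key) + '=' + urllib.parse.quote(str(value)))
    return '&'.join(params)

print(expand_query({'provider': 'PODAAC', 'keyword': ['ocean', 'ice']}))
# keyword=ocean&keyword=ice&provider=PODAAC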
def apply_headers_to_request(req, headers):
    'Apply headers to a urllib request object.'
if ((headers is not None) and (req is not None)):
for key in headers:
value = headers[key]
if ((value is not None) and (len(value) > 0)):
req.add_header(key, value)
| -7,944,122,521,479,747,000
|
Apply headers to a urllib request object
|
CMR/python/cmr/util/network.py
|
apply_headers_to_request
|
nasa/eo-metadata-tools
|
python
|
def apply_headers_to_request(req, headers):
' '
if ((headers is not None) and (req is not None)):
for key in headers:
value = headers[key]
if ((value is not None) and (len(value) > 0)):
req.add_header(key, value)
|
def transform_results(results, keys_of_interest):
'\n Take a list of results and convert them to a multi valued dictionary. The\n real world use case is to take values from a list of collections and pass\n them to a granule search.\n\n [{key1:value1},{key1:value2},...] -> {"key1": [value1,value2]} ->\n &key1=value1&key1=value2 ( via expand_query_to_parameters() )\n '
params = {}
for item in results:
for key in keys_of_interest:
if (key in item):
value = item[key]
if (key in params):
params[key].append(value)
else:
params[key] = [value]
return params
| 5,444,962,081,587,083,000
|
Take a list of results and convert them to a multi valued dictionary. The
real world use case is to take values from a list of collections and pass
them to a granule search.
[{key1:value1},{key1:value2},...] -> {"key1": [value1,value2]} ->
&key1=value1&key1=value2 ( via expand_query_to_parameters() )
|
CMR/python/cmr/util/network.py
|
transform_results
|
nasa/eo-metadata-tools
|
python
|
def transform_results(results, keys_of_interest):
'\n Take a list of results and convert them to a multi valued dictionary. The\n real world use case is to take values from a list of collections and pass\n them to a granule search.\n\n [{key1:value1},{key1:value2},...] -> {"key1": [value1,value2]} ->\n &key1=value1&key1=value2 ( via expand_query_to_parameters() )\n '
params = {}
for item in results:
for key in keys_of_interest:
if (key in item):
value = item[key]
if (key in params):
params[key].append(value)
else:
params[key] = [value]
return params
|
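A worked example of ``transform_results``: values for each key of interest are collected across all result items into one multi-valued dictionary. The ``concept-id`` key below is illustrative; the loop mirrors the function body:

results = [{'concept-id': 'C1'}, {'concept-id': 'C2', 'title': 'x'}]
params = {}
for item in results:
    for key in ['concept-id']:
        if key in item:
            # Append to an existing list or start a new one, as above.
            params.setdefault(key, []).append(item[key])
print(params)  # {'concept-id': ['C1', 'C2']}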
def config_to_header(config, source_key, headers, destination_key=None, default=None):
    '\n Copy a value in the config into a header dictionary for use by urllib. Written\n to reduce boilerplate code\n\n config[key] -> [or default] -> [rename] -> headers[key]\n\n Parameters:\n config(dictionary): where to look for values\n source_key(string): name of the configuration value in config\n headers(dictionary): where to copy values to\n destination_key(string): name of key to save to in headers\n default(string): value to use if the value cannot be found in config\n '
config = common.always(config)
if (destination_key is None):
destination_key = source_key
value = config.get(source_key, default)
if ((destination_key is not None) and (value is not None)):
if (headers is None):
headers = {}
headers[destination_key] = value
return headers
| -482,853,321,896,843,800
|
Copy a value in the config into a header dictionary for use by urllib. Written
to reduce boilerplate code
config[key] -> [or default] -> [rename] -> headers[key]
Parameters:
config(dictionary): where to look for values
source_key(string): name of the configuration value in config
headers(dictionary): where to copy values to
destination_key(string): name of key to save to in headers
default(string): value to use if the value cannot be found in config
|
CMR/python/cmr/util/network.py
|
config_to_header
|
nasa/eo-metadata-tools
|
python
|
def config_to_header(config, source_key, headers, destination_key=None, default=None):
    '\n Copy a value in the config into a header dictionary for use by urllib. Written\n to reduce boilerplate code\n\n config[key] -> [or default] -> [rename] -> headers[key]\n\n Parameters:\n config(dictionary): where to look for values\n source_key(string): name of the configuration value in config\n headers(dictionary): where to copy values to\n destination_key(string): name of key to save to in headers\n default(string): value to use if the value cannot be found in config\n '
config = common.always(config)
if (destination_key is None):
destination_key = source_key
value = config.get(source_key, default)
if ((destination_key is not None) and (value is not None)):
if (headers is None):
headers = {}
headers[destination_key] = value
return headers
|
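A usage sketch for ``config_to_header`` showing the copy-with-rename path (the key names are illustrative): the value is looked up under ``source_key``, falls back to ``default``, and is stored under ``destination_key``:

# Equivalent of:
# headers = config_to_header({'cmr-token': 'secret'}, 'cmr-token', {},
#                            destination_key='Authorization')
config = {'cmr-token': 'secret'}
headers = {}
value = config.get('cmr-token', None)
if value is not None:
    headers['Authorization'] = value
print(headers)  # {'Authorization': 'secret'}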
def post(url, body, accept=None, headers=None):
    '\n Make a basic HTTP call to CMR using the POST action\n Parameters:\n url (string): resource to post to\n body (dictionary): parameters to send, or string if raw text to be sent\n accept (string): encoding of the returned data, some form of json is expected\n headers (dictionary): HTTP headers to apply\n '
if isinstance(body, str):
data = body
else:
data = expand_query_to_parameters(body)
data = data.encode('utf-8')
logger.debug(' Headers->CMR= %s', headers)
logger.debug(' POST Data= %s', data)
req = urllib.request.Request(url, data)
if (accept is not None):
apply_headers_to_request(req, {'Accept': accept})
apply_headers_to_request(req, headers)
try:
resp = urllib.request.urlopen(req)
response = resp.read()
raw_response = response.decode('utf-8')
if (resp.status == 200):
obj_json = json.loads(raw_response)
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
if (logger.getEffectiveLevel() == logging.DEBUG):
stringified = str(common.mask_dictionary(head_list, ['cmr-token', 'authorization']))
logger.debug(' CMR->Headers = %s', stringified)
obj_json['http-headers'] = head_list
elif (resp.status == 204):
obj_json = {}
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
obj_json['http-headers'] = head_list
else:
if (raw_response.startswith('{') and raw_response.endswith('}')):
return json.loads(raw_response)
return raw_response
return obj_json
except urllib.error.HTTPError as exception:
raw_response = exception.read()
try:
obj_json = json.loads(raw_response)
obj_json['code'] = exception.code
obj_json['reason'] = exception.reason
return obj_json
except json.decoder.JSONDecodeError as err:
return err
return raw_response
| 4,660,827,970,494,226,000
|
Make a basic HTTP call to CMR using the POST action
Parameters:
url (string): resource to post to
body (dictionary): parameters to send, or string if raw text to be sent
accept (string): encoding of the returned data, some form of json is expected
headers (dictionary): HTTP headers to apply
|
CMR/python/cmr/util/network.py
|
post
|
nasa/eo-metadata-tools
|
python
|
def post(url, body, accept=None, headers=None):
    '\n Make a basic HTTP call to CMR using the POST action\n Parameters:\n url (string): resource to post to\n body (dictionary): parameters to send, or string if raw text to be sent\n accept (string): encoding of the returned data, some form of json is expected\n headers (dictionary): HTTP headers to apply\n '
if isinstance(body, str):
data = body
else:
data = expand_query_to_parameters(body)
data = data.encode('utf-8')
logger.debug(' Headers->CMR= %s', headers)
logger.debug(' POST Data= %s', data)
req = urllib.request.Request(url, data)
if (accept is not None):
apply_headers_to_request(req, {'Accept': accept})
apply_headers_to_request(req, headers)
try:
resp = urllib.request.urlopen(req)
response = resp.read()
raw_response = response.decode('utf-8')
if (resp.status == 200):
obj_json = json.loads(raw_response)
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
if (logger.getEffectiveLevel() == logging.DEBUG):
stringified = str(common.mask_dictionary(head_list, ['cmr-token', 'authorization']))
logger.debug(' CMR->Headers = %s', stringified)
obj_json['http-headers'] = head_list
elif (resp.status == 204):
obj_json = {}
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
obj_json['http-headers'] = head_list
else:
if (raw_response.startswith('{') and raw_response.endswith('}')):
return json.loads(raw_response)
return raw_response
return obj_json
except urllib.error.HTTPError as exception:
raw_response = exception.read()
try:
obj_json = json.loads(raw_response)
obj_json['code'] = exception.code
obj_json['reason'] = exception.reason
return obj_json
except json.decoder.JSONDecodeError as err:
return err
return raw_response
|
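Before sending, ``post`` form-encodes a dictionary body via ``expand_query_to_parameters`` and then UTF-8 encodes it; a string body is sent as-is. A standalone sketch of just that encoding step (the parameter names are illustrative):

import urllib.parse

body = {'keyword': 'ocean', 'page_size': 10}
data = '&'.join(urllib.parse.quote(k) + '=' + urllib.parse.quote(str(body[k]))
                for k in sorted(body)).encode('utf-8')
print(data)  # b'keyword=ocean&page_size=10'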
def get(url, accept=None, headers=None):
    '\n Make a basic HTTP call to CMR using the GET action\n Parameters:\n url (string): resource to get\n accept (string): encoding of the returned data, some form of json is expected\n headers (dictionary): HTTP headers to apply\n '
logger.debug(' Headers->CMR= %s', headers)
req = urllib.request.Request(url)
if (accept is not None):
apply_headers_to_request(req, {'Accept': accept})
apply_headers_to_request(req, headers)
try:
resp = urllib.request.urlopen(req)
response = resp.read()
raw_response = response.decode('utf-8')
if (resp.status == 200):
obj_json = json.loads(raw_response)
if isinstance(obj_json, list):
data = obj_json
obj_json = {'hits': len(data), 'items': data}
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
if (logger.getEffectiveLevel() == logging.DEBUG):
stringified = str(common.mask_dictionary(head_list, ['cmr-token', 'authorization']))
logger.debug(' CMR->Headers = %s', stringified)
elif (resp.status == 204):
obj_json = {}
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
obj_json['http-headers'] = head_list
else:
if (raw_response.startswith('{') and raw_response.endswith('}')):
return json.loads(raw_response)
return raw_response
return obj_json
except urllib.error.HTTPError as exception:
raw_response = exception.read()
try:
obj_json = json.loads(raw_response)
obj_json['code'] = exception.code
obj_json['reason'] = exception.reason
return obj_json
except json.decoder.JSONDecodeError as err:
return err
return raw_response
| -1,446,447,083,311,810,600
|
Make a basic HTTP call to CMR using the GET action
Parameters:
url (string): resource to get
accept (string): encoding of the returned data, some form of json is expected
headers (dictionary): HTTP headers to apply
|
CMR/python/cmr/util/network.py
|
get
|
nasa/eo-metadata-tools
|
python
|
def get(url, accept=None, headers=None):
    '\n Make a basic HTTP call to CMR using the GET action\n Parameters:\n url (string): resource to get\n accept (string): encoding of the returned data, some form of json is expected\n headers (dictionary): HTTP headers to apply\n '
logger.debug(' Headers->CMR= %s', headers)
req = urllib.request.Request(url)
if (accept is not None):
apply_headers_to_request(req, {'Accept': accept})
apply_headers_to_request(req, headers)
try:
resp = urllib.request.urlopen(req)
response = resp.read()
raw_response = response.decode('utf-8')
if (resp.status == 200):
obj_json = json.loads(raw_response)
if isinstance(obj_json, list):
data = obj_json
obj_json = {'hits': len(data), 'items': data}
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
if (logger.getEffectiveLevel() == logging.DEBUG):
stringified = str(common.mask_dictionary(head_list, ['cmr-token', 'authorization']))
logger.debug(' CMR->Headers = %s', stringified)
elif (resp.status == 204):
obj_json = {}
head_list = {}
for head in resp.getheaders():
head_list[head[0]] = head[1]
obj_json['http-headers'] = head_list
else:
if (raw_response.startswith('{') and raw_response.endswith('}')):
return json.loads(raw_response)
return raw_response
return obj_json
except urllib.error.HTTPError as exception:
raw_response = exception.read()
try:
obj_json = json.loads(raw_response)
obj_json['code'] = exception.code
obj_json['reason'] = exception.reason
return obj_json
except json.decoder.JSONDecodeError as err:
return err
return raw_response
|
def __init__(self, options):
'\n Constructor\n '
    '\n Initialize the ROC SDK. Looks for the license file; optionally a log file can be provided. If it cannot find the license then it will quit; roc_ensure catches the error and aborts.\n '
global roc
import roc as _local_roc
roc = _local_roc
if (os.environ.get('ROC_LIC') is not None):
roc.roc_ensure(roc.roc_initialize(None, None))
else:
self.license_file = (roc.__file__.split('python')[0] + 'ROC.lic')
roc.roc_ensure(roc.roc_initialize(self.license_file.encode('utf-8'), None))
print('ROC SDK Initialized')
self.img_quality = options.img_quality
self.num_faces = options.num_faces
self.min_face_size = options.min_face_size
self.detection_threshold = self.recommendedDetectionThreshold()
if (self.img_quality is None):
self.img_quality = self.recommendedImgQuality()
if (self.num_faces is None):
self.num_faces = self.recommendedMaxFacesDetected()
    '\n ROC_Frontal : ROC frontal face detector (-30 to +30 degrees yaw)\n ROC_FR : Represent in-the-wild-faces for comparison\n Note : Non-frontal faces detected by ROC_FULL and ROC_PARTIAL are not reliable for recognition.\n Therefore we advise against using ROC_FULL or ROC_PARTIAL in conjunction with ROC_FR or ROC_ID.\n ROC_FULL : ROC face detector (-100 to +100 degrees yaw)\n ROC_DEMOGRAPHICS - Return age, gender, sex\n ROC_PITCHYAW - Returns yaw and pitch\n '
self.algorithm_id_detect = roc.ROC_FULL
self.algorithm_id_extract = ((((roc.ROC_MANUAL | roc.ROC_FR) | roc.ROC_DEMOGRAPHICS) | roc.ROC_LANDMARKS) | roc.ROC_PITCHYAW)
roc.roc_ensure(roc.roc_preload(self.algorithm_id_detect))
roc.roc_ensure(roc.roc_preload(self.algorithm_id_extract))
| -5,256,902,782,774,535,000
|
Constructor
|
src/faro/face_workers/RankOneFaceWorker.py
|
__init__
|
ORNL/faro
|
python
|
def __init__(self, options):
'\n \n '
    '\n Initialize the ROC SDK. Looks for the license file; optionally a log file can be provided. If it cannot find the license then it will quit; roc_ensure catches the error and aborts.\n '
global roc
import roc as _local_roc
roc = _local_roc
if (os.environ.get('ROC_LIC') is not None):
roc.roc_ensure(roc.roc_initialize(None, None))
else:
self.license_file = (roc.__file__.split('python')[0] + 'ROC.lic')
roc.roc_ensure(roc.roc_initialize(self.license_file.encode('utf-8'), None))
print('ROC SDK Initialized')
self.img_quality = options.img_quality
self.num_faces = options.num_faces
self.min_face_size = options.min_face_size
self.detection_threshold = self.recommendedDetectionThreshold()
if (self.img_quality is None):
self.img_quality = self.recommendedImgQuality()
if (self.num_faces is None):
self.num_faces = self.recommendedMaxFacesDetected()
    '\n ROC_Frontal : ROC frontal face detector (-30 to +30 degrees yaw)\n ROC_FR : Represent in-the-wild-faces for comparison\n Note : Non-frontal faces detected by ROC_FULL and ROC_PARTIAL are not reliable for recognition.\n Therefore we advise against using ROC_FULL or ROC_PARTIAL in conjunction with ROC_FR or ROC_ID.\n ROC_FULL : ROC face detector (-100 to +100 degrees yaw)\n ROC_DEMOGRAPHICS - Return age, gender, sex\n ROC_PITCHYAW - Returns yaw and pitch\n '
self.algorithm_id_detect = roc.ROC_FULL
self.algorithm_id_extract = ((((roc.ROC_MANUAL | roc.ROC_FR) | roc.ROC_DEMOGRAPHICS) | roc.ROC_LANDMARKS) | roc.ROC_PITCHYAW)
roc.roc_ensure(roc.roc_preload(self.algorithm_id_detect))
roc.roc_ensure(roc.roc_preload(self.algorithm_id_extract))
|
def _rocFlatten(self, tmpl):
'\n Converts roc template to serialized data.\n Datatype = bytes\n '
buffer_size = roc.new_size_t()
roc.roc_flattened_bytes(tmpl, buffer_size)
buffer_size_int = roc.size_t_value(buffer_size)
roc_buffer_src = roc.new_uint8_t_array(buffer_size_int)
roc.roc_flatten(tmpl, roc_buffer_src)
native_buffer = roc.cdata(roc_buffer_src, buffer_size_int)
roc.delete_size_t(buffer_size)
roc.delete_uint8_t_array(roc_buffer_src)
return native_buffer
| -7,773,845,692,104,771,000
|
Converts roc template to serialized data.
Datatype = bytes
|
src/faro/face_workers/RankOneFaceWorker.py
|
_rocFlatten
|
ORNL/faro
|
python
|
def _rocFlatten(self, tmpl):
'\n Converts roc template to serialized data.\n Datatype = bytes\n '
buffer_size = roc.new_size_t()
roc.roc_flattened_bytes(tmpl, buffer_size)
buffer_size_int = roc.size_t_value(buffer_size)
roc_buffer_src = roc.new_uint8_t_array(buffer_size_int)
roc.roc_flatten(tmpl, roc_buffer_src)
native_buffer = roc.cdata(roc_buffer_src, buffer_size_int)
roc.delete_size_t(buffer_size)
roc.delete_uint8_t_array(roc_buffer_src)
return native_buffer
|
def _rocUnFlatten(self, buff, template_dst):
'\n Converts serialized data back to roc template.\n '
roc_buffer_dst = roc.new_uint8_t_array((len(buff) + 1))
roc.memmove(roc_buffer_dst, buff)
roc.roc_unflatten(roc_buffer_dst, template_dst)
roc.delete_uint8_t_array(roc_buffer_dst)
return template_dst
| -7,051,053,376,622,155,000
|
Converts serialized data back to roc template.
|
src/faro/face_workers/RankOneFaceWorker.py
|
_rocUnFlatten
|
ORNL/faro
|
python
|
def _rocUnFlatten(self, buff, template_dst):
'\n \n '
roc_buffer_dst = roc.new_uint8_t_array((len(buff) + 1))
roc.memmove(roc_buffer_dst, buff)
roc.roc_unflatten(roc_buffer_dst, template_dst)
roc.delete_uint8_t_array(roc_buffer_dst)
return template_dst
|
def _detect(self, im, opts):
    '\n In RankOne, face detection happens within the roc_represent function.\n There is no explicit face detection step like in dlib.\n We still output the bounding box, but it is not really useful in this case.\n '
    '\n RankOne requires the image to be of type roc_image. Hence\n we will check for the image type. In this case it is a numpy array (skimage imread).\n Check if the image is a numpy array and if it is, convert it to a PIL image and\n then to a roc_image. The reason for doing this is that RankOne provides example code\n to convert from a PIL image to a roc_image.\n '
(h, w, _) = im.shape
if isinstance(im, np.ndarray):
im = self._converttoRocImage(im)
    '\n Indicates the smallest face to detect.\n Face detection size is measured by the width of the face in pixels.\n The default value is 36. It roughly corresponds to 18 pixels between the eyes.\n '
if (self.min_face_size == 'recommended'):
self.min_face_size = self.recommendedMinFaceSize()
elif (self.min_face_size == 'adaptive_size'):
'\n A method for determining the minimum face detection size as a fraction of the image size.\n\n In the interest of efficiency, it is recommended to set a lower bound on the minimum face detection size as a fraction of the image size. Given a relative minimum size of 4% of the image dimensions, and an absolute minimum size of 36 pixels, the adaptive minimum size is: max(max(image.width, image.height) * 0.04, 36).\n\n Example\n roc_image image = ...;\n size_t adaptive_minimum_size;\n roc_adaptive_minimum_size(image, 0.04, 36, &adaptive_minimum_size);\n '
        adaptive_minimum_size = roc.new_size_t()
        roc.roc_ensure(roc.roc_adaptive_minimum_size(im, 0.04, 36, adaptive_minimum_size))
else:
self.min_face_size = int(self.min_face_size)
self.detection_threshold = opts.threshold
if opts.best:
self.num_faces = 1
templates = roc.new_roc_template_array(self.num_faces)
if (self.min_face_size != 'adaptive_size'):
roc.roc_represent(im, self.algorithm_id_detect, self.min_face_size, self.num_faces, self.detection_threshold, self.img_quality, templates)
else:
        roc.roc_represent(im, self.algorithm_id_detect, roc.size_t_value(adaptive_minimum_size), self.num_faces, self.detection_threshold, self.img_quality, templates)
roc.delete_size_t(adaptive_minimum_size)
curr_template = roc.roc_template_array_getitem(templates, 0)
if ((curr_template.algorithm_id == 0) or (curr_template.algorithm_id & roc.ROC_INVALID)):
curr_template = roc.roc_template_array_getitem(templates, 0)
curr_template.detection.x = int((w * 0.5))
curr_template.detection.y = int((h * 0.5))
curr_template.detection.width = w
curr_template.detection.height = h
roc.roc_template_array_setitem(templates, 0, curr_template)
roc.roc_represent(im, roc.ROC_MANUAL, self.min_face_size, 1, self.detection_threshold, self.img_quality, templates)
roc.roc_free_image(im)
return templates
| 5,703,304,084,208,401,000
|
In RankOne, face detection happens within the roc_represent function.
There is no explicit face detection step like in dlib.
We still output the bounding box, but it is not really useful in this case.
|
src/faro/face_workers/RankOneFaceWorker.py
|
_detect
|
ORNL/faro
|
python
|
def _detect(self, im, opts):
    '\n In RankOne, face detection happens within the roc_represent function.\n There is no explicit face detection step like in dlib.\n We still output the bounding box, but it is not really useful in this case.\n '
    '\n RankOne requires the image to be of type roc_image. Hence\n we will check for the image type. In this case it is a numpy array (skimage imread).\n Check if the image is a numpy array and if it is, convert it to a PIL image and\n then to a roc_image. The reason for doing this is that RankOne provides example code\n to convert from a PIL image to a roc_image.\n '
(h, w, _) = im.shape
if isinstance(im, np.ndarray):
im = self._converttoRocImage(im)
    '\n Indicates the smallest face to detect.\n Face detection size is measured by the width of the face in pixels.\n The default value is 36. It roughly corresponds to 18 pixels between the eyes.\n '
if (self.min_face_size == 'recommended'):
self.min_face_size = self.recommendedMinFaceSize()
elif (self.min_face_size == 'adaptive_size'):
'\n A method for determining the minimum face detection size as a fraction of the image size.\n\n In the interest of efficiency, it is recommended to set a lower bound on the minimum face detection size as a fraction of the image size. Given a relative minimum size of 4% of the image dimensions, and an absolute minimum size of 36 pixels, the adaptive minimum size is: max(max(image.width, image.height) * 0.04, 36).\n\n Example\n roc_image image = ...;\n size_t adaptive_minimum_size;\n roc_adaptive_minimum_size(image, 0.04, 36, &adaptive_minimum_size);\n '
        adaptive_minimum_size = roc.new_size_t()
        roc.roc_ensure(roc.roc_adaptive_minimum_size(im, 0.04, 36, adaptive_minimum_size))
else:
self.min_face_size = int(self.min_face_size)
self.detection_threshold = opts.threshold
if opts.best:
self.num_faces = 1
templates = roc.new_roc_template_array(self.num_faces)
if (self.min_face_size != 'adaptive_size'):
roc.roc_represent(im, self.algorithm_id_detect, self.min_face_size, self.num_faces, self.detection_threshold, self.img_quality, templates)
else:
        roc.roc_represent(im, self.algorithm_id_detect, roc.size_t_value(adaptive_minimum_size), self.num_faces, self.detection_threshold, self.img_quality, templates)
roc.delete_size_t(adaptive_minimum_size)
curr_template = roc.roc_template_array_getitem(templates, 0)
if ((curr_template.algorithm_id == 0) or (curr_template.algorithm_id & roc.ROC_INVALID)):
curr_template = roc.roc_template_array_getitem(templates, 0)
curr_template.detection.x = int((w * 0.5))
curr_template.detection.y = int((h * 0.5))
curr_template.detection.width = w
curr_template.detection.height = h
roc.roc_template_array_setitem(templates, 0, curr_template)
roc.roc_represent(im, roc.ROC_MANUAL, self.min_face_size, 1, self.detection_threshold, self.img_quality, templates)
roc.roc_free_image(im)
return templates
|
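The adaptive minimum size rule quoted in the ``_detect`` docstring reduces to one line of arithmetic, max(max(width, height) * 0.04, 36). A worked example of the two regimes:

def adaptive_minimum_size(width, height, relative=0.04, absolute=36):
    # Lower-bound the face detection size by a fraction of the larger
    # image dimension, never dropping below the absolute minimum.
    return max(max(width, height) * relative, absolute)

print(adaptive_minimum_size(1920, 1080))  # ~76.8 -> faces narrower than ~77 px are skipped
print(adaptive_minimum_size(640, 480))    # 36    -> the absolute floor applies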
def locate(self, img, face_records, options):
'\n Not needed as we find the location of the eyes, nose and chin during detection and have \n added it to face records during detection\n '
pass
| -7,378,047,175,457,492,000
|
Not needed as we find the location of the eyes, nose and chin during detection and have
added it to face records during detection
|
src/faro/face_workers/RankOneFaceWorker.py
|
locate
|
ORNL/faro
|
python
|
def locate(self, img, face_records, options):
'\n Not needed as we find the location of the eyes, nose and chin during detection and have \n added it to face records during detection\n '
pass
|
def align(self, image, face_records):
'Align the images to a standard size and orientation to allow \n recognition.'
pass
| 1,324,541,208,925,305,900
|
Align the images to a standard size and orientation to allow
recognition.
|
src/faro/face_workers/RankOneFaceWorker.py
|
align
|
ORNL/faro
|
python
|
def align(self, image, face_records):
'Align the images to a standard size and orientation to allow \n recognition.'
pass
|
def scoreType(self):
'Return the method used to create a score from the template.\n \n By default server computation is required.\n \n SCORE_L1, SCORE_L2, SCORE_DOT, SCORE_SERVER\n '
return fsd.SERVER
| -8,849,982,573,337,070,000
|
Return the method used to create a score from the template.
By default server computation is required.
SCORE_L1, SCORE_L2, SCORE_DOT, SCORE_SERVER
|
src/faro/face_workers/RankOneFaceWorker.py
|
scoreType
|
ORNL/faro
|
python
|
def scoreType(self):
'Return the method used to create a score from the template.\n \n By default server computation is required.\n \n SCORE_L1, SCORE_L2, SCORE_DOT, SCORE_SERVER\n '
return fsd.SERVER
|
def score(self, score_request):
'Compare templates to produce scores.'
score_type = self.scoreType()
result = geo.Matrix()
if (score_type not in [fsd.SERVER]):
raise NotImplementedError(('Score type <%s> not implemented.' % (score_type,)))
if (len(score_request.template_probes.templates) == 0):
raise ValueError('no probe templates were found in the arguments.')
if (len(score_request.template_gallery.templates) == 0):
raise ValueError('no gallery templates were found in the arguments.')
'\n if min(len(score_request.face_probes.face_records),len(score_request.template_probes.templates)) != 0:\n raise ValueError("probes argument cannot have both face_probes and template_probes defined.")\n if max(len(score_request.face_probes.face_records),len(score_request.template_probes.templates)) == 0:\n raise ValueError("no probe templates were found in the arguments.")\n if min(len(score_request.face_gallery.face_records),len(score_request.template_gallery.templates)) != 0:\n raise ValueError("gallery argument cannot have both face_gallery and template_gallery defined.")\n if max(len(score_request.face_gallery.face_records),len(score_request.template_gallery.templates)) == 0:\n raise ValueError("no gallery templates were found in the arguments.")\n '
if (score_type == fsd.SERVER):
sim_mat = np.zeros((len(score_request.template_probes.templates), len(score_request.template_gallery.templates)), dtype=np.float32)
roc_probe_template = roc.roc_template()
roc_gallery_template = roc.roc_template()
sm_metric = roc.new_roc_similarity()
for p in range(0, len(score_request.template_probes.templates)):
self._rocUnFlatten(score_request.template_probes.templates[p].buffer, roc_probe_template)
for g in range(0, len(score_request.template_gallery.templates)):
self._rocUnFlatten(score_request.template_gallery.templates[g].buffer, roc_gallery_template)
roc.roc_compare_templates(roc_probe_template, roc_gallery_template, sm_metric)
sim_mat[(p, g)] = roc.roc_similarity_value(sm_metric)
roc.delete_roc_similarity(sm_metric)
roc.roc_free_template(roc_probe_template)
roc.roc_free_template(roc_gallery_template)
else:
        raise NotImplementedError(('ScoreType %s is not implemented.' % (score_type,)))
sim_mat[(sim_mat == (- 1.0))] = 0.0
dist_mat = (1.0 - sim_mat)
return pt.matrix_np2proto(dist_mat)
| -6,922,211,263,441,873,000
|
Compare templates to produce scores.
|
src/faro/face_workers/RankOneFaceWorker.py
|
score
|
ORNL/faro
|
python
|
def score(self, score_request):
score_type = self.scoreType()
result = geo.Matrix()
if (score_type not in [fsd.SERVER]):
raise NotImplementedError(('Score type <%s> not implemented.' % (score_type,)))
if (len(score_request.template_probes.templates) == 0):
raise ValueError('no probe templates were found in the arguments.')
if (len(score_request.template_gallery.templates) == 0):
raise ValueError('no gallery templates were found in the arguments.')
'\n if min(len(score_request.face_probes.face_records),len(score_request.template_probes.templates)) != 0:\n raise ValueError("probes argument cannot have both face_probes and template_probes defined.")\n if max(len(score_request.face_probes.face_records),len(score_request.template_probes.templates)) == 0:\n raise ValueError("no probe templates were found in the arguments.")\n if min(len(score_request.face_gallery.face_records),len(score_request.template_gallery.templates)) != 0:\n raise ValueError("gallery argument cannot have both face_gallery and template_gallery defined.")\n if max(len(score_request.face_gallery.face_records),len(score_request.template_gallery.templates)) == 0:\n raise ValueError("no gallery templates were found in the arguments.")\n '
if (score_type == fsd.SERVER):
sim_mat = np.zeros((len(score_request.template_probes.templates), len(score_request.template_gallery.templates)), dtype=np.float32)
roc_probe_template = roc.roc_template()
roc_gallery_template = roc.roc_template()
sm_metric = roc.new_roc_similarity()
for p in range(0, len(score_request.template_probes.templates)):
self._rocUnFlatten(score_request.template_probes.templates[p].buffer, roc_probe_template)
for g in range(0, len(score_request.template_gallery.templates)):
self._rocUnFlatten(score_request.template_gallery.templates[g].buffer, roc_gallery_template)
roc.roc_compare_templates(roc_probe_template, roc_gallery_template, sm_metric)
sim_mat[(p, g)] = roc.roc_similarity_value(sm_metric)
roc.delete_roc_similarity(sm_metric)
roc.roc_free_template(roc_probe_template)
roc.roc_free_template(roc_gallery_template)
else:
        raise NotImplementedError(('ScoreType %s is not implemented.' % (score_type,)))
sim_mat[(sim_mat == (- 1.0))] = 0.0
dist_mat = (1.0 - sim_mat)
return pt.matrix_np2proto(dist_mat)
|
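The last two statements of ``score`` convert ROC similarity scores into the distance matrix the service returns: failed comparisons (similarity -1.0) are zeroed, then distance = 1 - similarity. A small numeric check of that conversion:

import numpy as np

sim_mat = np.array([[0.92, -1.0], [0.30, 0.75]], dtype=np.float32)
sim_mat[sim_mat == -1.0] = 0.0  # failed comparisons become similarity 0
dist_mat = 1.0 - sim_mat
print(dist_mat)  # approx [[0.08, 1.0], [0.7, 0.25]]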
def status(self):
'Return a simple status message.'
    print('Handling status request.')
status_message = fsd.FaceServiceInfo()
status_message.status = fsd.READY
status_message.detection_support = True
status_message.extract_support = True
status_message.score_support = False
status_message.score_type = self.scoreType()
status_message.algorithm = ('RankOne_%s' % roc.__file__)
status_message.detection_threshold = self.recommendedDetectionThreshold()
status_message.match_threshold = self.recommendedScoreThreshold()
return status_message
| -402,292,803,537,436,900
|
Return a simple status message.
|
src/faro/face_workers/RankOneFaceWorker.py
|
status
|
ORNL/faro
|
python
|
def status(self):
    print('Handling status request.')
status_message = fsd.FaceServiceInfo()
status_message.status = fsd.READY
status_message.detection_support = True
status_message.extract_support = True
status_message.score_support = False
status_message.score_type = self.scoreType()
status_message.algorithm = ('RankOne_%s' % roc.__file__)
status_message.detection_threshold = self.recommendedDetectionThreshold()
status_message.match_threshold = self.recommendedScoreThreshold()
return status_message
|
def recommendedDetectionThreshold(self):
    '\n The false_detection_rate parameter specifies the allowable\n false positive rate for face detection. The suggested default\n value for false_detection_rate is 0.02 which corresponds to\n one false detection in 50 images on the FDDB benchmark. A\n higher false detection rate will correctly detect more faces\n at the cost of also incorrectly detecting more non-faces.\n The accepted range of values for false_detection_rate is\n between 0 and 1. Values outside this range will be modified\n to be at the aforementioned bounds automatically.\n\n '
return 0.02
| -550,635,710,567,225,800
|
The false_detection_rate parameter specifies the allowable
false positive rate for face detection. The suggested default
value for false_detection_rate is 0.02 which corresponds to
one false detection in 50 images on the FDDB benchmark. A
higher false detection rate will correctly detect more faces
at the cost of also incorrectly detecting more non-faces.
The accepted range of values for false_detection_rate is
between 0 and 1. Values outside this range will be modified
to be at the aforementioned bounds automatically.
|
src/faro/face_workers/RankOneFaceWorker.py
|
recommendedDetectionThreshold
|
ORNL/faro
|
python
|
def recommendedDetectionThreshold(self):
    '\n The false_detection_rate parameter specifies the allowable\n false positive rate for face detection. The suggested default\n value for false_detection_rate is 0.02 which corresponds to\n one false detection in 50 images on the FDDB benchmark. A\n higher false detection rate will correctly detect more faces\n at the cost of also incorrectly detecting more non-faces.\n The accepted range of values for false_detection_rate is\n between 0 and 1. Values outside this range will be modified\n to be at the aforementioned bounds automatically.\n\n '
return 0.02
|
def recommendedScoreThreshold(self, far=(- 1)):
    'Return a recommended score threshold.\n \n By default server computation is required.\n \n DLIB recommends a value of 0.6 for the LFW dataset.\n '
return 0.6
| 2,327,120,922,743,612,000
|
Return a recommended score threshold.
By default server computation is required.
DLIB recommends a value of 0.6 for the LFW dataset.
|
src/faro/face_workers/RankOneFaceWorker.py
|
recommendedScoreThreshold
|
ORNL/faro
|
python
|
def recommendedScoreThreshold(self, far=(- 1)):
    'Return a recommended score threshold.\n \n By default server computation is required.\n \n DLIB recommends a value of 0.6 for the LFW dataset.\n '
return 0.6
|
def present(save_fn: str, duration=120, n_trials=2010, iti=0.5, soa=3.0, jitter=0.2, volume=0.8, random_state=42, eeg=None, cf1=900, amf1=45, cf2=770, amf2=40.018, sample_rate=44100):
    '\n\n Auditory SSAEP Experiment\n ===========================\n\n\n Parameters:\n -----------\n\n duration - duration of the recording in seconds (default 120)\n\n n_trials - number of trials (default 2010)\n\n iti - intertrial interval in seconds (default 0.5)\n\n soa - stimulus onset asynchrony, = interval between end of stimulus\n and next trial (default 3.0); also used as the sound duration\n\n jitter - jitter in the intertrial intervals (default 0.2)\n\n volume - volume of the sounds in [0,1] (default 0.8)\n\n random_state - random seed (default 42)\n\n\n '
np.random.seed(random_state)
markernames = [1, 2]
record_duration = np.float32(duration)
am1 = generate_am_waveform(cf1, amf1, secs=soa, sample_rate=sample_rate)
am2 = generate_am_waveform(cf2, amf2, secs=soa, sample_rate=sample_rate)
aud1 = sound.Sound(am1, sampleRate=sample_rate)
aud1.setVolume(volume)
aud2 = sound.Sound(am2, sampleRate=sample_rate)
aud2.setVolume(volume)
auds = [aud1, aud2]
stim_freq = np.random.binomial(1, 0.5, n_trials)
itis = (iti + (np.random.rand(n_trials) * jitter))
trials = DataFrame(dict(stim_freq=stim_freq, timestamp=np.zeros(n_trials)))
trials['iti'] = itis
trials['soa'] = soa
mywin = visual.Window([1920, 1080], monitor='testMonitor', units='deg', fullscr=True)
fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0, rgb=[1, 0, 0])
fixation.setAutoDraw(True)
mywin.flip()
show_instructions(10)
if eeg:
eeg.start(save_fn, duration=record_duration)
start = time()
for (ii, trial) in trials.iterrows():
core.wait((trial['iti'] + (np.random.randn() * jitter)))
ind = trials['stim_freq'].iloc[ii]
auds[ind].stop()
auds[ind].play()
if eeg:
timestamp = time()
if (eeg.backend == 'muselsl'):
marker = [markernames[ind]]
marker = list(map(int, marker))
else:
marker = markernames[ind]
eeg.push_sample(marker=marker, timestamp=timestamp)
mywin.flip()
core.wait(soa)
if (len(event.getKeys()) > 0):
break
if ((time() - start) > record_duration):
break
event.clearEvents()
if eeg:
eeg.stop()
mywin.close()
| -1,716,731,095,930,829,300
|
Auditory SSAEP Experiment
===========================
Parameters:
-----------
duration - duration of the recording in seconds (default 120)
n_trials - number of trials (default 2010)
iti - intertrial interval in seconds (default 0.5)
soa - stimulus onset asynchrony, = interval between end of stimulus
and next trial (default 3.0); also used as the sound duration
jitter - jitter in the intertrial intervals (default 0.2)
volume - volume of the sounds in [0,1] (default 0.8)
random_state - random seed (default 42)
|
eegnb/experiments/auditory_ssaep/ssaep.py
|
present
|
Neuroelektroteknia/eeg-notebooks
|
python
|
def present(save_fn: str, duration=120, n_trials=2010, iti=0.5, soa=3.0, jitter=0.2, volume=0.8, random_state=42, eeg=None, cf1=900, amf1=45, cf2=770, amf2=40.018, sample_rate=44100):
    '\n\n Auditory SSAEP Experiment\n ===========================\n\n\n Parameters:\n -----------\n\n duration - duration of the recording in seconds (default 120)\n\n n_trials - number of trials (default 2010)\n\n iti - intertrial interval in seconds (default 0.5)\n\n soa - stimulus onset asynchrony, = interval between end of stimulus\n and next trial (default 3.0); also used as the sound duration\n\n jitter - jitter in the intertrial intervals (default 0.2)\n\n volume - volume of the sounds in [0,1] (default 0.8)\n\n random_state - random seed (default 42)\n\n\n '
np.random.seed(random_state)
markernames = [1, 2]
record_duration = np.float32(duration)
am1 = generate_am_waveform(cf1, amf1, secs=soa, sample_rate=sample_rate)
am2 = generate_am_waveform(cf2, amf2, secs=soa, sample_rate=sample_rate)
aud1 = sound.Sound(am1, sampleRate=sample_rate)
aud1.setVolume(volume)
aud2 = sound.Sound(am2, sampleRate=sample_rate)
aud2.setVolume(volume)
auds = [aud1, aud2]
stim_freq = np.random.binomial(1, 0.5, n_trials)
itis = (iti + (np.random.rand(n_trials) * jitter))
trials = DataFrame(dict(stim_freq=stim_freq, timestamp=np.zeros(n_trials)))
trials['iti'] = itis
trials['soa'] = soa
mywin = visual.Window([1920, 1080], monitor='testMonitor', units='deg', fullscr=True)
fixation = visual.GratingStim(win=mywin, size=0.2, pos=[0, 0], sf=0, rgb=[1, 0, 0])
fixation.setAutoDraw(True)
mywin.flip()
show_instructions(10)
if eeg:
eeg.start(save_fn, duration=record_duration)
start = time()
for (ii, trial) in trials.iterrows():
core.wait((trial['iti'] + (np.random.randn() * jitter)))
ind = trials['stim_freq'].iloc[ii]
auds[ind].stop()
auds[ind].play()
if eeg:
timestamp = time()
if (eeg.backend == 'muselsl'):
marker = [markernames[ind]]
marker = list(map(int, marker))
else:
marker = markernames[ind]
eeg.push_sample(marker=marker, timestamp=timestamp)
mywin.flip()
core.wait(soa)
if (len(event.getKeys()) > 0):
break
if ((time() - start) > record_duration):
break
event.clearEvents()
if eeg:
eeg.stop()
mywin.close()
|
def generate_am_waveform(carrier_freq, am_freq, secs=1, sample_rate=None, am_type='gaussian', gaussian_std_ratio=8):
"Generate an amplitude-modulated waveform.\n\n Generate a sine wave amplitude-modulated by a second sine wave or a\n Gaussian envelope with standard deviation = period_AM/8.\n\n Args:\n carrier_freq (float): carrier wave frequency, in Hz\n am_freq (float): amplitude modulation frequency, in Hz\n\n Keyword Args:\n secs (float): duration of the stimulus, in seconds\n sample_rate (float): sampling rate of the sound, in Hz\n am_type (str): amplitude-modulation type\n 'gaussian' -> Gaussian with std defined by `gaussian_std`\n 'sine' -> sine wave\n gaussian_std_ratio (float): only used if `am_type` is 'gaussian'.\n Ratio between AM period and std of the Gaussian envelope. E.g.,\n gaussian_std = 8 means the Gaussian window has 8 standard\n deviations around its mean inside one AM period.\n\n Returns:\n (numpy.ndarray): sound samples\n "
t = np.arange(0, secs, (1.0 / sample_rate))
if (am_type == 'gaussian'):
period = int((sample_rate / am_freq))
std = (period / gaussian_std_ratio)
norm_window = stats.norm.pdf(np.arange(period), (period / 2), std)
norm_window /= np.max(norm_window)
n_windows = int(np.ceil((secs * am_freq)))
am = np.tile(norm_window, n_windows)
am = am[:len(t)]
elif (am_type == 'sine'):
am = np.sin((((2 * np.pi) * am_freq) * t))
carrier = ((0.5 * np.sin((((2 * np.pi) * carrier_freq) * t))) + 0.5)
am_out = (carrier * am)
return am_out
| -6,215,639,081,792,676,000
|
Generate an amplitude-modulated waveform.
Generate a sine wave amplitude-modulated by a second sine wave or a
Gaussian envelope with standard deviation = period_AM/8.
Args:
carrier_freq (float): carrier wave frequency, in Hz
am_freq (float): amplitude modulation frequency, in Hz
Keyword Args:
secs (float): duration of the stimulus, in seconds
sample_rate (float): sampling rate of the sound, in Hz
am_type (str): amplitude-modulation type
'gaussian' -> Gaussian with std defined by `gaussian_std`
'sine' -> sine wave
gaussian_std_ratio (float): only used if `am_type` is 'gaussian'.
Ratio between AM period and std of the Gaussian envelope. E.g.,
gaussian_std = 8 means the Gaussian window has 8 standard
deviations around its mean inside one AM period.
Returns:
(numpy.ndarray): sound samples
|
eegnb/experiments/auditory_ssaep/ssaep.py
|
generate_am_waveform
|
Neuroelektroteknia/eeg-notebooks
|
python
|
def generate_am_waveform(carrier_freq, am_freq, secs=1, sample_rate=None, am_type='gaussian', gaussian_std_ratio=8):
"Generate an amplitude-modulated waveform.\n\n Generate a sine wave amplitude-modulated by a second sine wave or a\n Gaussian envelope with standard deviation = period_AM/8.\n\n Args:\n carrier_freq (float): carrier wave frequency, in Hz\n am_freq (float): amplitude modulation frequency, in Hz\n\n Keyword Args:\n secs (float): duration of the stimulus, in seconds\n sample_rate (float): sampling rate of the sound, in Hz\n am_type (str): amplitude-modulation type\n 'gaussian' -> Gaussian with std defined by `gaussian_std`\n 'sine' -> sine wave\n gaussian_std_ratio (float): only used if `am_type` is 'gaussian'.\n Ratio between AM period and std of the Gaussian envelope. E.g.,\n gaussian_std = 8 means the Gaussian window has 8 standard\n deviations around its mean inside one AM period.\n\n Returns:\n (numpy.ndarray): sound samples\n "
t = np.arange(0, secs, (1.0 / sample_rate))
if (am_type == 'gaussian'):
period = int((sample_rate / am_freq))
std = (period / gaussian_std_ratio)
norm_window = stats.norm.pdf(np.arange(period), (period / 2), std)
norm_window /= np.max(norm_window)
n_windows = int(np.ceil((secs * am_freq)))
am = np.tile(norm_window, n_windows)
am = am[:len(t)]
elif (am_type == 'sine'):
am = np.sin((((2 * np.pi) * am_freq) * t))
carrier = ((0.5 * np.sin((((2 * np.pi) * carrier_freq) * t))) + 0.5)
am_out = (carrier * am)
return am_out
|
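A usage sketch for ``generate_am_waveform`` (assumes the function above is in scope). Note that ``sample_rate`` defaults to None, which would fail inside ``np.arange``, so it must always be passed explicitly:

sample_rate = 44100
samples = generate_am_waveform(900, 45, secs=1.0, sample_rate=sample_rate)
print(samples.shape)                # (44100,) -- one second of audio
print(float(samples.max()) <= 1.0)  # carrier and envelope both stay within [0, 1]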
def get_script_name(environ):
"\n Returns the equivalent of the HTTP request's SCRIPT_NAME environment\n variable. If Apache mod_rewrite has been used, returns what would have been\n the script name prior to any rewriting (so it's the script name as seen\n from the client's perspective), unless the FORCE_SCRIPT_NAME setting is\n set (to anything).\n "
from django.conf import settings
if (settings.FORCE_SCRIPT_NAME is not None):
return force_text(settings.FORCE_SCRIPT_NAME)
script_url = environ.get('SCRIPT_URL', '')
if (not script_url):
script_url = environ.get('REDIRECT_URL', '')
if script_url:
return force_text(script_url[:(- len(environ.get('PATH_INFO', '')))])
return force_text(environ.get('SCRIPT_NAME', ''))
| -4,577,672,714,947,128,300
|
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
|
django/core/handlers/base.py
|
get_script_name
|
chalkchisel/django
|
python
|
def get_script_name(environ):
"\n Returns the equivalent of the HTTP request's SCRIPT_NAME environment\n variable. If Apache mod_rewrite has been used, returns what would have been\n the script name prior to any rewriting (so it's the script name as seen\n from the client's perspective), unless the FORCE_SCRIPT_NAME setting is\n set (to anything).\n "
from django.conf import settings
if (settings.FORCE_SCRIPT_NAME is not None):
return force_text(settings.FORCE_SCRIPT_NAME)
    script_url = environ.get('SCRIPT_URL', '')
    if (not script_url):
        script_url = environ.get('REDIRECT_URL', '')
    if script_url:
        return force_text(script_url[:(- len(environ.get('PATH_INFO', '')))])
    return force_text(environ.get('SCRIPT_NAME', ''))
|
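A worked example of the mod_rewrite branch of ``get_script_name``: stripping ``PATH_INFO`` off the end of ``SCRIPT_URL`` recovers the script name as the client saw it (the sample environ values are illustrative):

environ = {'SCRIPT_URL': '/app/view', 'PATH_INFO': '/view', 'SCRIPT_NAME': '/app.wsgi'}
script_url = environ.get('SCRIPT_URL', '') or environ.get('REDIRECT_URL', '')
if script_url:
    # '/app/view' minus the trailing '/view' leaves '/app'.
    print(script_url[:-len(environ.get('PATH_INFO', ''))])  # /app
else:
    print(environ.get('SCRIPT_NAME', ''))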
def load_middleware(self):
'\n Populate middleware lists from settings.MIDDLEWARE_CLASSES.\n\n Must be called after the environment is fixed (see __call__ in subclasses).\n '
from django.conf import settings
from django.core import exceptions
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
try:
(mw_module, mw_classname) = middleware_path.rsplit('.', 1)
except ValueError:
raise exceptions.ImproperlyConfigured(("%s isn't a middleware module" % middleware_path))
try:
mod = import_module(mw_module)
except ImportError as e:
raise exceptions.ImproperlyConfigured(('Error importing middleware %s: "%s"' % (mw_module, e)))
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured(('Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname)))
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
self._request_middleware = request_middleware
| 3,131,384,541,514,060,000
|
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__ in subclasses).
|
django/core/handlers/base.py
|
load_middleware
|
chalkchisel/django
|
python
|
def load_middleware(self):
'\n Populate middleware lists from settings.MIDDLEWARE_CLASSES.\n\n Must be called after the environment is fixed (see __call__ in subclasses).\n '
from django.conf import settings
from django.core import exceptions
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
try:
(mw_module, mw_classname) = middleware_path.rsplit('.', 1)
except ValueError:
raise exceptions.ImproperlyConfigured(("%s isn't a middleware module" % middleware_path))
try:
mod = import_module(mw_module)
except ImportError as e:
raise exceptions.ImproperlyConfigured(('Error importing middleware %s: "%s"' % (mw_module, e)))
try:
mw_class = getattr(mod, mw_classname)
except AttributeError:
raise exceptions.ImproperlyConfigured(('Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname)))
try:
mw_instance = mw_class()
except exceptions.MiddlewareNotUsed:
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
self._request_middleware = request_middleware
|
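load_middleware only looks for the five hook methods by name, so a middleware class may implement any subset of them. A minimal sketch of such an old-style middleware (the class, attribute, and header names are invented for illustration):

from django.http import HttpResponseForbidden

class BlocklistMiddleware(object):
    blocked = {'10.0.0.1'}

    def process_request(self, request):
        # Returning a response here short-circuits URL resolution in get_response.
        if request.META.get('REMOTE_ADDR') in self.blocked:
            return HttpResponseForbidden('blocked')
        return None  # fall through to the next request middleware

    def process_response(self, request, response):
        # Response middleware runs in reverse registration order
        # (note the insert(0, ...) calls in load_middleware above).
        response['X-Blocklist'] = 'checked'
        return response

# Registered through settings, e.g.
# MIDDLEWARE_CLASSES = ('myapp.middleware.BlocklistMiddleware',)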
def get_response(self, request):
'Returns an HttpResponse object for the given HttpRequest'
from django.core import exceptions, urlresolvers
from django.conf import settings
try:
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver('^/', urlconf)
try:
response = None
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if (response is None):
if hasattr(request, 'urlconf'):
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver('^/', urlconf)
(callback, callback_args, callback_kwargs) = resolver.resolve(request.path_info)
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if (response is None):
try:
response = callback(request, *callback_args, **callback_kwargs)
except Exception as e:
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
break
if (response is None):
raise
if (response is None):
if isinstance(callback, types.FunctionType):
view_name = callback.__name__
else:
view_name = (callback.__class__.__name__ + '.__call__')
raise ValueError(("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name)))
if (hasattr(response, 'render') and callable(response.render)):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
response = response.render()
except http.Http404 as e:
logger.warning('Not Found: %s', request.path, extra={'status_code': 404, 'request': request})
if settings.DEBUG:
from django.views import debug
response = debug.technical_404_response(request, e)
else:
try:
(callback, param_dict) = resolver.resolve404()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
except exceptions.PermissionDenied:
logger.warning('Forbidden (Permission denied): %s', request.path, extra={'status_code': 403, 'request': request})
try:
(callback, param_dict) = resolver.resolve403()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
except SystemExit:
raise
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
urlresolvers.set_urlconf(None)
try:
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
return response
| 6,400,287,607,290,851,000
|
Returns an HttpResponse object for the given HttpRequest
|
django/core/handlers/base.py
|
get_response
|
chalkchisel/django
|
python
|
def get_response(self, request):
from django.core import exceptions, urlresolvers
from django.conf import settings
try:
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver('^/', urlconf)
try:
response = None
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if (response is None):
if hasattr(request, 'urlconf'):
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver('^/', urlconf)
(callback, callback_args, callback_kwargs) = resolver.resolve(request.path_info)
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if (response is None):
try:
response = callback(request, *callback_args, **callback_kwargs)
except Exception as e:
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
break
if (response is None):
raise
if (response is None):
if isinstance(callback, types.FunctionType):
view_name = callback.__name__
else:
view_name = (callback.__class__.__name__ + '.__call__')
raise ValueError(("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name)))
if (hasattr(response, 'render') and callable(response.render)):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
response = response.render()
except http.Http404 as e:
logger.warning('Not Found: %s', request.path, extra={'status_code': 404, 'request': request})
if settings.DEBUG:
from django.views import debug
response = debug.technical_404_response(request, e)
else:
try:
(callback, param_dict) = resolver.resolve404()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
except exceptions.PermissionDenied:
logger.warning('Forbidden (Permission denied): %s', request.path, extra={'status_code': 403, 'request': request})
try:
(callback, param_dict) = resolver.resolve403()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
except SystemExit:
raise
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
finally:
urlresolvers.set_urlconf(None)
try:
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
return response
|
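When a view raises, get_response walks self._exception_middleware and uses the first non-None response. A hedged sketch of such a hook (the class name and the KeyError policy are invented):

from django.http import HttpResponse

class SoftFailMiddleware(object):
    def process_exception(self, request, exception):
        # The first middleware returning a response wins; returning None lets
        # the exception continue on to handle_uncaught_exception.
        if isinstance(exception, KeyError):
            return HttpResponse('missing key: %s' % exception, status=500)
        return None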
def handle_uncaught_exception(self, request, resolver, exc_info):
'\n Processing for any otherwise uncaught exceptions (those that will\n generate HTTP 500 responses). Can be overridden by subclasses who want\n customised 500 handling.\n\n Be *very* careful when overriding this because the error could be\n caused by anything, so assuming something like the database is always\n available would be an error.\n '
from django.conf import settings
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
logger.error('Internal Server Error: %s', request.path, exc_info=exc_info, extra={'status_code': 500, 'request': request})
if settings.DEBUG:
from django.views import debug
return debug.technical_500_response(request, *exc_info)
if (resolver.urlconf_module is None):
six.reraise(*exc_info)
(callback, param_dict) = resolver.resolve500()
return callback(request, **param_dict)
| 1,751,742,861,078,187,500
|
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
|
django/core/handlers/base.py
|
handle_uncaught_exception
|
chalkchisel/django
|
python
|
def handle_uncaught_exception(self, request, resolver, exc_info):
'\n Processing for any otherwise uncaught exceptions (those that will\n generate HTTP 500 responses). Can be overridden by subclasses who want\n customised 500 handling.\n\n Be *very* careful when overriding this because the error could be\n caused by anything, so assuming something like the database is always\n available would be an error.\n '
from django.conf import settings
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
logger.error('Internal Server Error: %s', request.path, exc_info=exc_info, extra={'status_code': 500, 'request': request})
if settings.DEBUG:
from django.views import debug
return debug.technical_500_response(request, *exc_info)
if (resolver.urlconf_module is None):
six.reraise(*exc_info)
(callback, param_dict) = resolver.resolve500()
return callback(request, **param_dict)
|
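A sketch of the kind of careful override the docstring warns about, assuming a handler subclass; it deliberately depends on nothing that might itself be broken (the mixin name is invented):

import json
from django.http import HttpResponse

class JsonErrorHandlerMixin(object):
    def handle_uncaught_exception(self, request, resolver, exc_info):
        # Keep this path dependency-free: the original failure could be the
        # database, the cache, or anything else.
        body = json.dumps({'error': 'internal server error'})
        return HttpResponse(body, status=500, content_type='application/json')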
def apply_response_fixes(self, request, response):
'\n Applies each of the functions in self.response_fixes to the request and\n response, modifying the response in the process. Returns the new\n response.\n '
for func in self.response_fixes:
response = func(request, response)
return response
| -1,219,089,826,869,694,000
|
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
|
django/core/handlers/base.py
|
apply_response_fixes
|
chalkchisel/django
|
python
|
def apply_response_fixes(self, request, response):
'\n Applies each of the functions in self.response_fixes to the request and\n response, modifying the response in the process. Returns the new\n response.\n '
for func in self.response_fixes:
response = func(request, response)
return response
|
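Each entry in self.response_fixes is just a callable taking (request, response) and returning a response. A representative sketch, modeled loosely on Django's own location-header fix and simplified here:

def fix_location_header(request, response):
    # Make a relative Location header absolute; otherwise pass the
    # response through unchanged.
    if ('Location' in response) and request.get_host():
        response['Location'] = request.build_absolute_uri(response['Location'])
    return response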
def _construct_simple(coeffs, opt):
'Handle simple domains, e.g.: ZZ, QQ, RR and algebraic domains. '
(result, rationals, reals, algebraics) = ({}, False, False, False)
if (opt.extension is True):
is_algebraic = (lambda coeff: ask(Q.algebraic(coeff)))
else:
is_algebraic = (lambda coeff: False)
for coeff in coeffs:
if coeff.is_Rational:
if (not coeff.is_Integer):
rationals = True
elif coeff.is_Float:
if (not algebraics):
reals = True
else:
return False
elif is_algebraic(coeff):
if (not reals):
algebraics = True
else:
return False
else:
return None
if algebraics:
(domain, result) = _construct_algebraic(coeffs, opt)
else:
if reals:
domain = RR
elif (opt.field or rationals):
domain = QQ
else:
domain = ZZ
result = []
for coeff in coeffs:
result.append(domain.from_sympy(coeff))
return (domain, result)
| -4,415,115,649,839,476,700
|
Handle simple domains, e.g.: ZZ, QQ, RR and algebraic domains.
|
sympy/polys/constructor.py
|
_construct_simple
|
jegerjensen/sympy
|
python
|
def _construct_simple(coeffs, opt):
' '
(result, rationals, reals, algebraics) = ({}, False, False, False)
if (opt.extension is True):
is_algebraic = (lambda coeff: ask(Q.algebraic(coeff)))
else:
is_algebraic = (lambda coeff: False)
for coeff in coeffs:
if coeff.is_Rational:
if (not coeff.is_Integer):
rationals = True
elif coeff.is_Float:
if (not algebraics):
reals = True
else:
return False
elif is_algebraic(coeff):
if (not reals):
algebraics = True
else:
return False
else:
return None
if algebraics:
(domain, result) = _construct_algebraic(coeffs, opt)
else:
if reals:
domain = RR
elif (opt.field or rationals):
domain = QQ
else:
domain = ZZ
result = []
for coeff in coeffs:
result.append(domain.from_sympy(coeff))
return (domain, result)
|
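construct_domain (defined further below) funnels plain numeric coefficients through _construct_simple. A usage sketch of the resulting domain choices; exact printed forms vary between sympy versions:

from sympy import Float, Rational, sqrt
from sympy.polys.constructor import construct_domain

construct_domain([1, 2, 3])                  # all Integer       -> (ZZ, [...])
construct_domain([1, Rational(1, 2)])        # a non-Integer     -> (QQ, [...])
construct_domain([Float('0.5'), 2])          # a Float forces    -> (RR, [...])
construct_domain([sqrt(2)], extension=True)  # algebraic         -> QQ<sqrt(2)>
construct_domain([sqrt(2)])                  # no extension: falls back to EX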
def _construct_algebraic(coeffs, opt):
'We know that coefficients are algebraic so construct the extension. '
from sympy.polys.numberfields import primitive_element
(result, exts) = ([], set([]))
for coeff in coeffs:
if coeff.is_Rational:
coeff = (None, 0, QQ.from_sympy(coeff))
else:
a = coeff.as_coeff_add()[0]
coeff -= a
b = coeff.as_coeff_mul()[0]
coeff /= b
exts.add(coeff)
a = QQ.from_sympy(a)
b = QQ.from_sympy(b)
coeff = (coeff, b, a)
result.append(coeff)
exts = list(exts)
(g, span, H) = primitive_element(exts, ex=True, polys=True)
root = sum([(s * ext) for (s, ext) in zip(span, exts)])
(domain, g) = (QQ.algebraic_field((g, root)), g.rep.rep)
for (i, (coeff, a, b)) in enumerate(result):
if (coeff is not None):
coeff = ((a * domain.dtype.from_list(H[exts.index(coeff)], g, QQ)) + b)
else:
coeff = domain.dtype.from_list([b], g, QQ)
result[i] = coeff
return (domain, result)
| -7,388,822,707,878,778,000
|
We know that coefficients are algebraic so construct the extension.
|
sympy/polys/constructor.py
|
_construct_algebraic
|
jegerjensen/sympy
|
python
|
def _construct_algebraic(coeffs, opt):
' '
from sympy.polys.numberfields import primitive_element
(result, exts) = ([], set([]))
for coeff in coeffs:
if coeff.is_Rational:
coeff = (None, 0, QQ.from_sympy(coeff))
else:
a = coeff.as_coeff_add()[0]
coeff -= a
b = coeff.as_coeff_mul()[0]
coeff /= b
exts.add(coeff)
a = QQ.from_sympy(a)
b = QQ.from_sympy(b)
coeff = (coeff, b, a)
result.append(coeff)
exts = list(exts)
(g, span, H) = primitive_element(exts, ex=True, polys=True)
root = sum([(s * ext) for (s, ext) in zip(span, exts)])
(domain, g) = (QQ.algebraic_field((g, root)), g.rep.rep)
for (i, (coeff, a, b)) in enumerate(result):
if (coeff is not None):
coeff = ((a * domain.dtype.from_list(H[exts.index(coeff)], g, QQ)) + b)
else:
coeff = domain.dtype.from_list([b], g, QQ)
result[i] = coeff
return (domain, result)
|
def _construct_composite(coeffs, opt):
'Handle composite domains, e.g.: ZZ[X], QQ[X], ZZ(X), QQ(X). '
(numers, denoms) = ([], [])
for coeff in coeffs:
(numer, denom) = coeff.as_numer_denom()
numers.append(numer)
denoms.append(denom)
(polys, gens) = parallel_dict_from_basic((numers + denoms))
if any((gen.is_number for gen in gens)):
return None
n = len(gens)
k = (len(polys) // 2)
numers = polys[:k]
denoms = polys[k:]
if opt.field:
fractions = True
else:
(fractions, zeros) = (False, ((0,) * n))
for denom in denoms:
if ((len(denom) > 1) or (zeros not in denom)):
fractions = True
break
coeffs = set([])
if (not fractions):
for (numer, denom) in zip(numers, denoms):
denom = denom[zeros]
for (monom, coeff) in numer.iteritems():
coeff /= denom
coeffs.add(coeff)
numer[monom] = coeff
else:
for (numer, denom) in zip(numers, denoms):
coeffs.update(numer.values())
coeffs.update(denom.values())
(rationals, reals) = (False, False)
for coeff in coeffs:
if coeff.is_Rational:
if (not coeff.is_Integer):
rationals = True
elif coeff.is_Float:
reals = True
break
if reals:
ground = RR
elif rationals:
ground = QQ
else:
ground = ZZ
result = []
if (not fractions):
domain = ground.poly_ring(*gens)
for numer in numers:
for (monom, coeff) in numer.iteritems():
numer[monom] = ground.from_sympy(coeff)
result.append(domain(numer))
else:
domain = ground.frac_field(*gens)
for (numer, denom) in zip(numers, denoms):
for (monom, coeff) in numer.iteritems():
numer[monom] = ground.from_sympy(coeff)
for (monom, coeff) in denom.iteritems():
denom[monom] = ground.from_sympy(coeff)
result.append(domain((numer, denom)))
return (domain, result)
| -7,897,564,059,083,854,000
|
Handle composite domains, e.g.: ZZ[X], QQ[X], ZZ(X), QQ(X).
|
sympy/polys/constructor.py
|
_construct_composite
|
jegerjensen/sympy
|
python
|
def _construct_composite(coeffs, opt):
' '
(numers, denoms) = ([], [])
for coeff in coeffs:
(numer, denom) = coeff.as_numer_denom()
numers.append(numer)
denoms.append(denom)
(polys, gens) = parallel_dict_from_basic((numers + denoms))
if any((gen.is_number for gen in gens)):
return None
n = len(gens)
k = (len(polys) // 2)
numers = polys[:k]
denoms = polys[k:]
if opt.field:
fractions = True
else:
(fractions, zeros) = (False, ((0,) * n))
for denom in denoms:
if ((len(denom) > 1) or (zeros not in denom)):
fractions = True
break
coeffs = set([])
if (not fractions):
for (numer, denom) in zip(numers, denoms):
denom = denom[zeros]
for (monom, coeff) in numer.iteritems():
coeff /= denom
coeffs.add(coeff)
numer[monom] = coeff
else:
for (numer, denom) in zip(numers, denoms):
coeffs.update(numer.values())
coeffs.update(denom.values())
(rationals, reals) = (False, False)
for coeff in coeffs:
if coeff.is_Rational:
if (not coeff.is_Integer):
rationals = True
elif coeff.is_Float:
reals = True
break
if reals:
ground = RR
elif rationals:
ground = QQ
else:
ground = ZZ
result = []
if (not fractions):
domain = ground.poly_ring(*gens)
for numer in numers:
for (monom, coeff) in numer.iteritems():
numer[monom] = ground.from_sympy(coeff)
result.append(domain(numer))
else:
domain = ground.frac_field(*gens)
for (numer, denom) in zip(numers, denoms):
for (monom, coeff) in numer.iteritems():
numer[monom] = ground.from_sympy(coeff)
for (monom, coeff) in denom.iteritems():
denom[monom] = ground.from_sympy(coeff)
result.append(domain((numer, denom)))
return (domain, result)
|
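When coefficients contain symbols, construct_domain reaches _construct_composite instead. A sketch of the three outcomes (same version caveat as above):

from sympy.abc import x
from sympy.polys.constructor import construct_domain

construct_domain([x, 2*x + 1])  # integer polynomial coefficients -> ZZ[x]
construct_domain([x/2])         # rational ground coefficients    -> QQ[x]
construct_domain([1/x])         # denominators force the fraction field ZZ(x)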
def _construct_expression(coeffs, opt):
'The last resort case, i.e. use the expression domain. '
(domain, result) = (EX, [])
for coeff in coeffs:
result.append(domain.from_sympy(coeff))
return (domain, result)
| 8,277,807,598,530,472,000
|
The last resort case, i.e. use the expression domain.
|
sympy/polys/constructor.py
|
_construct_expression
|
jegerjensen/sympy
|
python
|
def _construct_expression(coeffs, opt):
' '
(domain, result) = (EX, [])
for coeff in coeffs:
result.append(domain.from_sympy(coeff))
return (domain, result)
|
def construct_domain(obj, **args):
'Construct a minimal domain for the list of coefficients. '
opt = build_options(args)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
(monoms, coeffs) = zip(*obj.items())
else:
coeffs = obj
else:
coeffs = [obj]
coeffs = map(sympify, coeffs)
result = _construct_simple(coeffs, opt)
if (result is not None):
if (result is not False):
(domain, coeffs) = result
else:
(domain, coeffs) = _construct_expression(coeffs, opt)
else:
if opt.composite:
result = _construct_composite(coeffs, opt)
else:
result = None
if (result is not None):
(domain, coeffs) = result
else:
(domain, coeffs) = _construct_expression(coeffs, opt)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
return (domain, dict(zip(monoms, coeffs)))
else:
return (domain, coeffs)
else:
return (domain, coeffs[0])
| 8,700,327,246,323,492,000
|
Construct a minimal domain for the list of coefficients.
|
sympy/polys/constructor.py
|
construct_domain
|
jegerjensen/sympy
|
python
|
def construct_domain(obj, **args):
' '
opt = build_options(args)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
(monoms, coeffs) = zip(*obj.items())
else:
coeffs = obj
else:
coeffs = [obj]
coeffs = map(sympify, coeffs)
result = _construct_simple(coeffs, opt)
if (result is not None):
if (result is not False):
(domain, coeffs) = result
else:
(domain, coeffs) = _construct_expression(coeffs, opt)
else:
if opt.composite:
result = _construct_composite(coeffs, opt)
else:
result = None
if (result is not None):
(domain, coeffs) = result
else:
(domain, coeffs) = _construct_expression(coeffs, opt)
if hasattr(obj, '__iter__'):
if isinstance(obj, dict):
return (domain, dict(zip(monoms, coeffs)))
else:
return (domain, coeffs)
else:
return (domain, coeffs[0])
|
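construct_domain also accepts a dict mapping monomials to coefficients (the monomials are kept) and a bare scalar (a single converted coefficient comes back):

from sympy import Rational
from sympy.polys.constructor import construct_domain

dom, poly = construct_domain({(2,): 3, (0,): Rational(1, 2)})  # dom == QQ
dom, coeff = construct_domain(7)  # non-iterable input: one coefficient back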
def send_osc_message(self, osc_datagram, address, port):
'Send OSC message via UDP.'
self.sock.sendto(osc_datagram, (address, port))
| 850,267,935,205,644,000
|
Send OSC message via UDP.
|
robojam/tiny_performance_player.py
|
send_osc_message
|
cpmpercussion/robojam
|
python
|
def send_osc_message(self, osc_datagram, address, port):
self.sock.sendto(osc_datagram, (address, port))
|
def pad_dgram_four_bytes(self, dgram):
'Pad a datagram up to a multiple of 4 bytes.'
return (dgram + (b'\x00' * (4 - (len(dgram) % 4))))
| 4,929,957,407,282,971,000
|
Pad a datagram up to a multiple of 4 bytes.
|
robojam/tiny_performance_player.py
|
pad_dgram_four_bytes
|
cpmpercussion/robojam
|
python
|
def pad_dgram_four_bytes(self, dgram):
return (dgram + (b'\x00' * (4 - (len(dgram) % 4))))
|
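A quick check of the padding arithmetic. Note that an already-aligned datagram still gains a full block of four NULs, since 4 - (len % 4) is 4 when len is a multiple of 4:

def _pad(dgram):
    # Free-standing copy of pad_dgram_four_bytes, for demonstration only.
    return dgram + (b'\x00' * (4 - (len(dgram) % 4)))

assert _pad(b'/x') == b'/x\x00\x00'              # 2 bytes -> 4
assert _pad(b'abc') == b'abc\x00'                # 3 bytes -> 4
assert _pad(b'abcd') == b'abcd\x00\x00\x00\x00'  # aligned: four extra NULs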
def setSynth(self, instrument='strings', address=DEFAULT_OSC_ADDRESS, port=DEFAULT_OSC_PORT):
'Sends an OSC message to set the synth instrument.'
dgram = b''
dgram += self.pad_dgram_four_bytes('/inst'.encode('utf-8'))
dgram += self.pad_dgram_four_bytes(',s')
dgram += self.pad_dgram_four_bytes(instrument.encode('utf-8'))
self.send_osc_message(dgram, address, port)
| -3,863,916,716,206,835,700
|
Sends an OSC message to set the synth instrument.
|
robojam/tiny_performance_player.py
|
setSynth
|
cpmpercussion/robojam
|
python
|
def setSynth(self, instrument='strings', address=DEFAULT_OSC_ADDRESS, port=DEFAULT_OSC_PORT):
dgram = b''
dgram += self.pad_dgram_four_bytes('/inst'.encode('utf-8'))
dgram += self.pad_dgram_four_bytes(',s')
dgram += self.pad_dgram_four_bytes(instrument.encode('utf-8'))
self.send_osc_message(dgram, address, port)
|
def setSynthRandom(self):
'Choose a random synth for performance playback'
self.setSynth(random.choice(['chirp', 'keys', 'drums', 'strings']))
| -7,457,364,301,034,360,000
|
Choose a random synth for performance playback
|
robojam/tiny_performance_player.py
|
setSynthRandom
|
cpmpercussion/robojam
|
python
|
def setSynthRandom(self):
self.setSynth(random.choice(['chirp', 'keys', 'drums', 'strings']))
|
def sendTouch(self, x, y, z, address=DEFAULT_OSC_ADDRESS, port=DEFAULT_OSC_PORT):
'Sends an OSC message to trigger a touch sound.'
dgram = b''
dgram += self.pad_dgram_four_bytes('/touch'.encode('utf-8'))
dgram += self.pad_dgram_four_bytes(',sfsfsf')
dgram += self.pad_dgram_four_bytes('/x'.encode('utf-8'))
dgram += struct.pack('>f', x)
dgram += self.pad_dgram_four_bytes('/y'.encode('utf-8'))
dgram += struct.pack('>f', y)
dgram += self.pad_dgram_four_bytes('/z'.encode('utf-8'))
dgram += struct.pack('>f', z)
self.send_osc_message(dgram, address, port)
| -7,923,912,209,772,948,000
|
Sends an OSC message to trigger a touch sound.
|
robojam/tiny_performance_player.py
|
sendTouch
|
cpmpercussion/robojam
|
python
|
def sendTouch(self, x, y, z, address=DEFAULT_OSC_ADDRESS, port=DEFAULT_OSC_PORT):
dgram = b''
dgram += self.pad_dgram_four_bytes('/touch'.encode('utf-8'))
dgram += self.pad_dgram_four_bytes(',sfsfsf')
dgram += self.pad_dgram_four_bytes('/x'.encode('utf-8'))
dgram += struct.pack('>f', x)
dgram += self.pad_dgram_four_bytes('/y'.encode('utf-8'))
dgram += struct.pack('>f', y)
dgram += self.pad_dgram_four_bytes('/z'.encode('utf-8'))
dgram += struct.pack('>f', z)
self.send_osc_message(dgram, address, port)
|
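The same /touch datagram assembled by hand, to make the OSC layout explicit: padded address pattern, padded type-tag string, then alternating padded labels and big-endian floats. The coordinate values are illustrative:

import struct

def _pad(dgram):
    return dgram + (b'\x00' * (4 - (len(dgram) % 4)))

dgram = b''
dgram += _pad(b'/touch')   # OSC address pattern
dgram += _pad(b',sfsfsf')  # type tags: three (string, float) pairs
for label, value in ((b'/x', 0.1), (b'/y', 0.5), (b'/z', 0.9)):
    dgram += _pad(label)
    dgram += struct.pack('>f', value)
# sock.sendto(dgram, (address, port)) would ship it, as in send_osc_message.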
def playPerformance(self, perf_df):
'Schedule performance of a tiny performance dataframe.'
for row in perf_df.iterrows():
Timer(row[0], self.sendTouch, args=[row[1].x, row[1].y, row[1].z]).start()
| 861,108,620,275,963,300
|
Schedule performance of a tiny performance dataframe.
|
robojam/tiny_performance_player.py
|
playPerformance
|
cpmpercussion/robojam
|
python
|
def playPerformance(self, perf_df):
for row in perf_df.iterrows():
Timer(row[0], self.sendTouch, args=[row[1].x, row[1].y, row[1].z]).start()
|
def get_loader(data_source: Iterable[dict], open_fn: Callable, dict_transform: Callable=None, sampler=None, collate_fn: Callable=default_collate_fn, batch_size: int=32, num_workers: int=4, shuffle: bool=False, drop_last: bool=False):
'Creates a DataLoader from given source and its open/transform params.\n\n Args:\n data_source (Iterable[dict]): and iterable containing your\n data annotations,\n (for example path to images, labels, bboxes, etc)\n open_fn (Callable): function, that can open your\n annotations dict and\n transfer it to data, needed by your network\n (for example open image by path, or tokenize read string)\n dict_transform (callable): transforms to use on dict\n (for example normalize image, add blur, crop/resize/etc)\n sampler (Sampler, optional): defines the strategy to draw samples from\n the dataset\n collate_fn (callable, optional): merges a list of samples to form a\n mini-batch of Tensor(s). Used when using batched loading from a\n map-style dataset\n batch_size (int, optional): how many samples per batch to load\n num_workers (int, optional): how many subprocesses to use for data\n loading. ``0`` means that the data will be loaded\n in the main process\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: ``False``).\n drop_last (bool, optional): set to ``True`` to drop\n the last incomplete batch, if the dataset size is not divisible\n by the batch size. If ``False`` and the size of dataset\n is not divisible by the batch size, then the last batch\n will be smaller. (default: ``False``)\n\n Returns:\n DataLoader with ``catalyst.data.ListDataset``\n '
dataset = ListDataset(list_data=data_source, open_fn=open_fn, dict_transform=dict_transform)
loader = torch.utils.data.DataLoader(dataset=dataset, sampler=sampler, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle, pin_memory=torch.cuda.is_available(), drop_last=drop_last)
return loader
| 4,295,637,937,727,917,600
|
Creates a DataLoader from given source and its open/transform params.
Args:
data_source (Iterable[dict]): an iterable containing your
data annotations,
(for example path to images, labels, bboxes, etc)
open_fn (Callable): function that can open your
annotations dict and
transfer it to the data needed by your network
(for example open image by path, or tokenize read string)
dict_transform (callable): transforms to use on dict
(for example normalize image, add blur, crop/resize/etc)
sampler (Sampler, optional): defines the strategy to draw samples from
the dataset
collate_fn (callable, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a
map-style dataset
batch_size (int, optional): how many samples per batch to load
num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded
in the main process
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: ``False``).
drop_last (bool, optional): set to ``True`` to drop
the last incomplete batch, if the dataset size is not divisible
by the batch size. If ``False`` and the size of dataset
is not divisible by the batch size, then the last batch
will be smaller. (default: ``False``)
Returns:
DataLoader with ``catalyst.data.ListDataset``
|
catalyst/dl/utils/torch.py
|
get_loader
|
Inkln/catalyst
|
python
|
def get_loader(data_source: Iterable[dict], open_fn: Callable, dict_transform: Callable=None, sampler=None, collate_fn: Callable=default_collate_fn, batch_size: int=32, num_workers: int=4, shuffle: bool=False, drop_last: bool=False):
'Creates a DataLoader from given source and its open/transform params.\n\n Args:\n data_source (Iterable[dict]): and iterable containing your\n data annotations,\n (for example path to images, labels, bboxes, etc)\n open_fn (Callable): function, that can open your\n annotations dict and\n transfer it to data, needed by your network\n (for example open image by path, or tokenize read string)\n dict_transform (callable): transforms to use on dict\n (for example normalize image, add blur, crop/resize/etc)\n sampler (Sampler, optional): defines the strategy to draw samples from\n the dataset\n collate_fn (callable, optional): merges a list of samples to form a\n mini-batch of Tensor(s). Used when using batched loading from a\n map-style dataset\n batch_size (int, optional): how many samples per batch to load\n num_workers (int, optional): how many subprocesses to use for data\n loading. ``0`` means that the data will be loaded\n in the main process\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: ``False``).\n drop_last (bool, optional): set to ``True`` to drop\n the last incomplete batch, if the dataset size is not divisible\n by the batch size. If ``False`` and the size of dataset\n is not divisible by the batch size, then the last batch\n will be smaller. (default: ``False``)\n\n Returns:\n DataLoader with ``catalyst.data.ListDataset``\n '
dataset = ListDataset(list_data=data_source, open_fn=open_fn, dict_transform=dict_transform)
loader = torch.utils.data.DataLoader(dataset=dataset, sampler=sampler, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers, shuffle=shuffle, pin_memory=torch.cuda.is_available(), drop_last=drop_last)
return loader
|
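A hedged usage sketch of get_loader; the toy records and open_fn are invented, and the catalyst imports used by get_loader above (ListDataset, default_collate_fn) are assumed to be in scope:

import torch

records = [{'features': [0.0, 1.0], 'label': 0},
           {'features': [1.0, 0.0], 'label': 1}]

def open_fn(record):
    # Turn one annotation dict into the tensors the network expects.
    return {'features': torch.tensor(record['features']),
            'targets': torch.tensor(record['label'])}

loader = get_loader(records, open_fn=open_fn, batch_size=2, num_workers=0)
for batch in loader:
    print(batch['features'].shape, batch['targets'])  # torch.Size([2, 2]), tensor([0, 1])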
def template_to_descriptor(template: AttributeTemplate, *, headers: List[str]=[]) -> Descriptor:
'\n Convert a GEMD attribute template into an AI Engine Descriptor.\n\n IntBounds cannot be converted because they have no matching descriptor type.\n CompositionBounds can only be converted when every component is an element, in which case\n they are converted to ChemicalFormulaDescriptors.\n\n Parameters\n ----------\n template: AttributeTemplate\n Template to convert into a descriptor\n headers: List[str]\n Names of parent relationships to includes as prefixes\n to the template name in the descriptor key\n Default: []\n\n Returns\n -------\n Descriptor\n Descriptor with a key matching the template name and type corresponding to the bounds\n\n '
headers = (headers + [template.name])
descriptor_key = '~'.join(headers)
bounds = template.bounds
if isinstance(bounds, RealBounds):
return RealDescriptor(key=descriptor_key, lower_bound=bounds.lower_bound, upper_bound=bounds.upper_bound, units=bounds.default_units)
if isinstance(bounds, CategoricalBounds):
return CategoricalDescriptor(key=descriptor_key, categories=bounds.categories)
if isinstance(bounds, MolecularStructureBounds):
return MolecularStructureDescriptor(key=descriptor_key)
if isinstance(bounds, CompositionBounds):
if set(bounds.components).issubset(EmpiricalFormula.all_elements()):
return ChemicalFormulaDescriptor(key=descriptor_key)
else:
msg = 'Cannot create descriptor for CompositionBounds with non-atomic components'
raise NoEquivalentDescriptorError(msg)
if isinstance(bounds, IntegerBounds):
raise NoEquivalentDescriptorError('Cannot create a descriptor for integer-valued data')
raise ValueError('Template has unrecognized bounds: {}'.format(type(bounds)))
| 1,347,197,254,586,883,000
|
Convert a GEMD attribute template into an AI Engine Descriptor.
IntegerBounds cannot be converted because they have no matching descriptor type.
CompositionBounds can only be converted when every component is an element, in which case
they are converted to ChemicalFormulaDescriptors.
Parameters
----------
template: AttributeTemplate
Template to convert into a descriptor
headers: List[str]
Names of parent relationships to include as prefixes
to the template name in the descriptor key
Default: []
Returns
-------
Descriptor
Descriptor with a key matching the template name and type corresponding to the bounds
|
src/citrine/builders/descriptors.py
|
template_to_descriptor
|
CitrineInformatics/citrine-python
|
python
|
def template_to_descriptor(template: AttributeTemplate, *, headers: List[str]=[]) -> Descriptor:
'\n Convert a GEMD attribute template into an AI Engine Descriptor.\n\n IntBounds cannot be converted because they have no matching descriptor type.\n CompositionBounds can only be converted when every component is an element, in which case\n they are converted to ChemicalFormulaDescriptors.\n\n Parameters\n ----------\n template: AttributeTemplate\n Template to convert into a descriptor\n headers: List[str]\n Names of parent relationships to includes as prefixes\n to the template name in the descriptor key\n Default: []\n\n Returns\n -------\n Descriptor\n Descriptor with a key matching the template name and type corresponding to the bounds\n\n '
headers = (headers + [template.name])
descriptor_key = '~'.join(headers)
bounds = template.bounds
if isinstance(bounds, RealBounds):
return RealDescriptor(key=descriptor_key, lower_bound=bounds.lower_bound, upper_bound=bounds.upper_bound, units=bounds.default_units)
if isinstance(bounds, CategoricalBounds):
return CategoricalDescriptor(key=descriptor_key, categories=bounds.categories)
if isinstance(bounds, MolecularStructureBounds):
return MolecularStructureDescriptor(key=descriptor_key)
if isinstance(bounds, CompositionBounds):
if set(bounds.components).issubset(EmpiricalFormula.all_elements()):
return ChemicalFormulaDescriptor(key=descriptor_key)
else:
msg = 'Cannot create descriptor for CompositionBounds with non-atomic components'
raise NoEquivalentDescriptorError(msg)
if isinstance(bounds, IntegerBounds):
raise NoEquivalentDescriptorError('Cannot create a descriptor for integer-valued data')
raise ValueError('Template has unrecognized bounds: {}'.format(type(bounds)))
|
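A sketch of the real-valued branch; gemd import paths may differ slightly between versions, and the template values are invented:

from gemd.entity.bounds.real_bounds import RealBounds
from gemd.entity.template.condition_template import ConditionTemplate

tmpl = ConditionTemplate(name='Temperature',
                         bounds=RealBounds(lower_bound=0, upper_bound=500,
                                           default_units='kelvin'))
desc = template_to_descriptor(tmpl, headers=['Firing'])
# desc is a RealDescriptor with key 'Firing~Temperature' and units 'kelvin'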
@staticmethod
def from_templates(*, project: Project, scope: str):
'\n Build a PlatformVocabulary from the templates visible to a project.\n\n All of the templates with the given scope are downloaded and converted into descriptors.\n The uid values associated with that scope are used as the index into the dictionary.\n For example, using scope "my_templates" with a template with\n uids={"my_templates": "density"} would be indexed into the dictionary as "density".\n\n Parameters\n ----------\n project: Project\n Project on the Citrine Platform to read templates from\n scope: str\n Unique ID scope from which to pull the template names\n\n Returns\n -------\n PlatformVocabulary\n\n '
def _from_collection(collection: DataConceptsCollection):
return {x.uids[scope]: x for x in collection.list() if (scope in x.uids)}
properties = _from_collection(project.property_templates)
parameters = _from_collection(project.parameter_templates)
conditions = _from_collection(project.condition_templates)
res = {}
for (k, v) in chain(properties.items(), parameters.items(), conditions.items()):
try:
desc = template_to_descriptor(v)
res[k] = desc
except NoEquivalentDescriptorError:
continue
return PlatformVocabulary(entries=res)
| -2,609,227,021,841,906,000
|
Build a PlatformVocabulary from the templates visible to a project.
All of the templates with the given scope are downloaded and converted into descriptors.
The uid values associated with that scope are used as the index into the dictionary.
For example, using scope "my_templates" with a template with
uids={"my_templates": "density"} would be indexed into the dictionary as "density".
Parameters
----------
project: Project
Project on the Citrine Platform to read templates from
scope: str
Unique ID scope from which to pull the template names
Returns
-------
PlatformVocabulary
|
src/citrine/builders/descriptors.py
|
from_templates
|
CitrineInformatics/citrine-python
|
python
|
@staticmethod
def from_templates(*, project: Project, scope: str):
'\n Build a PlatformVocabulary from the templates visible to a project.\n\n All of the templates with the given scope are downloaded and converted into descriptors.\n The uid values associated with that scope are used as the index into the dictionary.\n For example, using scope "my_templates" with a template with\n uids={"my_templates": "density"} would be indexed into the dictionary as "density".\n\n Parameters\n ----------\n project: Project\n Project on the Citrine Platform to read templates from\n scope: str\n Unique ID scope from which to pull the template names\n\n Returns\n -------\n PlatformVocabulary\n\n '
def _from_collection(collection: DataConceptsCollection):
return {x.uids[scope]: x for x in collection.list() if (scope in x.uids)}
properties = _from_collection(project.property_templates)
parameters = _from_collection(project.parameter_templates)
conditions = _from_collection(project.condition_templates)
res = {}
for (k, v) in chain(properties.items(), parameters.items(), conditions.items()):
try:
desc = template_to_descriptor(v)
res[k] = desc
except NoEquivalentDescriptorError:
continue
return PlatformVocabulary(entries=res)
|
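A hypothetical call pattern; `project` stands for an authenticated Citrine project handle, and PlatformVocabulary is assumed to expose mapping-style access to its entries:

vocab = PlatformVocabulary.from_templates(project=project, scope='my_templates')
density = vocab['density']  # the template registered with uids={'my_templates': 'density'}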
@staticmethod
def from_material(*, project: Project, material: Union[(str, UUID, LinkByUID, MaterialRun)], mode: AutoConfigureMode=AutoConfigureMode.PLAIN, full_history: bool=True):
"[ALPHA] Build a PlatformVocabulary from templates appearing in a material history.\n\n All of the attribute templates that appear throughout the material's history\n are extracted and converted into descriptors.\n\n Descriptor keys are formatted according to the option set by mode.\n For example, if a condition template with name 'Condition 1'\n appears in a parent process with name 'Parent',\n the mode option produces the following descriptor key:\n\n mode = AutoConfigMode.PLAIN --> 'Parent~Condition 1'\n mode = AutoConfigMode.FORMULATION --> 'Condition 1'\n\n Parameters\n ----------\n project: Project\n Project to use when accessing the Citrine Platform.\n material: Union[str, UUID, LinkByUID, MaterialRun]\n A representation of the material to extract descriptors from.\n mode: AutoConfigureMode\n Formatting option for descriptor keys in the platform vocabulary.\n Option AutoConfigMode.PLAIN includes headers from the parent object,\n whereas option AutoConfigMode.FORMULATION does not.\n Default: AutoConfigureMode.PLAIN\n full_history: bool\n Whether to extract descriptors from the full material history,\n or only the provided (terminal) material.\n Default: True\n\n Returns\n -------\n PlatformVocabulary\n\n "
if (not isinstance(mode, AutoConfigureMode)):
raise TypeError('mode must be an option from AutoConfigureMode')
history = project.material_runs.get_history(id=material)
if full_history:
search_history = recursive_flatmap(history, (lambda x: [x]), unidirectional=False)
set_uuids(search_history, 'id')
else:
search_history = [history.spec.template, history.process.template]
search_history.extend([msr.template for msr in history.measurements])
search_history = [x for x in search_history if (x is not None)]
res = {}
for obj in search_history:
templates = []
if isinstance(obj, HasPropertyTemplates):
for property in obj.properties:
templates.append(property[0])
if isinstance(obj, HasConditionTemplates):
for condition in obj.conditions:
templates.append(condition[0])
if isinstance(obj, HasParameterTemplates):
for parameter in obj.parameters:
templates.append(parameter[0])
headers = []
if (mode == AutoConfigureMode.PLAIN):
headers.append(obj.name)
for tmpl in templates:
try:
desc = template_to_descriptor(tmpl, headers=headers)
res[desc.key] = desc
except NoEquivalentDescriptorError:
continue
return PlatformVocabulary(entries=res)
| 5,403,301,014,726,067,000
|
[ALPHA] Build a PlatformVocabulary from templates appearing in a material history.
All of the attribute templates that appear throughout the material's history
are extracted and converted into descriptors.
Descriptor keys are formatted according to the option set by mode.
For example, if a condition template with name 'Condition 1'
appears in a parent process with name 'Parent',
the mode option produces the following descriptor key:
mode = AutoConfigureMode.PLAIN --> 'Parent~Condition 1'
mode = AutoConfigureMode.FORMULATION --> 'Condition 1'
Parameters
----------
project: Project
Project to use when accessing the Citrine Platform.
material: Union[str, UUID, LinkByUID, MaterialRun]
A representation of the material to extract descriptors from.
mode: AutoConfigureMode
Formatting option for descriptor keys in the platform vocabulary.
Option AutoConfigureMode.PLAIN includes headers from the parent object,
whereas option AutoConfigureMode.FORMULATION does not.
Default: AutoConfigureMode.PLAIN
full_history: bool
Whether to extract descriptors from the full material history,
or only the provided (terminal) material.
Default: True
Returns
-------
PlatformVocabulary
|
src/citrine/builders/descriptors.py
|
from_material
|
CitrineInformatics/citrine-python
|
python
|
@staticmethod
def from_material(*, project: Project, material: Union[(str, UUID, LinkByUID, MaterialRun)], mode: AutoConfigureMode=AutoConfigureMode.PLAIN, full_history: bool=True):
"[ALPHA] Build a PlatformVocabulary from templates appearing in a material history.\n\n All of the attribute templates that appear throughout the material's history\n are extracted and converted into descriptors.\n\n Descriptor keys are formatted according to the option set by mode.\n For example, if a condition template with name 'Condition 1'\n appears in a parent process with name 'Parent',\n the mode option produces the following descriptor key:\n\n mode = AutoConfigMode.PLAIN --> 'Parent~Condition 1'\n mode = AutoConfigMode.FORMULATION --> 'Condition 1'\n\n Parameters\n ----------\n project: Project\n Project to use when accessing the Citrine Platform.\n material: Union[str, UUID, LinkByUID, MaterialRun]\n A representation of the material to extract descriptors from.\n mode: AutoConfigureMode\n Formatting option for descriptor keys in the platform vocabulary.\n Option AutoConfigMode.PLAIN includes headers from the parent object,\n whereas option AutoConfigMode.FORMULATION does not.\n Default: AutoConfigureMode.PLAIN\n full_history: bool\n Whether to extract descriptors from the full material history,\n or only the provided (terminal) material.\n Default: True\n\n Returns\n -------\n PlatformVocabulary\n\n "
if (not isinstance(mode, AutoConfigureMode)):
raise TypeError('mode must be an option from AutoConfigureMode')
history = project.material_runs.get_history(id=material)
if full_history:
search_history = recursive_flatmap(history, (lambda x: [x]), unidirectional=False)
set_uuids(search_history, 'id')
else:
search_history = [history.spec.template, history.process.template]
search_history.extend([msr.template for msr in history.measurements])
search_history = [x for x in search_history if (x is not None)]
res = {}
for obj in search_history:
templates = []
if isinstance(obj, HasPropertyTemplates):
for property in obj.properties:
templates.append(property[0])
if isinstance(obj, HasConditionTemplates):
for condition in obj.conditions:
templates.append(condition[0])
if isinstance(obj, HasParameterTemplates):
for parameter in obj.parameters:
templates.append(parameter[0])
headers = []
if (mode == AutoConfigureMode.PLAIN):
headers.append(obj.name)
for tmpl in templates:
try:
desc = template_to_descriptor(tmpl, headers=headers)
res[desc.key] = desc
except NoEquivalentDescriptorError:
continue
return PlatformVocabulary(entries=res)
|
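And the material-history variant; `project` and `material` are placeholders, and mode only changes how descriptor keys are prefixed:

vocab = PlatformVocabulary.from_material(project=project, material=material,
                                         mode=AutoConfigureMode.PLAIN)
# Keys look like 'Parent~Condition 1'; AutoConfigureMode.FORMULATION drops the prefix.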
def drifting(self):
'Get list of drifting times'
return [n for n in self if n.drifting]
| 5,809,092,777,298,942,000
|
Get list of drifting times
|
pyannote/core/transcription.py
|
drifting
|
Parisson/pyannote-core
|
python
|
def drifting(self):
return [n for n in self if n.drifting]
|
def anchored(self):
'Get list of anchored times'
return [n for n in self if n.anchored]
| 8,031,248,592,374,523,000
|
Get list of anchored times
|
pyannote/core/transcription.py
|
anchored
|
Parisson/pyannote-core
|
python
|
def anchored(self):
return [n for n in self if n.anchored]
|
def add_edge(self, t1, t2, key=None, attr_dict=None, **attrs):
"Add annotation to the graph between times t1 and t2\n\n Parameters\n ----------\n t1, t2: float, str or None\n data : dict, optional\n {annotation_type: annotation_value} dictionary\n\n Example\n -------\n >>> G = Transcription()\n >>> G.add_edge(T(1.), T(), speaker='John', 'speech'='Hello world!')\n "
t1 = T(t1)
t2 = T(t2)
if (t1.anchored and t2.anchored):
assert (t1 <= t2)
super(Transcription, self).add_edge(t1, t2, key=key, attr_dict=attr_dict, **attrs)
| 1,268,003,944,537,095,000
|
Add annotation to the graph between times t1 and t2
Parameters
----------
t1, t2: float, str or None
data : dict, optional
{annotation_type: annotation_value} dictionary
Example
-------
>>> G = Transcription()
>>> G.add_edge(T(1.), T(), speaker='John', 'speech'='Hello world!')
|
pyannote/core/transcription.py
|
add_edge
|
Parisson/pyannote-core
|
python
|
def add_edge(self, t1, t2, key=None, attr_dict=None, **attrs):
"Add annotation to the graph between times t1 and t2\n\n Parameters\n ----------\n t1, t2: float, str or None\n data : dict, optional\n {annotation_type: annotation_value} dictionary\n\n Example\n -------\n >>> G = Transcription()\n >>> G.add_edge(T(1.), T(), speaker='John', 'speech'='Hello world!')\n "
t1 = T(t1)
t2 = T(t2)
if (t1.anchored and t2.anchored):
assert (t1 <= t2)
super(Transcription, self).add_edge(t1, t2, key=key, attr_dict=attr_dict, **attrs)
|
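A tiny graph built with add_edge, assuming the default Transcription constructor; per the docstrings above, T(0.0) yields an anchored time and T() a drifting one:

trn = Transcription()
start, mid, end = T(0.0), T(), T(3.0)  # anchored, drifting, anchored
trn.add_edge(start, mid, speech='Hello')
trn.add_edge(mid, end, speech='world!')
trn.drifting()  # [mid]
trn.anchored()  # [T(0.0), T(3.0)], order not guaranteed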
def relabel_drifting_nodes(self, mapping=None):
'Relabel drifting nodes\n\n Parameters\n ----------\n mapping : dict, optional\n A dictionary with the old labels as keys and new labels as values.\n\n Returns\n -------\n g : Transcription\n New annotation graph\n mapping : dict\n A dictionary with the new labels as keys and old labels as values.\n Can be used to get back to the version before relabelling.\n '
if (mapping is None):
old2new = {n: T() for n in self.drifting()}
else:
old2new = dict(mapping)
new2old = {new: old for (old, new) in old2new.iteritems()}
return (nx.relabel_nodes(self, old2new, copy=True), new2old)
| 7,044,763,319,659,581,000
|
Relabel drifting nodes
Parameters
----------
mapping : dict, optional
A dictionary with the old labels as keys and new labels as values.
Returns
-------
g : Transcription
New annotation graph
mapping : dict
A dictionary with the new labels as keys and old labels as values.
Can be used to get back to the version before relabelling.
|
pyannote/core/transcription.py
|
relabel_drifting_nodes
|
Parisson/pyannote-core
|
python
|
def relabel_drifting_nodes(self, mapping=None):
'Relabel drifting nodes\n\n Parameters\n ----------\n mapping : dict, optional\n A dictionary with the old labels as keys and new labels as values.\n\n Returns\n -------\n g : Transcription\n New annotation graph\n mapping : dict\n A dictionary with the new labels as keys and old labels as values.\n Can be used to get back to the version before relabelling.\n '
if (mapping is None):
old2new = {n: T() for n in self.drifting()}
else:
old2new = dict(mapping)
new2old = {new: old for (old, new) in old2new.iteritems()}
return (nx.relabel_nodes(self, old2new, copy=True), new2old)
|
def crop(self, source, target=None):
'Get minimum subgraph between source time and target time\n\n Parameters\n ----------\n source : Segment\n target : float or str, optional\n\n Returns\n -------\n g : Transcription\n Sub-graph between source and target\n '
if isinstance(source, Segment):
(source, target) = (source.start, source.end)
source = T(source)
target = T(target)
if (source.anchored or target.anchored):
anchored = sorted(self.anchored())
if source.drifting:
if (source not in self):
raise ValueError(('Drifting time %s is not in the transcription.' % source))
else:
from_source = ({source} | nx.algorithms.descendants(self, source))
elif (source in self):
from_source = ({source} | nx.algorithms.descendants(self, source))
elif (source < anchored[0]):
from_source = set(self)
else:
before = [n for n in anchored if (n <= source)][(- 1)]
from_source = ({before} | nx.algorithms.descendants(self, before))
if target.drifting:
if (target not in self):
raise ValueError(('Drifting time %s is not in the transcription.' % target))
else:
to_target = ({target} | nx.algorithms.ancestors(self, target))
elif (target in self):
to_target = ({target} | nx.algorithms.ancestors(self, target))
elif (target > anchored[(- 1)]):
to_target = set(self)
else:
after = [n for n in anchored if (n >= target)][0]
to_target = ({after} | nx.algorithms.ancestors(self, after))
nbunch = (from_source & to_target)
return self.subgraph(nbunch)
| -6,786,418,825,337,886,000
|
Get minimum subgraph between source time and target time
Parameters
----------
source : Segment
target : float or str, optional
Returns
-------
g : Transcription
Sub-graph between source and target
|
pyannote/core/transcription.py
|
crop
|
Parisson/pyannote-core
|
python
|
def crop(self, source, target=None):
'Get minimum subgraph between source time and target time\n\n Parameters\n ----------\n source : Segment\n target : float or str, optional\n\n Returns\n -------\n g : Transcription\n Sub-graph between source and target\n '
if isinstance(source, Segment):
(source, target) = (source.start, source.end)
source = T(source)
target = T(target)
if (source.anchored or target.anchored):
anchored = sorted(self.anchored())
if source.drifting:
if (source not in self):
raise ValueError(('Drifting time %s is not in the transcription.' % source))
else:
from_source = ({source} | nx.algorithms.descendants(self, source))
elif (source in self):
from_source = ({source} | nx.algorithms.descendants(self, source))
elif (source < anchored[0]):
from_source = set(self)
else:
before = [n for n in anchored if (n <= source)][(- 1)]
from_source = ({before} | nx.algorithms.descendants(self, before))
if target.drifting:
if (target not in self):
raise ValueError(('Drifting time %s is not in the transcription.' % target))
else:
to_target = ({target} | nx.algorithms.ancestors(self, target))
elif (target in self):
to_target = ({target} | nx.algorithms.ancestors(self, target))
elif (target > anchored[(- 1)]):
to_target = set(self)
else:
after = [n for n in anchored if (n >= target)][0]
to_target = ({after} | nx.algorithms.ancestors(self, after))
nbunch = (from_source & to_target)
return self.subgraph(nbunch)
|
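Continuing the small graph sketched after add_edge, crop with a Segment keeps only the subgraph between its bounds:

from pyannote.core import Segment

sub = trn.crop(Segment(0.0, 3.0))  # everything between the two anchored times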
def _merge(self, drifting_t, another_t):
'Helper function to merge `drifting_t` with `another_t`\n\n Assumes that both `drifting_t` and `another_t` exists.\n Also assumes that `drifting_t` is an instance of `TFloating`\n (otherwise, this might lead to weird graph configuration)\n\n Parameters\n ----------\n drifting_t :\n Existing drifting time in graph\n another_t :\n Existing time in graph\n '
for (t, _, key, data) in self.in_edges_iter(nbunch=[drifting_t], data=True, keys=True):
if self.has_edge(t, another_t, key=key):
key = None
self.add_edge(t, another_t, key=key, attr_dict=data)
for (_, t, key, data) in self.edges_iter(nbunch=[drifting_t], data=True, keys=True):
if self.has_edge(another_t, t, key=key):
key = None
self.add_edge(another_t, t, key=key, attr_dict=data)
self.remove_node(drifting_t)
| 4,310,718,418,287,191,000
|
Helper function to merge `drifting_t` with `another_t`
Assumes that both `drifting_t` and `another_t` exist.
Also assumes that `drifting_t` is an instance of `TFloating`
(otherwise, this might lead to weird graph configuration)
Parameters
----------
drifting_t :
Existing drifting time in graph
another_t :
Existing time in graph
|
pyannote/core/transcription.py
|
_merge
|
Parisson/pyannote-core
|
python
|
def _merge(self, drifting_t, another_t):
'Helper function to merge `drifting_t` with `another_t`\n\n Assumes that both `drifting_t` and `another_t` exists.\n Also assumes that `drifting_t` is an instance of `TFloating`\n (otherwise, this might lead to weird graph configuration)\n\n Parameters\n ----------\n drifting_t :\n Existing drifting time in graph\n another_t :\n Existing time in graph\n '
for (t, _, key, data) in self.in_edges_iter(nbunch=[drifting_t], data=True, keys=True):
if self.has_edge(t, another_t, key=key):
key = None
self.add_edge(t, another_t, key=key, attr_dict=data)
for (_, t, key, data) in self.edges_iter(nbunch=[drifting_t], data=True, keys=True):
if self.has_edge(another_t, t, key=key):
key = None
self.add_edge(another_t, t, key=key, attr_dict=data)
self.remove_node(drifting_t)
|
def anchor(self, drifting_t, anchored_t):
'\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n o -- [ D ] -- o ==> o -- [ A ] -- o\n\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Anchor `drifting_t` at `anchored_t`\n\n Parameters\n ----------\n drifting_t :\n Drifting time to anchor\n anchored_t :\n When to anchor `drifting_t`\n\n '
drifting_t = T(drifting_t)
anchored_t = T(anchored_t)
assert ((drifting_t in self) and drifting_t.drifting)
assert anchored_t.anchored
if (anchored_t not in self):
self.add_node(anchored_t)
self._merge(drifting_t, anchored_t)
| -7,984,834,150,691,994,000
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
o -- [ D ] -- o ==> o -- [ A ] -- o
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Anchor `drifting_t` at `anchored_t`
Parameters
----------
drifting_t :
Drifting time to anchor
anchored_t :
When to anchor `drifting_t`
|
pyannote/core/transcription.py
|
anchor
|
Parisson/pyannote-core
|
python
|
def anchor(self, drifting_t, anchored_t):
'\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n o -- [ D ] -- o ==> o -- [ A ] -- o\n\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Anchor `drifting_t` at `anchored_t`\n\n Parameters\n ----------\n drifting_t :\n Drifting time to anchor\n anchored_t :\n When to anchor `drifting_t`\n\n '
drifting_t = T(drifting_t)
anchored_t = T(anchored_t)
assert ((drifting_t in self) and drifting_t.drifting)
assert anchored_t.anchored
if (anchored_t not in self):
self.add_node(anchored_t)
self._merge(drifting_t, anchored_t)
|
def align(self, one_t, another_t):
'\n Align two (potentially drifting) times\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n o -- [ F ] -- o o o\n ⟍ ⟋\n ==> [ F ]\n ⟋ ⟍\n o -- [ f ] -- o o o\n\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Parameters\n ----------\n one_t, another_t\n Two times to be aligned.\n\n Notes\n -----\n * If both `one_t` and `another_t` are drifting, the resulting graph\n will no longer contain `one_t`.\n * In case `another_t` is anchored, `align` is equivalent to `anchor`.\n * `one_t` and `another_t` cannot be both anchored.\n\n '
one_t = T(one_t)
another_t = T(another_t)
assert (one_t in self)
assert (another_t in self)
if one_t.drifting:
self._merge(one_t, another_t)
elif another_t.drifting:
self._merge(another_t, one_t)
else:
raise ValueError('Cannot align two anchored times')
| -763,909,109,057,423,700
|
Align two (potentially drifting) times
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
o -- [ F ] -- o o o
⟍ ⟋
==> [ F ]
⟋ ⟍
o -- [ f ] -- o o o
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Parameters
----------
one_t, another_t
Two times to be aligned.
Notes
-----
* If both `one_t` and `another_t` are drifting, the resulting graph
will no longer contain `one_t`.
* In case `another_t` is anchored, `align` is equivalent to `anchor`.
* `one_t` and `another_t` cannot be both anchored.
|
pyannote/core/transcription.py
|
align
|
Parisson/pyannote-core
|
python
|
def align(self, one_t, another_t):
'\n Align two (potentially drifting) times\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n o -- [ F ] -- o o o\n ⟍ ⟋\n ==> [ F ]\n ⟋ ⟍\n o -- [ f ] -- o o o\n\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Parameters\n ----------\n one_t, another_t\n Two times to be aligned.\n\n Notes\n -----\n * If both `one_t` and `another_t` are drifting, the resulting graph\n will no longer contain `one_t`.\n * In case `another_t` is anchored, `align` is equivalent to `anchor`.\n * `one_t` and `another_t` cannot be both anchored.\n\n '
one_t = T(one_t)
another_t = T(another_t)
assert (one_t in self)
assert (another_t in self)
if one_t.drifting:
self._merge(one_t, another_t)
elif another_t.drifting:
self._merge(another_t, one_t)
else:
raise ValueError('Cannot align two anchored times')
|
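anchor() and align() on the same sketch; both ultimately call _merge, and at most one of the two merged times may be anchored:

trn.anchor(mid, 1.5)         # drifting `mid` is replaced by the anchored T(1.5)
# trn.align(T(0.0), T(3.0))  # would raise ValueError: both times are anchored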
def pre_align(self, t1, t2, t):
"\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n p -- [ t1 ] p [ t1 ]\n ⟍ ⟋\n ==> [ t ]\n ⟋ ⟍\n p' -- [ t2 ] p' [ t2 ]\n\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n "
t1 = T(t1)
t2 = T(t2)
t = T(t)
pred1 = self.predecessors(t1)
for p in pred1:
for (key, data) in self[p][t1].iteritems():
assert (not data)
pred2 = self.predecessors(t2)
for p in pred2:
for (key, data) in self[p][t2].iteritems():
assert (not data)
for p in pred1:
for key in list(self[p][t1]):
self.remove_edge(p, t1, key=key)
for p in pred2:
for key in list(self[p][t2]):
self.remove_edge(p, t2, key=key)
for p in (set(pred1) | set(pred2)):
self.add_edge(p, t)
self.add_edge(t, t1)
self.add_edge(t, t2)
| -7,347,736,569,357,508,000
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
p -- [ t1 ] p [ t1 ]
⟍ ⟋
==> [ t ]
⟋ ⟍
p' -- [ t2 ] p' [ t2 ]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
pyannote/core/transcription.py
|
pre_align
|
Parisson/pyannote-core
|
python
|
def pre_align(self, t1, t2, t):
"\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n p -- [ t1 ] p [ t1 ]\n ⟍ ⟋\n ==> [ t ]\n ⟋ ⟍\n p' -- [ t2 ] p' [ t2 ]\n\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n "
t1 = T(t1)
t2 = T(t2)
t = T(t)
pred1 = self.predecessors(t1)
for p in pred1:
for (key, data) in self[p][t1].iteritems():
assert (not data)
pred2 = self.predecessors(t2)
for p in pred2:
for (key, data) in self[p][t2].iteritems():
assert (not data)
for p in pred1:
for key in list(self[p][t1]):
self.remove_edge(p, t1, key=key)
for p in pred2:
for key in list(self[p][t2]):
self.remove_edge(p, t2, key=key)
for p in (set(pred1) | set(pred2)):
self.add_edge(p, t)
self.add_edge(t, t1)
self.add_edge(t, t2)
|
def post_align(self, t1, t2, t):
"\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n [ t1 ] -- s [ t1 ] s\n ⟍ ⟋\n ==> [ t ]\n ⟋ ⟍\n [ t2 ] -- s' [ t2 ] s'\n\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n "
t1 = T(t1)
t2 = T(t2)
t = T(t)
succ1 = self.successors(t1)
for s in succ1:
for (key, data) in self[t1][s].iteritems():
assert (not data)
succ2 = self.successors(t2)
for s in succ2:
for (key, data) in self[t2][s].iteritems():
assert (not data)
for s in succ1:
for key in list(self[t1][s]):
self.remove_edge(t1, s, key=key)
for s in succ2:
for key in list(self[t2][s]):
self.remove_edge(t2, s, key=key)
for s in (set(succ1) | set(succ2)):
self.add_edge(t, s)
self.add_edge(t1, t)
self.add_edge(t2, t)
| 2,734,299,867,145,611,000
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[ t1 ] -- s [ t1 ] s
⟍ ⟋
==> [ t ]
⟋ ⟍
[ t2 ] -- s' [ t2 ] s'
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
pyannote/core/transcription.py
|
post_align
|
Parisson/pyannote-core
|
python
|
def post_align(self, t1, t2, t):
"\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n [ t1 ] -- s [ t1 ] s\n ⟍ ⟋\n ==> [ t ]\n ⟋ ⟍\n [ t2 ] -- s' [ t2 ] s'\n\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n "
t1 = T(t1)
t2 = T(t2)
t = T(t)
succ1 = self.successors(t1)
for s in succ1:
for (key, data) in self[t1][s].iteritems():
assert (not data)
succ2 = self.successors(t2)
for s in succ2:
for (key, data) in self[t2][s].iteritems():
assert (not data)
for s in succ1:
for key in list(self[t1][s]):
self.remove_edge(t1, s, key=key)
for s in succ2:
for key in list(self[t2][s]):
self.remove_edge(t2, s, key=key)
for s in (set(succ1) | set(succ2)):
self.add_edge(t, s)
self.add_edge(t1, t)
self.add_edge(t2, t)
|
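post_align is the mirror image on the successor side; flipping the sketch accordingly:

import networkx as nx

def post_align(g, t1, t2, t):
    succ1, succ2 = list(g.successors(t1)), list(g.successors(t2))
    for s in succ1:
        g.remove_edge(t1, s)              # detach t1 from its successors
    for s in succ2:
        g.remove_edge(t2, s)              # detach t2 from its successors
    for s in set(succ1) | set(succ2):
        g.add_edge(t, s)                  # route every successor through t
    g.add_edge(t1, t)
    g.add_edge(t2, t)

g = nx.MultiDiGraph([('t1', 's'), ('t2', "s'")])
post_align(g, 't1', 't2', 't')
print(sorted(g.edges()))   # [('t', 's'), ('t', "s'"), ('t1', 't'), ('t2', 't')]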
def ordering_graph(self):
'Ordering graph\n\n t1 --> t2 in the ordering graph indicates that t1 happens before t2.\n A missing edge simply means that it is not clear yet.\n\n '
g = nx.DiGraph()
for t in self.nodes_iter():
g.add_node(t)
for (t1, t2) in self.edges_iter():
g.add_edge(t1, t2)
anchored = sorted(self.anchored())
for (t1, t2) in itertools.combinations(anchored, 2):
g.add_edge(t1, t2)
_g = g.copy()
for t1 in _g:
for t2 in set([target for (_, target) in nx.bfs_edges(_g, t1)]):
g.add_edge(t1, t2)
return g
| -4,126,337,984,936,054,000
|
Ordering graph
t1 --> t2 in the ordering graph indicates that t1 happens before t2.
A missing edge simply means that it is not clear yet.
|
pyannote/core/transcription.py
|
ordering_graph
|
Parisson/pyannote-core
|
python
|
def ordering_graph(self):
'Ordering graph\n\n t1 --> t2 in the ordering graph indicates that t1 happens before t2.\n A missing edge simply means that it is not clear yet.\n\n '
g = nx.DiGraph()
for t in self.nodes_iter():
g.add_node(t)
for (t1, t2) in self.edges_iter():
g.add_edge(t1, t2)
anchored = sorted(self.anchored())
for (t1, t2) in itertools.combinations(anchored, 2):
g.add_edge(t1, t2)
_g = g.copy()
for t1 in _g:
for t2 in set([target for (_, target) in nx.bfs_edges(_g, t1)]):
g.add_edge(t1, t2)
return g
|
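The hand-rolled loop over nx.bfs_edges builds a transitive closure; on networkx 2.x the same result is one call (a sketch of the concept, not of the original networkx 1.x nodes_iter/edges_iter API):

import networkx as nx

g = nx.DiGraph([('t1', 't2'), ('t2', 't3')])
closure = nx.transitive_closure(g)
print(closure.has_edge('t1', 't3'))   # True: t1 happens before t3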
def temporal_sort(self):
'Get nodes sorted in temporal order\n\n Remark\n ------\n This relies on a combination of temporal ordering of anchored times\n and topological ordering for drifting times.\n To be 100% sure that one drifting time happens before another time,\n check the ordering graph (method .ordering_graph()).\n '
g = nx.DiGraph()
for t in self.nodes_iter():
g.add_node(t)
for (t1, t2) in self.edges_iter():
g.add_edge(t1, t2)
anchored = sorted(self.anchored())
for (t1, t2) in pairwise(anchored):
g.add_edge(t1, t2)
return nx.topological_sort(g)
| 8,249,066,904,300,179,000
|
Get nodes sorted in temporal order
Remark
------
This relies on a combination of temporal ordering of anchored times
and topological ordering for drifting times.
To be 100% sure that one drifting time happens before another time,
check the ordering graph (method .ordering_graph()).
|
pyannote/core/transcription.py
|
temporal_sort
|
Parisson/pyannote-core
|
python
|
def temporal_sort(self):
'Get nodes sorted in temporal order\n\n Remark\n ------\n This relies on a combination of temporal ordering of anchored times\n and topological ordering for drifting times.\n To be 100% sure that one drifting time happens before another time,\n check the ordering graph (method .ordering_graph()).\n '
g = nx.DiGraph()
for t in self.nodes_iter():
g.add_node(t)
for (t1, t2) in self.edges_iter():
g.add_edge(t1, t2)
anchored = sorted(self.anchored())
for (t1, t2) in pairwise(anchored):
g.add_edge(t1, t2)
return nx.topological_sort(g)
|
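A self-contained sketch of the idea: chain the anchored times in numeric order, then topologically sort the whole graph (drifting times are strings here for illustration only; pairwise is the standard itertools recipe the method relies on):

import itertools
import networkx as nx

def pairwise(iterable):
    a, b = itertools.tee(iterable)
    next(b, None)
    return zip(a, b)

g = nx.DiGraph([(1.0, 'A'), ('A', 2.0)])   # drifting 'A' sits between 1.0 and 2.0
for t1, t2 in pairwise(sorted([1.0, 2.0])):
    g.add_edge(t1, t2)                     # anchored times in temporal order
print(list(nx.topological_sort(g)))        # [1.0, 'A', 2.0]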
def ordered_edges_iter(self, nbunch=None, data=False, keys=False):
'Return an iterator over the edges in temporal order.\n\n Ordered edges are returned as tuples with optional data and keys\n in the order (t1, t2, key, data).\n\n Parameters\n ----------\n nbunch : iterable container, optional (default= all nodes)\n A container of nodes. The container will be iterated\n through once.\n data : bool, optional (default=False)\n If True, return edge attribute dict with each edge.\n keys : bool, optional (default=False)\n If True, return edge keys with each edge.\n\n Returns\n -------\n edge_iter : iterator\n An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.\n\n Notes\n -----\n Nodes in nbunch that are not in the graph will be (quietly) ignored.\n For the same reason you should not completely trust temporal_sort,\n use ordered_edges_iter with care.\n '
nodes = self.temporal_sort()
if nbunch:
nbunch = list(nbunch)
nodes = [n for n in nodes if (n in nbunch)]
return self.edges_iter(nbunch=nodes, data=data, keys=keys)
| -6,821,498,659,858,628,000
|
Return an iterator over the edges in temporal order.
Ordered edges are returned as tuples with optional data and keys
in the order (t1, t2, key, data).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict with each edge.
keys : bool, optional (default=False)
If True, return edge keys with each edge.
Returns
-------
edge_iter : iterator
An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For the same reason you should not completely trust temporal_sort,
use ordered_edges_iter with care.
|
pyannote/core/transcription.py
|
ordered_edges_iter
|
Parisson/pyannote-core
|
python
|
def ordered_edges_iter(self, nbunch=None, data=False, keys=False):
'Return an iterator over the edges in temporal order.\n\n Ordered edges are returned as tuples with optional data and keys\n in the order (t1, t2, key, data).\n\n Parameters\n ----------\n nbunch : iterable container, optional (default= all nodes)\n A container of nodes. The container will be iterated\n through once.\n data : bool, optional (default=False)\n If True, return edge attribute dict with each edge.\n keys : bool, optional (default=False)\n If True, return edge keys with each edge.\n\n Returns\n -------\n edge_iter : iterator\n An iterator of (u,v), (u,v,d) or (u,v,key,d) tuples of edges.\n\n Notes\n -----\n Nodes in nbunch that are not in the graph will be (quietly) ignored.\n For the same reason you should not completely trust temporal_sort,\n use ordered_edges_iter with care.\n '
nodes = self.temporal_sort()
if nbunch:
nbunch = list(nbunch)
nodes = [n for n in nodes if (n in nbunch)]
return self.edges_iter(nbunch=nodes, data=data, keys=keys)
|
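The nbunch handling boils down to filtering the temporal sort; note the original keeps nbunch as a list, so every membership test is linear, and a set is the cheaper spelling (standalone sketch):

nodes = [1.0, 'A', 2.0, 'B']      # pretend output of temporal_sort()
nbunch = {1.0, 'B', 'missing'}    # nodes absent from the graph are quietly ignored
ordered = [n for n in nodes if n in nbunch]
print(ordered)                    # [1.0, 'B']: order preserved, extras dropped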
def timerange(self, t1, t2, inside=True, sort=None):
'Infer edge timerange from graph structure\n\n a -- ... -- [ t1 ] -- A -- ... -- B -- [ t2 ] -- ... -- b\n\n ==> [a, b] (inside=False) or [A, B] (inside=True)\n\n Parameters\n ----------\n t1, t2 : anchored or drifting times\n inside : boolean, optional\n\n Returns\n -------\n segment : Segment\n '
t1 = T(t1)
t2 = T(t2)
if (sort is None):
sort = self.temporal_sort()
if t1.anchored:
start = t1
else:
start = None
istart = sort.index(t1)
search = (sort[(istart + 1):] if inside else sort[(istart - 1)::(- 1)])
for t in search:
if t.anchored:
start = t
break
if (start is None):
start = (TEnd if inside else TStart)
if t2.anchored:
end = t2
else:
end = None
iend = sort.index(t2)
search = (sort[(iend - 1)::(- 1)] if inside else sort[(iend + 1):])
for t in search:
if t.anchored:
end = t
break
if (end is None):
end = (TStart if inside else TEnd)
return Segment(start=start, end=end)
| 7,552,954,080,013,591,000
|
Infer edge timerange from graph structure
a -- ... -- [ t1 ] -- A -- ... -- B -- [ t2 ] -- ... -- b
==> [a, b] (inside=False) or [A, B] (inside=True)
Parameters
----------
t1, t2 : anchored or drifting times
inside : boolean, optional
Returns
-------
segment : Segment
|
pyannote/core/transcription.py
|
timerange
|
Parisson/pyannote-core
|
python
|
def timerange(self, t1, t2, inside=True, sort=None):
'Infer edge timerange from graph structure\n\n a -- ... -- [ t1 ] -- A -- ... -- B -- [ t2 ] -- ... -- b\n\n ==> [a, b] (inside=False) or [A, B] (inside=True)\n\n Parameters\n ----------\n t1, t2 : anchored or drifting times\n inside : boolean, optional\n\n Returns\n -------\n segment : Segment\n '
t1 = T(t1)
t2 = T(t2)
if (sort is None):
sort = self.temporal_sort()
if t1.anchored:
start = t1
else:
start = None
istart = sort.index(t1)
search = (sort[(istart + 1):] if inside else sort[(istart - 1)::(- 1)])
for t in search:
if t.anchored:
start = t
break
if (start is None):
start = (TEnd if inside else TStart)
if t2.anchored:
end = t2
else:
end = None
iend = sort.index(t2)
search = (sort[(iend - 1)::(- 1)] if inside else sort[(iend + 1):])
for t in search:
if t.anchored:
end = t
break
if (end is None):
end = (TStart if inside else TEnd)
return Segment(start=start, end=end)
|
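For inside=True the method scans forward from t1 and backward from t2 until it hits an anchored time; a standalone sketch with floats standing in for anchored times and strings for drifting ones:

sort = ['a', 1.0, 't1', 2.0, 3.0, 't2', 'b']   # pretend temporal_sort() output

def nearest_anchored(sort, t, forward):
    i = sort.index(t)
    search = sort[i + 1:] if forward else sort[i - 1::-1]
    return next((x for x in search if isinstance(x, float)), None)

print(nearest_anchored(sort, 't1', forward=True))    # 2.0, the A in the diagram
print(nearest_anchored(sort, 't2', forward=False))   # 3.0, the B in the diagram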
def build_options(gens, args=None):
'Construct options from keyword arguments or ... options. '
if (args is None):
(gens, args) = ((), gens)
if ((len(args) != 1) or ('opt' not in args) or gens):
return Options(gens, args)
else:
return args['opt']
| -3,557,484,308,317,494,000
|
Construct options from keyword arguments or ... options.
|
PPTexEnv_x86_64/lib/python2.7/site-packages/sympy/polys/polyoptions.py
|
build_options
|
18padx08/PPTex
|
python
|
def build_options(gens, args=None):
' '
if (args is None):
(gens, args) = ((), gens)
if ((len(args) != 1) or ('opt' not in args) or gens):
return Options(gens, args)
else:
return args['opt']
|
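A usage sketch following the two branches above; behaviour is inferred from this code and may differ across sympy versions:

from sympy.abc import x
from sympy.polys.polyoptions import build_options

opt = build_options((x,), {'domain': 'ZZ'})   # gens + keywords -> Options instance
same = build_options({'opt': opt})            # a lone {'opt': ...} passes through
assert same is opt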
def allowed_flags(args, flags):
"\n Allow specified flags to be used in the given context.\n\n Examples\n ========\n\n >>> from sympy.polys.polyoptions import allowed_flags\n >>> from sympy.polys.domains import ZZ\n\n >>> allowed_flags({'domain': ZZ}, [])\n\n >>> allowed_flags({'domain': ZZ, 'frac': True}, [])\n Traceback (most recent call last):\n ...\n FlagError: 'frac' flag is not allowed in this context\n\n >>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac'])\n\n "
flags = set(flags)
for arg in args.keys():
try:
if (Options.__options__[arg].is_Flag and (not (arg in flags))):
raise FlagError(("'%s' flag is not allowed in this context" % arg))
except KeyError:
raise OptionError(("'%s' is not a valid option" % arg))
| 7,278,552,867,825,473,000
|
Allow specified flags to be used in the given context.
Examples
========
>>> from sympy.polys.polyoptions import allowed_flags
>>> from sympy.polys.domains import ZZ
>>> allowed_flags({'domain': ZZ}, [])
>>> allowed_flags({'domain': ZZ, 'frac': True}, [])
Traceback (most recent call last):
...
FlagError: 'frac' flag is not allowed in this context
>>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac'])
|
PPTexEnv_x86_64/lib/python2.7/site-packages/sympy/polys/polyoptions.py
|
allowed_flags
|
18padx08/PPTex
|
python
|
def allowed_flags(args, flags):
"\n Allow specified flags to be used in the given context.\n\n Examples\n ========\n\n >>> from sympy.polys.polyoptions import allowed_flags\n >>> from sympy.polys.domains import ZZ\n\n >>> allowed_flags({'domain': ZZ}, [])\n\n >>> allowed_flags({'domain': ZZ, 'frac': True}, [])\n Traceback (most recent call last):\n ...\n FlagError: 'frac' flag is not allowed in this context\n\n >>> allowed_flags({'domain': ZZ, 'frac': True}, ['frac'])\n\n "
flags = set(flags)
for arg in args.keys():
try:
if (Options.__options__[arg].is_Flag and (not (arg in flags))):
raise FlagError(("'%s' flag is not allowed in this context" % arg))
except KeyError:
raise OptionError(("'%s' is not a valid option" % arg))
|
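Catching the failure mode shown in the doctest uses FlagError from sympy.polys.polyerrors; 'polys' is assumed here to be another flag that an empty context disallows:

from sympy.polys.polyoptions import allowed_flags
from sympy.polys.polyerrors import FlagError

try:
    allowed_flags({'polys': True}, [])
except FlagError as exc:
    print(exc)   # 'polys' flag is not allowed in this context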
def set_defaults(options, **defaults):
'Update options with default values. '
if ('defaults' not in options):
options = dict(options)
options['defaults'] = defaults
return options
| -2,880,402,475,341,957,000
|
Update options with default values.
|
PPTexEnv_x86_64/lib/python2.7/site-packages/sympy/polys/polyoptions.py
|
set_defaults
|
18padx08/PPTex
|
python
|
def set_defaults(options, **defaults):
' '
if ('defaults' not in options):
options = dict(options)
options['defaults'] = defaults
return options
|
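Usage sketch: defaults ride along under the reserved 'defaults' key, the caller's dict is copied rather than mutated, and a second call is a no-op once the key exists:

from sympy.polys.polyoptions import set_defaults

opts = set_defaults({'domain': 'ZZ'}, order='lex')
print(opts)                                        # {'domain': 'ZZ', 'defaults': {'order': 'lex'}}
print(set_defaults(opts, order='grlex') is opts)   # True: defaults already attached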
@classmethod
def _init_dependencies_order(cls):
"Resolve the order of options' processing. "
if (cls.__order__ is None):
(vertices, edges) = ([], set([]))
for (name, option) in cls.__options__.items():
vertices.append(name)
for _name in option.after:
edges.add((_name, name))
for _name in option.before:
edges.add((name, _name))
try:
cls.__order__ = topological_sort((vertices, list(edges)))
except ValueError:
raise RuntimeError('cycle detected in sympy.polys options framework')
| 584,395,546,733,577,600
|
Resolve the order of options' processing.
|
PPTexEnv_x86_64/lib/python2.7/site-packages/sympy/polys/polyoptions.py
|
_init_dependencies_order
|
18padx08/PPTex
|
python
|
@classmethod
def _init_dependencies_order(cls):
" "
if (cls.__order__ is None):
(vertices, edges) = ([], set([]))
for (name, option) in cls.__options__.items():
vertices.append(name)
for _name in option.after:
edges.add((_name, name))
for _name in option.before:
edges.add((name, _name))
try:
cls.__order__ = topological_sort((vertices, list(edges)))
except ValueError:
raise RuntimeError('cycle detected in sympy.polys options framework')
|
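A standalone sketch of how the after/before constraints become edges of a topological sort, using the standard library's graphlib (Python 3.9+) in place of sympy's topological_sort; the option names are invented:

from graphlib import TopologicalSorter

options = {
    'domain': {'after': [], 'before': ['split']},
    'gaussian': {'after': ['domain'], 'before': []},
    'split': {'after': [], 'before': []},
}
ts = TopologicalSorter()
for name, option in options.items():
    ts.add(name)
    for dep in option['after']:
        ts.add(name, dep)    # dep is processed before name
    for dep in option['before']:
        ts.add(dep, name)    # name is processed before dep
print(list(ts.static_order()))   # e.g. ['domain', 'split', 'gaussian']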
def clone(self, updates={}):
'Clone ``self`` and update specified options. '
obj = dict.__new__(self.__class__)
for (option, value) in self.items():
obj[option] = value
for (option, value) in updates.items():
obj[option] = value
return obj
| -4,789,163,976,798,031,000
|
Clone ``self`` and update specified options.
|
PPTexEnv_x86_64/lib/python2.7/site-packages/sympy/polys/polyoptions.py
|
clone
|
18padx08/PPTex
|
python
|
def clone(self, updates={}):
' '
obj = dict.__new__(self.__class__)
for (option, value) in self.items():
obj[option] = value
for (option, value) in updates.items():
obj[option] = value
return obj
|
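The updates={} mutable default is harmless only because clone never mutates it; the usual defensive spelling, with a toy Options standing in for sympy's dict subclass:

class Options(dict):
    def clone(self, updates=None):
        obj = dict.__new__(self.__class__)
        obj.update(self)             # copy the current options
        if updates:
            obj.update(updates)      # then apply the overrides
        return obj

opt = Options(domain='ZZ')
print(opt.clone({'domain': 'QQ'}))   # {'domain': 'QQ'}
print(opt)                           # {'domain': 'ZZ'}, the original is untouched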
def test_headRequest(self):
'\n L{Data.render} returns an empty response body for a I{HEAD} request.\n '
data = static.Data(b'foo', 'bar')
request = DummyRequest([''])
request.method = b'HEAD'
d = _render(data, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b'')
d.addCallback(cbRendered)
return d
| 5,796,402,234,009,444,000
|
L{Data.render} returns an empty response body for a I{HEAD} request.
|
src/twisted/web/test/test_static.py
|
test_headRequest
|
ikingye/twisted
|
python
|
def test_headRequest(self):
'\n \n '
data = static.Data(b'foo', 'bar')
request = DummyRequest([''])
request.method = b'HEAD'
d = _render(data, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b'')
d.addCallback(cbRendered)
return d
|
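For context, this is how the resource under test is normally wired up (standard twisted.web API; port and payload are arbitrary):

from twisted.web import server, static
from twisted.internet import reactor

root = static.Data(b'hello, world', 'text/plain')
reactor.listenTCP(8080, server.Site(root))
# reactor.run()   # GET / returns the body, HEAD / only the headers, POST / is refused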
def test_invalidMethod(self):
'\n L{Data.render} raises L{UnsupportedMethod} in response to a non-I{GET},\n non-I{HEAD} request.\n '
data = static.Data(b'foo', b'bar')
request = DummyRequest([b''])
request.method = b'POST'
self.assertRaises(UnsupportedMethod, data.render, request)
| -5,311,282,060,070,802,000
|
L{Data.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
|
src/twisted/web/test/test_static.py
|
test_invalidMethod
|
ikingye/twisted
|
python
|
def test_invalidMethod(self):
'\n L{Data.render} raises L{UnsupportedMethod} in response to a non-I{GET},\n non-I{HEAD} request.\n '
data = static.Data(b'foo', b'bar')
request = DummyRequest([b''])
request.method = b'POST'
self.assertRaises(UnsupportedMethod, data.render, request)
|
def test_ignoredExtTrue(self):
'\n Passing C{1} as the value to L{File}\'s C{ignoredExts} argument\n issues a warning and sets the ignored extensions to the\n wildcard C{"*"}.\n '
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), ignoredExts=1)
self.assertEqual(file.ignoredExts, ['*'])
self.assertEqual(len(caughtWarnings), 1)
| -4,848,127,279,368,782,000
|
Passing C{1} as the value to L{File}'s C{ignoredExts} argument
issues a warning and sets the ignored extensions to the
wildcard C{"*"}.
|
src/twisted/web/test/test_static.py
|
test_ignoredExtTrue
|
ikingye/twisted
|
python
|
def test_ignoredExtTrue(self):
'\n Passing C{1} as the value to L{File}\'s C{ignoredExts} argument\n issues a warning and sets the ignored extensions to the\n wildcard C{"*"}.\n '
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), ignoredExts=1)
self.assertEqual(file.ignoredExts, ['*'])
self.assertEqual(len(caughtWarnings), 1)
|
def test_ignoredExtFalse(self):
"\n Passing C{1} as the value to L{File}'s C{ignoredExts} argument\n issues a warning and sets the ignored extensions to the empty\n list.\n "
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), ignoredExts=0)
self.assertEqual(file.ignoredExts, [])
self.assertEqual(len(caughtWarnings), 1)
| 1,052,521,521,552,563,600
|
Passing C{1} as the value to L{File}'s C{ignoredExts} argument
issues a warning and sets the ignored extensions to the empty
list.
|
src/twisted/web/test/test_static.py
|
test_ignoredExtFalse
|
ikingye/twisted
|
python
|
def test_ignoredExtFalse(self):
"\n Passing C{1} as the value to L{File}'s C{ignoredExts} argument\n issues a warning and sets the ignored extensions to the empty\n list.\n "
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), ignoredExts=0)
self.assertEqual(file.ignoredExts, [])
self.assertEqual(len(caughtWarnings), 1)
|
def test_allowExt(self):
"\n Passing C{1} as the value to L{File}'s C{allowExt} argument\n issues a warning and sets the ignored extensions to the\n wildcard C{*}.\n "
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), ignoredExts=True)
self.assertEqual(file.ignoredExts, ['*'])
self.assertEqual(len(caughtWarnings), 1)
| -3,628,029,843,179,005,400
|
Passing C{1} as the value to L{File}'s C{allowExt} argument
issues a warning and sets the ignored extensions to the
wildcard C{*}.
|
src/twisted/web/test/test_static.py
|
test_allowExt
|
ikingye/twisted
|
python
|
def test_allowExt(self):
"\n Passing C{1} as the value to L{File}'s C{allowExt} argument\n issues a warning and sets the ignored extensions to the\n wildcard C{*}.\n "
with warnings.catch_warnings(record=True) as caughtWarnings:
file = static.File(self.mktemp(), ignoredExts=True)
self.assertEqual(file.ignoredExts, ['*'])
self.assertEqual(len(caughtWarnings), 1)
|
def test_invalidMethod(self):
'\n L{File.render} raises L{UnsupportedMethod} in response to a non-I{GET},\n non-I{HEAD} request.\n '
request = DummyRequest([b''])
request.method = b'POST'
path = FilePath(self.mktemp())
path.setContent(b'foo')
file = static.File(path.path)
self.assertRaises(UnsupportedMethod, file.render, request)
| -420,214,662,511,013,250
|
L{File.render} raises L{UnsupportedMethod} in response to a non-I{GET},
non-I{HEAD} request.
|
src/twisted/web/test/test_static.py
|
test_invalidMethod
|
ikingye/twisted
|
python
|
def test_invalidMethod(self):
'\n L{File.render} raises L{UnsupportedMethod} in response to a non-I{GET},\n non-I{HEAD} request.\n '
request = DummyRequest([b''])
request.method = b'POST'
path = FilePath(self.mktemp())
path.setContent(b'foo')
file = static.File(path.path)
self.assertRaises(UnsupportedMethod, file.render, request)
|
def test_notFound(self):
'\n If a request is made which encounters a L{File} before a final segment\n which does not correspond to any file in the path the L{File} was\n created with, a not found response is sent.\n '
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b'foobar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
| 2,187,499,811,497,714,200
|
If a request is made which encounters a L{File} before a final segment
which does not correspond to any file in the path the L{File} was
created with, a not found response is sent.
|
src/twisted/web/test/test_static.py
|
test_notFound
|
ikingye/twisted
|
python
|
def test_notFound(self):
'\n If a request is made which encounters a L{File} before a final segment\n which does not correspond to any file in the path the L{File} was\n created with, a not found response is sent.\n '
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b'foobar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
|
def test_emptyChild(self):
"\n The C{''} child of a L{File} which corresponds to a directory in the\n filesystem is a L{DirectoryLister}.\n "
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b''])
child = resource.getChildForRequest(file, request)
self.assertIsInstance(child, static.DirectoryLister)
self.assertEqual(child.path, base.path)
| -5,078,275,424,837,194,000
|
The C{''} child of a L{File} which corresponds to a directory in the
filesystem is a L{DirectoryLister}.
|
src/twisted/web/test/test_static.py
|
test_emptyChild
|
ikingye/twisted
|
python
|
def test_emptyChild(self):
"\n The C{} child of a L{File} which corresponds to a directory in the\n filesystem is a L{DirectoryLister}.\n "
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b''])
child = resource.getChildForRequest(file, request)
self.assertIsInstance(child, static.DirectoryLister)
self.assertEqual(child.path, base.path)
|
def test_emptyChildUnicodeParent(self):
"\n The C{u''} child of a L{File} which corresponds to a directory\n whose path is text is a L{DirectoryLister} that renders to a\n binary listing.\n\n @see: U{https://twistedmatrix.com/trac/ticket/9438}\n "
textBase = FilePath(self.mktemp()).asTextMode()
textBase.makedirs()
textBase.child(u'text-file').open('w').close()
textFile = static.File(textBase.path)
request = DummyRequest([b''])
child = resource.getChildForRequest(textFile, request)
self.assertIsInstance(child, static.DirectoryLister)
nativePath = compat.nativeString(textBase.path)
self.assertEqual(child.path, nativePath)
response = child.render(request)
self.assertIsInstance(response, bytes)
| 992,252,466,549,817,900
|
The C{u''} child of a L{File} which corresponds to a directory
whose path is text is a L{DirectoryLister} that renders to a
binary listing.
@see: U{https://twistedmatrix.com/trac/ticket/9438}
|
src/twisted/web/test/test_static.py
|
test_emptyChildUnicodeParent
|
ikingye/twisted
|
python
|
def test_emptyChildUnicodeParent(self):
"\n The C{u} child of a L{File} which corresponds to a directory\n whose path is text is a L{DirectoryLister} that renders to a\n binary listing.\n\n @see: U{https://twistedmatrix.com/trac/ticket/9438}\n "
textBase = FilePath(self.mktemp()).asTextMode()
textBase.makedirs()
textBase.child(u'text-file').open('w').close()
textFile = static.File(textBase.path)
request = DummyRequest([b''])
child = resource.getChildForRequest(textFile, request)
self.assertIsInstance(child, static.DirectoryLister)
nativePath = compat.nativeString(textBase.path)
self.assertEqual(child.path, nativePath)
response = child.render(request)
self.assertIsInstance(response, bytes)
|
def test_securityViolationNotFound(self):
'\n If a request is made which encounters a L{File} before a final segment\n which cannot be looked up in the filesystem due to security\n considerations, a not found response is sent.\n '
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b'..'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
| -566,705,790,611,264,000
|
If a request is made which encounters a L{File} before a final segment
which cannot be looked up in the filesystem due to security
considerations, a not found response is sent.
|
src/twisted/web/test/test_static.py
|
test_securityViolationNotFound
|
ikingye/twisted
|
python
|
def test_securityViolationNotFound(self):
'\n If a request is made which encounters a L{File} before a final segment\n which cannot be looked up in the filesystem due to security\n considerations, a not found response is sent.\n '
base = FilePath(self.mktemp())
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b'..'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
d.addCallback(cbRendered)
return d
|
@skipIf(platform.isWindows(), 'Cannot remove read permission on Windows')
def test_forbiddenResource(self):
'\n If the file in the filesystem which would satisfy a request cannot be\n read, L{File.render} sets the HTTP response code to I{FORBIDDEN}.\n '
base = FilePath(self.mktemp())
base.setContent(b'')
self.addCleanup(base.chmod, 448)
base.chmod(0)
file = static.File(base.path)
request = DummyRequest([b''])
d = self._render(file, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 403)
d.addCallback(cbRendered)
return d
| 2,634,763,309,614,467,000
|
If the file in the filesystem which would satisfy a request cannot be
read, L{File.render} sets the HTTP response code to I{FORBIDDEN}.
|
src/twisted/web/test/test_static.py
|
test_forbiddenResource
|
ikingye/twisted
|
python
|
@skipIf(platform.isWindows(), 'Cannot remove read permission on Windows')
def test_forbiddenResource(self):
'\n If the file in the filesystem which would satisfy a request cannot be\n read, L{File.render} sets the HTTP response code to I{FORBIDDEN}.\n '
base = FilePath(self.mktemp())
base.setContent(b'')
self.addCleanup(base.chmod, 448)
base.chmod(0)
file = static.File(base.path)
request = DummyRequest([b''])
d = self._render(file, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 403)
d.addCallback(cbRendered)
return d
|
def test_undecodablePath(self):
'\n A request whose path cannot be decoded as UTF-8 receives a not\n found response, and the failure is logged.\n '
path = self.mktemp()
if isinstance(path, bytes):
path = path.decode('ascii')
base = FilePath(path)
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b'\xff'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
self.assertEqual(len(self.flushLoggedErrors(UnicodeDecodeError)), 1)
d.addCallback(cbRendered)
return d
| 6,257,857,815,952,183,000
|
A request whose path cannot be decoded as UTF-8 receives a not
found response, and the failure is logged.
|
src/twisted/web/test/test_static.py
|
test_undecodablePath
|
ikingye/twisted
|
python
|
def test_undecodablePath(self):
'\n A request whose path cannot be decoded as UTF-8 receives a not\n found response, and the failure is logged.\n '
path = self.mktemp()
if isinstance(path, bytes):
path = path.decode('ascii')
base = FilePath(path)
base.makedirs()
file = static.File(base.path)
request = DummyRequest([b'\xff'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(request.responseCode, 404)
self.assertEqual(len(self.flushLoggedErrors(UnicodeDecodeError)), 1)
d.addCallback(cbRendered)
return d
|
def test_forbiddenResource_default(self):
'\n L{File.forbidden} defaults to L{resource.ForbiddenResource}.\n '
self.assertIsInstance(static.File(b'.').forbidden, resource.ForbiddenResource)
| -3,599,652,922,653,693,400
|
L{File.forbidden} defaults to L{resource.ForbiddenResource}.
|
src/twisted/web/test/test_static.py
|
test_forbiddenResource_default
|
ikingye/twisted
|
python
|
def test_forbiddenResource_default(self):
'\n \n '
self.assertIsInstance(static.File(b'.').forbidden, resource.ForbiddenResource)
|
def test_forbiddenResource_customize(self):
'\n The resource rendered for forbidden requests is stored as a class\n member so that users can customize it.\n '
base = FilePath(self.mktemp())
base.setContent(b'')
markerResponse = b'custom-forbidden-response'
def failingOpenForReading():
raise IOError(errno.EACCES, '')
class CustomForbiddenResource(resource.Resource):
def render(self, request):
return markerResponse
class CustomStaticFile(static.File):
forbidden = CustomForbiddenResource()
fileResource = CustomStaticFile(base.path)
fileResource.openForReading = failingOpenForReading
request = DummyRequest([b''])
result = fileResource.render(request)
self.assertEqual(markerResponse, result)
| 7,500,880,065,751,543,000
|
The resource rendered for forbidden requests is stored as a class
member so that users can customize it.
|
src/twisted/web/test/test_static.py
|
test_forbiddenResource_customize
|
ikingye/twisted
|
python
|
def test_forbiddenResource_customize(self):
'\n The resource rendered for forbidden requests is stored as a class\n member so that users can customize it.\n '
base = FilePath(self.mktemp())
base.setContent(b'')
markerResponse = b'custom-forbidden-response'
def failingOpenForReading():
raise IOError(errno.EACCES, '')
class CustomForbiddenResource(resource.Resource):
def render(self, request):
return markerResponse
class CustomStaticFile(static.File):
forbidden = CustomForbiddenResource()
fileResource = CustomStaticFile(base.path)
fileResource.openForReading = failingOpenForReading
request = DummyRequest([b''])
result = fileResource.render(request)
self.assertEqual(markerResponse, result)
|
def test_indexNames(self):
"\n If a request is made which encounters a L{File} before a final empty\n segment, a file in the L{File} instance's C{indexNames} list which\n exists in the path the L{File} was created with is served as the\n response to the request.\n "
base = FilePath(self.mktemp())
base.makedirs()
base.child('foo.bar').setContent(b'baz')
file = static.File(base.path)
file.indexNames = ['foo.bar']
request = DummyRequest([b''])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b'baz')
self.assertEqual(request.responseHeaders.getRawHeaders(b'content-length')[0], b'3')
d.addCallback(cbRendered)
return d
| -3,557,248,638,458,413,000
|
If a request is made which encounters a L{File} before a final empty
segment, a file in the L{File} instance's C{indexNames} list which
exists in the path the L{File} was created with is served as the
response to the request.
|
src/twisted/web/test/test_static.py
|
test_indexNames
|
ikingye/twisted
|
python
|
def test_indexNames(self):
"\n If a request is made which encounters a L{File} before a final empty\n segment, a file in the L{File} instance's C{indexNames} list which\n exists in the path the L{File} was created with is served as the\n response to the request.\n "
base = FilePath(self.mktemp())
base.makedirs()
base.child('foo.bar').setContent(b'baz')
file = static.File(base.path)
file.indexNames = ['foo.bar']
request = DummyRequest([b''])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b'baz')
self.assertEqual(request.responseHeaders.getRawHeaders(b'content-length')[0], b'3')
d.addCallback(cbRendered)
return d
|
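Outside the test, indexNames is the knob that selects a directory index (standard twisted.web API; the paths are arbitrary):

from twisted.web import server, static
from twisted.internet import reactor

root = static.File('/var/www/htdocs')
root.indexNames = ['index.html']
reactor.listenTCP(8080, server.Site(root))
# reactor.run()   # a request for / now serves index.html when it exists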
def test_staticFile(self):
'\n If a request is made which encounters a L{File} before a final segment\n which names a file in the path the L{File} was created with, that file\n is served as the response to the request.\n '
base = FilePath(self.mktemp())
base.makedirs()
base.child('foo.bar').setContent(b'baz')
file = static.File(base.path)
request = DummyRequest([b'foo.bar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b'baz')
self.assertEqual(request.responseHeaders.getRawHeaders(b'content-length')[0], b'3')
d.addCallback(cbRendered)
return d
| 2,431,746,271,335,141,000
|
If a request is made which encounters a L{File} before a final segment
which names a file in the path the L{File} was created with, that file
is served as the response to the request.
|
src/twisted/web/test/test_static.py
|
test_staticFile
|
ikingye/twisted
|
python
|
def test_staticFile(self):
'\n If a request is made which encounters a L{File} before a final segment\n which names a file in the path the L{File} was created with, that file\n is served as the response to the request.\n '
base = FilePath(self.mktemp())
base.makedirs()
base.child('foo.bar').setContent(b'baz')
file = static.File(base.path)
request = DummyRequest([b'foo.bar'])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), b'baz')
self.assertEqual(request.responseHeaders.getRawHeaders(b'content-length')[0], b'3')
d.addCallback(cbRendered)
return d
|
@skipIf((sys.getfilesystemencoding().lower() not in ('utf-8', 'mcbs')), 'Cannot write unicode filenames with file system encoding of {}'.format(sys.getfilesystemencoding()))
def test_staticFileUnicodeFileName(self):
'\n A request for an existing unicode file path encoded as UTF-8\n returns the contents of that file.\n '
name = u'ῆ'
content = b'content'
base = FilePath(self.mktemp())
base.makedirs()
base.child(name).setContent(content)
file = static.File(base.path)
request = DummyRequest([name.encode('utf-8')])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), content)
self.assertEqual(request.responseHeaders.getRawHeaders(b'content-length')[0], networkString(str(len(content))))
d.addCallback(cbRendered)
return d
| -9,114,157,304,539,470,000
|
A request for an existing unicode file path encoded as UTF-8
returns the contents of that file.
|
src/twisted/web/test/test_static.py
|
test_staticFileUnicodeFileName
|
ikingye/twisted
|
python
|
@skipIf((sys.getfilesystemencoding().lower() not in ('utf-8', 'mcbs')), 'Cannot write unicode filenames with file system encoding of {}'.format(sys.getfilesystemencoding()))
def test_staticFileUnicodeFileName(self):
'\n A request for an existing unicode file path encoded as UTF-8\n returns the contents of that file.\n '
name = u'ῆ'
content = b'content'
base = FilePath(self.mktemp())
base.makedirs()
base.child(name).setContent(content)
file = static.File(base.path)
request = DummyRequest([name.encode('utf-8')])
child = resource.getChildForRequest(file, request)
d = self._render(child, request)
def cbRendered(ignored):
self.assertEqual(b''.join(request.written), content)
self.assertEqual(request.responseHeaders.getRawHeaders(b'content-length')[0], networkString(str(len(content))))
d.addCallback(cbRendered)
return d
|
def test_staticFileDeletedGetChild(self):
'\n A L{static.File} created for a directory which does not exist should\n return childNotFound from L{static.File.getChild}.\n '
staticFile = static.File(self.mktemp())
request = DummyRequest([b'foo.bar'])
child = staticFile.getChild(b'foo.bar', request)
self.assertEqual(child, staticFile.childNotFound)
| -6,216,197,732,870,106,000
|
A L{static.File} created for a directory which does not exist should
return childNotFound from L{static.File.getChild}.
|
src/twisted/web/test/test_static.py
|
test_staticFileDeletedGetChild
|
ikingye/twisted
|
python
|
def test_staticFileDeletedGetChild(self):
'\n A L{static.File} created for a directory which does not exist should\n return childNotFound from L{static.File.getChild}.\n '
staticFile = static.File(self.mktemp())
request = DummyRequest([b'foo.bar'])
child = staticFile.getChild(b'foo.bar', request)
self.assertEqual(child, staticFile.childNotFound)
|