id int32 0 252k | repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 51 19.8k | code_tokens list | docstring stringlengths 3 17.3k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 87 242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
26,100 | empymod/empymod | empymod/model.py | wavenumber | def wavenumber(src, rec, depth, res, freq, wavenumber, ab=11, aniso=None,
epermH=None, epermV=None, mpermH=None, mpermV=None, verb=2):
r"""Depreciated. Use `dipole_k` instead."""
# Issue warning
mesg = ("\n The use of `model.wavenumber` is deprecated and will " +
"be removed;\n use `model.dipole_k` instead.")
warnings.warn(mesg, DeprecationWarning)
return dipole_k(src, rec, depth, res, freq, wavenumber, ab, aniso, epermH,
epermV, mpermH, mpermV, verb) | python | def wavenumber(src, rec, depth, res, freq, wavenumber, ab=11, aniso=None,
epermH=None, epermV=None, mpermH=None, mpermV=None, verb=2):
r"""Depreciated. Use `dipole_k` instead."""
# Issue warning
mesg = ("\n The use of `model.wavenumber` is deprecated and will " +
"be removed;\n use `model.dipole_k` instead.")
warnings.warn(mesg, DeprecationWarning)
return dipole_k(src, rec, depth, res, freq, wavenumber, ab, aniso, epermH,
epermV, mpermH, mpermV, verb) | [
"def",
"wavenumber",
"(",
"src",
",",
"rec",
",",
"depth",
",",
"res",
",",
"freq",
",",
"wavenumber",
",",
"ab",
"=",
"11",
",",
"aniso",
"=",
"None",
",",
"epermH",
"=",
"None",
",",
"epermV",
"=",
"None",
",",
"mpermH",
"=",
"None",
",",
"mper... | r"""Depreciated. Use `dipole_k` instead. | [
"r",
"Depreciated",
".",
"Use",
"dipole_k",
"instead",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/model.py#L1460-L1470 |
26,101 | empymod/empymod | empymod/model.py | tem | def tem(fEM, off, freq, time, signal, ft, ftarg, conv=True):
r"""Return the time-domain response of the frequency-domain response fEM.
This function is called from one of the above modelling routines. No
input-check is carried out here. See the main description of :mod:`model`
for information regarding input and output parameters.
This function can be directly used if you are sure the provided input is in
the correct format. This is useful for inversion routines and similar, as
it can speed-up the calculation by omitting input-checks.
"""
# 1. Scale frequencies if switch-on/off response
# Step function for causal times is like a unit fct, therefore an impulse
# in frequency domain
if signal in [-1, 1]:
# Divide by signal/(2j*pi*f) to obtain step response
fact = signal/(2j*np.pi*freq)
else:
fact = 1
# 2. f->t transform
tEM = np.zeros((time.size, off.size))
for i in range(off.size):
out = getattr(transform, ft)(fEM[:, i]*fact, time, freq, ftarg)
tEM[:, i] += out[0]
conv *= out[1]
return tEM*2/np.pi, conv | python | def tem(fEM, off, freq, time, signal, ft, ftarg, conv=True):
r"""Return the time-domain response of the frequency-domain response fEM.
This function is called from one of the above modelling routines. No
input-check is carried out here. See the main description of :mod:`model`
for information regarding input and output parameters.
This function can be directly used if you are sure the provided input is in
the correct format. This is useful for inversion routines and similar, as
it can speed-up the calculation by omitting input-checks.
"""
# 1. Scale frequencies if switch-on/off response
# Step function for causal times is like a unit fct, therefore an impulse
# in frequency domain
if signal in [-1, 1]:
# Divide by signal/(2j*pi*f) to obtain step response
fact = signal/(2j*np.pi*freq)
else:
fact = 1
# 2. f->t transform
tEM = np.zeros((time.size, off.size))
for i in range(off.size):
out = getattr(transform, ft)(fEM[:, i]*fact, time, freq, ftarg)
tEM[:, i] += out[0]
conv *= out[1]
return tEM*2/np.pi, conv | [
"def",
"tem",
"(",
"fEM",
",",
"off",
",",
"freq",
",",
"time",
",",
"signal",
",",
"ft",
",",
"ftarg",
",",
"conv",
"=",
"True",
")",
":",
"# 1. Scale frequencies if switch-on/off response",
"# Step function for causal times is like a unit fct, therefore an impulse",
... | r"""Return the time-domain response of the frequency-domain response fEM.
This function is called from one of the above modelling routines. No
input-check is carried out here. See the main description of :mod:`model`
for information regarding input and output parameters.
This function can be directly used if you are sure the provided input is in
the correct format. This is useful for inversion routines and similar, as
it can speed-up the calculation by omitting input-checks. | [
"r",
"Return",
"the",
"time",
"-",
"domain",
"response",
"of",
"the",
"frequency",
"-",
"domain",
"response",
"fEM",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/model.py#L1565-L1593 |
26,102 | empymod/empymod | empymod/scripts/fdesign.py | save_filter | def save_filter(name, filt, full=None, path='filters'):
r"""Save DLF-filter and inversion output to plain text files."""
# First we'll save the filter using its internal routine.
# This will create the directory ./filters if it doesn't exist already.
filt.tofile(path)
# If full, we store the inversion output
if full:
# Get file name
path = os.path.abspath(path)
if len(name.split('.')) == 2:
suffix = '.gz'
else:
suffix = ''
fullfile = os.path.join(path, name.split('.')[0]+'_full.txt' + suffix)
# Get number of spacing and shift values
nspace, nshift = full[3].shape
# Create header
header = 'Full inversion output from empymod.fdesign.design\n'
header += 'Line 11: Nr of spacing values\n'
header += 'Line 12: Nr of shift values\n'
header += 'Line 13: Best spacing value\n'
header += 'Line 14: Best shift value\n'
header += 'Line 15: Min amplitude or max offset\n'
header += 'Lines 16-{}: Spacing matrix '.format(nspace+15)
header += '({} x {})\n'.format(nspace, nshift)
header += 'Lines {}-{}: Spacing matrix '.format(nspace+16, 2*nspace+15)
header += '({} x {})\n'.format(nspace, nshift)
header += 'Lines {}-{}: Spacing '.format(2*nspace+16, 3*nspace+15)
header += 'matrix ({} x {})\n'.format(nspace, nshift)
header += 'Line {}: Integer: 0: min amp, 1: max r'.format(3*nspace+16)
# Create arrays; put single values in arrays of nshift values
nr_spacing = np.r_[nspace, np.zeros(nshift-1)]
nr_shift = np.r_[nshift, np.zeros(nshift-1)]
best_spacing = np.r_[full[0][0], np.zeros(nshift-1)]
best_shift = np.r_[full[0][1], np.zeros(nshift-1)]
min_value = np.r_[np.atleast_1d(full[1]), np.zeros(nshift-1)]
min_max = np.r_[full[4], np.zeros(nshift-1)]
# Collect all in one array
fullsave = np.vstack((nr_spacing, nr_shift, best_spacing, best_shift,
min_value, full[2][0], full[2][1], full[3],
min_max))
# Save array
np.savetxt(fullfile, fullsave, header=header) | python | def save_filter(name, filt, full=None, path='filters'):
r"""Save DLF-filter and inversion output to plain text files."""
# First we'll save the filter using its internal routine.
# This will create the directory ./filters if it doesn't exist already.
filt.tofile(path)
# If full, we store the inversion output
if full:
# Get file name
path = os.path.abspath(path)
if len(name.split('.')) == 2:
suffix = '.gz'
else:
suffix = ''
fullfile = os.path.join(path, name.split('.')[0]+'_full.txt' + suffix)
# Get number of spacing and shift values
nspace, nshift = full[3].shape
# Create header
header = 'Full inversion output from empymod.fdesign.design\n'
header += 'Line 11: Nr of spacing values\n'
header += 'Line 12: Nr of shift values\n'
header += 'Line 13: Best spacing value\n'
header += 'Line 14: Best shift value\n'
header += 'Line 15: Min amplitude or max offset\n'
header += 'Lines 16-{}: Spacing matrix '.format(nspace+15)
header += '({} x {})\n'.format(nspace, nshift)
header += 'Lines {}-{}: Spacing matrix '.format(nspace+16, 2*nspace+15)
header += '({} x {})\n'.format(nspace, nshift)
header += 'Lines {}-{}: Spacing '.format(2*nspace+16, 3*nspace+15)
header += 'matrix ({} x {})\n'.format(nspace, nshift)
header += 'Line {}: Integer: 0: min amp, 1: max r'.format(3*nspace+16)
# Create arrays; put single values in arrays of nshift values
nr_spacing = np.r_[nspace, np.zeros(nshift-1)]
nr_shift = np.r_[nshift, np.zeros(nshift-1)]
best_spacing = np.r_[full[0][0], np.zeros(nshift-1)]
best_shift = np.r_[full[0][1], np.zeros(nshift-1)]
min_value = np.r_[np.atleast_1d(full[1]), np.zeros(nshift-1)]
min_max = np.r_[full[4], np.zeros(nshift-1)]
# Collect all in one array
fullsave = np.vstack((nr_spacing, nr_shift, best_spacing, best_shift,
min_value, full[2][0], full[2][1], full[3],
min_max))
# Save array
np.savetxt(fullfile, fullsave, header=header) | [
"def",
"save_filter",
"(",
"name",
",",
"filt",
",",
"full",
"=",
"None",
",",
"path",
"=",
"'filters'",
")",
":",
"# First we'll save the filter using its internal routine.",
"# This will create the directory ./filters if it doesn't exist already.",
"filt",
".",
"tofile",
... | r"""Save DLF-filter and inversion output to plain text files. | [
"r",
"Save",
"DLF",
"-",
"filter",
"and",
"inversion",
"output",
"to",
"plain",
"text",
"files",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/fdesign.py#L469-L523 |
26,103 | empymod/empymod | empymod/scripts/fdesign.py | load_filter | def load_filter(name, full=False, path='filters'):
r"""Load saved DLF-filter and inversion output from text files."""
# First we'll get the filter using its internal routine.
filt = DigitalFilter(name.split('.')[0])
filt.fromfile(path)
# If full, we get the inversion output
if full:
# Try to get the inversion result. If files are not found, most likely
# because they were not stored, we only return the filter
try:
# Get file name
path = os.path.abspath(path)
if len(name.split('.')) == 2:
suffix = '.gz'
else:
suffix = ''
fullfile = os.path.join(path, name.split('.')[0] +
'_full.txt' + suffix)
# Read data
out = np.loadtxt(fullfile)
except IOError:
return filt
# Collect inversion-result tuple
nspace = int(out[0][0])
nshift = int(out[1][0])
space_shift_matrix = np.zeros((2, nspace, nshift))
space_shift_matrix[0, :, :] = out[5:nspace+5, :]
space_shift_matrix[1, :, :] = out[nspace+5:2*nspace+5, :]
out = (np.array([out[2][0], out[3][0]]), out[4][0], space_shift_matrix,
out[2*nspace+5:3*nspace+5, :], int(out[3*nspace+5, 0]))
return filt, out
else:
return filt | python | def load_filter(name, full=False, path='filters'):
r"""Load saved DLF-filter and inversion output from text files."""
# First we'll get the filter using its internal routine.
filt = DigitalFilter(name.split('.')[0])
filt.fromfile(path)
# If full, we get the inversion output
if full:
# Try to get the inversion result. If files are not found, most likely
# because they were not stored, we only return the filter
try:
# Get file name
path = os.path.abspath(path)
if len(name.split('.')) == 2:
suffix = '.gz'
else:
suffix = ''
fullfile = os.path.join(path, name.split('.')[0] +
'_full.txt' + suffix)
# Read data
out = np.loadtxt(fullfile)
except IOError:
return filt
# Collect inversion-result tuple
nspace = int(out[0][0])
nshift = int(out[1][0])
space_shift_matrix = np.zeros((2, nspace, nshift))
space_shift_matrix[0, :, :] = out[5:nspace+5, :]
space_shift_matrix[1, :, :] = out[nspace+5:2*nspace+5, :]
out = (np.array([out[2][0], out[3][0]]), out[4][0], space_shift_matrix,
out[2*nspace+5:3*nspace+5, :], int(out[3*nspace+5, 0]))
return filt, out
else:
return filt | [
"def",
"load_filter",
"(",
"name",
",",
"full",
"=",
"False",
",",
"path",
"=",
"'filters'",
")",
":",
"# First we'll get the filter using its internal routine.",
"filt",
"=",
"DigitalFilter",
"(",
"name",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
")",
"... | r"""Load saved DLF-filter and inversion output from text files. | [
"r",
"Load",
"saved",
"DLF",
"-",
"filter",
"and",
"inversion",
"output",
"from",
"text",
"files",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/fdesign.py#L526-L566 |
26,104 | empymod/empymod | empymod/scripts/fdesign.py | plot_result | def plot_result(filt, full, prntres=True):
r"""QC the inversion result.
Parameters
----------
- filt, full as returned from fdesign.design with full_output=True
- If prntres is True, it calls fdesign.print_result as well.
r"""
# Check matplotlib (soft dependency)
if not plt:
print(plt_msg)
return
if prntres:
print_result(filt, full)
# Get spacing and shift values from full output of brute
spacing = full[2][0, :, 0]
shift = full[2][1, 0, :]
# Get minimum field values from full output of brute
minfield = np.squeeze(full[3])
plt.figure("Brute force result", figsize=(9.5, 4.5))
plt.subplots_adjust(wspace=.4, bottom=0.2)
# Figure 1: Only if more than 1 spacing or more than 1 shift
# Figure of minfield, depending if spacing/shift are vectors or floats
if spacing.size > 1 or shift.size > 1:
plt.subplot(121)
if full[4] == 0: # Min amp
plt.title("Minimal recovered fields")
ylabel = 'Minimal recovered amplitude (log10)'
field = np.log10(minfield)
cmap = plt.cm.viridis
else: # Max r
plt.title("Maximum recovered r")
ylabel = 'Maximum recovered r'
field = 1/minfield
cmap = plt.cm.viridis_r
if shift.size == 1: # (a) if only one shift value,
plt.plot(spacing, field)
plt.xlabel('Spacing')
plt.ylabel(ylabel)
elif spacing.size == 1: # (b) if only one spacing value
plt.plot(shift, field)
plt.xlabel('Shift')
plt.ylabel(ylabel)
else: # (c) if several spacing and several shift values
field = np.ma.masked_where(np.isinf(minfield), field)
plt.pcolormesh(shift, spacing, field, cmap=cmap)
plt.ylabel('Spacing')
plt.xlabel('Shift')
plt.colorbar()
# Figure 2: Filter values
if spacing.size > 1 or shift.size > 1:
plt.subplot(122)
plt.title('Filter values of best filter')
for attr in ['j0', 'j1', 'sin', 'cos']:
if hasattr(filt, attr):
plt.plot(np.log10(filt.base),
np.log10(np.abs(getattr(filt, attr))), '.-', lw=.5,
label='abs('+attr+')')
plt.plot(np.log10(filt.base), np.log10(-getattr(filt, attr)), '.',
color='k', ms=4)
plt.plot(np.inf, 0, '.', color='k', ms=4, label='Neg. values')
plt.xlabel('Base (log10)')
plt.ylabel('Abs(Amplitude) (log10)')
plt.legend(loc='best')
plt.gcf().canvas.draw() # To force draw in notebook while running
plt.show() | python | def plot_result(filt, full, prntres=True):
r"""QC the inversion result.
Parameters
----------
- filt, full as returned from fdesign.design with full_output=True
- If prntres is True, it calls fdesign.print_result as well.
r"""
# Check matplotlib (soft dependency)
if not plt:
print(plt_msg)
return
if prntres:
print_result(filt, full)
# Get spacing and shift values from full output of brute
spacing = full[2][0, :, 0]
shift = full[2][1, 0, :]
# Get minimum field values from full output of brute
minfield = np.squeeze(full[3])
plt.figure("Brute force result", figsize=(9.5, 4.5))
plt.subplots_adjust(wspace=.4, bottom=0.2)
# Figure 1: Only if more than 1 spacing or more than 1 shift
# Figure of minfield, depending if spacing/shift are vectors or floats
if spacing.size > 1 or shift.size > 1:
plt.subplot(121)
if full[4] == 0: # Min amp
plt.title("Minimal recovered fields")
ylabel = 'Minimal recovered amplitude (log10)'
field = np.log10(minfield)
cmap = plt.cm.viridis
else: # Max r
plt.title("Maximum recovered r")
ylabel = 'Maximum recovered r'
field = 1/minfield
cmap = plt.cm.viridis_r
if shift.size == 1: # (a) if only one shift value,
plt.plot(spacing, field)
plt.xlabel('Spacing')
plt.ylabel(ylabel)
elif spacing.size == 1: # (b) if only one spacing value
plt.plot(shift, field)
plt.xlabel('Shift')
plt.ylabel(ylabel)
else: # (c) if several spacing and several shift values
field = np.ma.masked_where(np.isinf(minfield), field)
plt.pcolormesh(shift, spacing, field, cmap=cmap)
plt.ylabel('Spacing')
plt.xlabel('Shift')
plt.colorbar()
# Figure 2: Filter values
if spacing.size > 1 or shift.size > 1:
plt.subplot(122)
plt.title('Filter values of best filter')
for attr in ['j0', 'j1', 'sin', 'cos']:
if hasattr(filt, attr):
plt.plot(np.log10(filt.base),
np.log10(np.abs(getattr(filt, attr))), '.-', lw=.5,
label='abs('+attr+')')
plt.plot(np.log10(filt.base), np.log10(-getattr(filt, attr)), '.',
color='k', ms=4)
plt.plot(np.inf, 0, '.', color='k', ms=4, label='Neg. values')
plt.xlabel('Base (log10)')
plt.ylabel('Abs(Amplitude) (log10)')
plt.legend(loc='best')
plt.gcf().canvas.draw() # To force draw in notebook while running
plt.show() | [
"def",
"plot_result",
"(",
"filt",
",",
"full",
",",
"prntres",
"=",
"True",
")",
":",
"# Check matplotlib (soft dependency)",
"if",
"not",
"plt",
":",
"print",
"(",
"plt_msg",
")",
"return",
"if",
"prntres",
":",
"print_result",
"(",
"filt",
",",
"full",
... | r"""QC the inversion result.
Parameters
----------
- filt, full as returned from fdesign.design with full_output=True
- If prntres is True, it calls fdesign.print_result as well.
r | [
"r",
"QC",
"the",
"inversion",
"result",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/fdesign.py#L573-L648 |
26,105 | empymod/empymod | empymod/scripts/fdesign.py | print_result | def print_result(filt, full=None):
r"""Print best filter information.
Parameters
----------
- filt, full as returned from fdesign.design with full_output=True
"""
print(' Filter length : %d' % filt.base.size)
print(' Best filter')
if full: # If full provided, we have more information
if full[4] == 0: # Min amp
print(' > Min field : %g' % full[1])
else: # Max amp
r = 1/full[1]
print(' > Max r : %g' % r)
spacing = full[0][0]
shift = full[0][1]
else: # Print what we can without full
n = filt.base.size
a = filt.base[-1]
b = filt.base[-2]
spacing = np.log(a)-np.log(b)
shift = np.log(a)-spacing*(n//2)
print(' > Spacing : %1.10g' % spacing)
print(' > Shift : %1.10g' % shift)
print(' > Base min/max : %e / %e' % (filt.base.min(), filt.base.max())) | python | def print_result(filt, full=None):
r"""Print best filter information.
Parameters
----------
- filt, full as returned from fdesign.design with full_output=True
"""
print(' Filter length : %d' % filt.base.size)
print(' Best filter')
if full: # If full provided, we have more information
if full[4] == 0: # Min amp
print(' > Min field : %g' % full[1])
else: # Max amp
r = 1/full[1]
print(' > Max r : %g' % r)
spacing = full[0][0]
shift = full[0][1]
else: # Print what we can without full
n = filt.base.size
a = filt.base[-1]
b = filt.base[-2]
spacing = np.log(a)-np.log(b)
shift = np.log(a)-spacing*(n//2)
print(' > Spacing : %1.10g' % spacing)
print(' > Shift : %1.10g' % shift)
print(' > Base min/max : %e / %e' % (filt.base.min(), filt.base.max())) | [
"def",
"print_result",
"(",
"filt",
",",
"full",
"=",
"None",
")",
":",
"print",
"(",
"' Filter length : %d'",
"%",
"filt",
".",
"base",
".",
"size",
")",
"print",
"(",
"' Best filter'",
")",
"if",
"full",
":",
"# If full provided, we have more information... | r"""Print best filter information.
Parameters
----------
- filt, full as returned from fdesign.design with full_output=True | [
"r",
"Print",
"best",
"filter",
"information",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/fdesign.py#L651-L678 |
26,106 | empymod/empymod | empymod/scripts/fdesign.py | _call_qc_transform_pairs | def _call_qc_transform_pairs(n, ispacing, ishift, fI, fC, r, r_def, reim):
r"""QC the input transform pairs."""
print('* QC: Input transform-pairs:')
print(' fC: x-range defined through ``n``, ``spacing``, ``shift``, and ' +
'``r``-parameters; b-range defined through ``r``-parameter.')
print(' fI: x- and b-range defined through ``n``, ``spacing``' +
', ``shift``, and ``r_def``-parameters.')
# Calculate min/max k, from minimum and maximum spacing/shift
minspace = np.arange(*ispacing).min()
maxspace = np.arange(*ispacing).max()
minshift = np.arange(*ishift).min()
maxshift = np.arange(*ishift).max()
maxbase = np.exp(maxspace*(n//2) + maxshift)
minbase = np.exp(maxspace*(-n//2+1) + minshift)
# For fC-r (k defined with same amount of points as r)
kmax = maxbase/r.min()
kmin = minbase/r.max()
k = np.logspace(np.log10(kmin), np.log10(kmax) + minspace, r.size)
# For fI-r
rI = np.logspace(np.log10(1/maxbase) - r_def[0],
np.log10(1/minbase) + r_def[1], r_def[2]*n)
kmaxI = maxbase/rI.min()
kminI = minbase/rI.max()
kI = np.logspace(np.log10(kminI), np.log10(kmaxI) + minspace,
r_def[2]*n)
# Plot QC
fig, axs = plt.subplots(figsize=(9.5, 6), nrows=2, ncols=2,
num="Transform pairs")
axs = axs.ravel()
plt.subplots_adjust(wspace=.3, hspace=.4)
_plot_transform_pairs(fC, r, k, axs[:2], 'fC')
if reim == np.real:
tit = 'RE(fI)'
else:
tit = 'IM(fI)'
_plot_transform_pairs(fI, rI, kI, axs[2:], tit)
fig.canvas.draw() # To force draw in notebook while running
plt.show() | python | def _call_qc_transform_pairs(n, ispacing, ishift, fI, fC, r, r_def, reim):
r"""QC the input transform pairs."""
print('* QC: Input transform-pairs:')
print(' fC: x-range defined through ``n``, ``spacing``, ``shift``, and ' +
'``r``-parameters; b-range defined through ``r``-parameter.')
print(' fI: x- and b-range defined through ``n``, ``spacing``' +
', ``shift``, and ``r_def``-parameters.')
# Calculate min/max k, from minimum and maximum spacing/shift
minspace = np.arange(*ispacing).min()
maxspace = np.arange(*ispacing).max()
minshift = np.arange(*ishift).min()
maxshift = np.arange(*ishift).max()
maxbase = np.exp(maxspace*(n//2) + maxshift)
minbase = np.exp(maxspace*(-n//2+1) + minshift)
# For fC-r (k defined with same amount of points as r)
kmax = maxbase/r.min()
kmin = minbase/r.max()
k = np.logspace(np.log10(kmin), np.log10(kmax) + minspace, r.size)
# For fI-r
rI = np.logspace(np.log10(1/maxbase) - r_def[0],
np.log10(1/minbase) + r_def[1], r_def[2]*n)
kmaxI = maxbase/rI.min()
kminI = minbase/rI.max()
kI = np.logspace(np.log10(kminI), np.log10(kmaxI) + minspace,
r_def[2]*n)
# Plot QC
fig, axs = plt.subplots(figsize=(9.5, 6), nrows=2, ncols=2,
num="Transform pairs")
axs = axs.ravel()
plt.subplots_adjust(wspace=.3, hspace=.4)
_plot_transform_pairs(fC, r, k, axs[:2], 'fC')
if reim == np.real:
tit = 'RE(fI)'
else:
tit = 'IM(fI)'
_plot_transform_pairs(fI, rI, kI, axs[2:], tit)
fig.canvas.draw() # To force draw in notebook while running
plt.show() | [
"def",
"_call_qc_transform_pairs",
"(",
"n",
",",
"ispacing",
",",
"ishift",
",",
"fI",
",",
"fC",
",",
"r",
",",
"r_def",
",",
"reim",
")",
":",
"print",
"(",
"'* QC: Input transform-pairs:'",
")",
"print",
"(",
"' fC: x-range defined through ``n``, ``spacing``,... | r"""QC the input transform pairs. | [
"r",
"QC",
"the",
"input",
"transform",
"pairs",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/fdesign.py#L683-L727 |
26,107 | empymod/empymod | empymod/scripts/fdesign.py | _plot_transform_pairs | def _plot_transform_pairs(fCI, r, k, axes, tit):
r"""Plot the input transform pairs."""
# Plot lhs
plt.sca(axes[0])
plt.title('|' + tit + ' lhs|')
for f in fCI:
if f.name == 'j2':
lhs = f.lhs(k)
plt.loglog(k, np.abs(lhs[0]), lw=2, label='j0')
plt.loglog(k, np.abs(lhs[1]), lw=2, label='j1')
else:
plt.loglog(k, np.abs(f.lhs(k)), lw=2, label=f.name)
if tit != 'fC':
plt.xlabel('l')
plt.legend(loc='best')
# Plot rhs
plt.sca(axes[1])
plt.title('|' + tit + ' rhs|')
# Transform pair rhs
for f in fCI:
if tit == 'fC':
plt.loglog(r, np.abs(f.rhs), lw=2, label=f.name)
else:
plt.loglog(r, np.abs(f.rhs(r)), lw=2, label=f.name)
# Transform with Key
for f in fCI:
if f.name[1] in ['0', '1', '2']:
filt = j0j1filt()
else:
filt = sincosfilt()
kk = filt.base/r[:, None]
if f.name == 'j2':
lhs = f.lhs(kk)
kr0 = np.dot(lhs[0], getattr(filt, 'j0'))/r
kr1 = np.dot(lhs[1], getattr(filt, 'j1'))/r**2
kr = kr0+kr1
else:
kr = np.dot(f.lhs(kk), getattr(filt, f.name))/r
plt.loglog(r, np.abs(kr), '-.', lw=2, label=filt.name)
if tit != 'fC':
plt.xlabel('r')
plt.legend(loc='best') | python | def _plot_transform_pairs(fCI, r, k, axes, tit):
r"""Plot the input transform pairs."""
# Plot lhs
plt.sca(axes[0])
plt.title('|' + tit + ' lhs|')
for f in fCI:
if f.name == 'j2':
lhs = f.lhs(k)
plt.loglog(k, np.abs(lhs[0]), lw=2, label='j0')
plt.loglog(k, np.abs(lhs[1]), lw=2, label='j1')
else:
plt.loglog(k, np.abs(f.lhs(k)), lw=2, label=f.name)
if tit != 'fC':
plt.xlabel('l')
plt.legend(loc='best')
# Plot rhs
plt.sca(axes[1])
plt.title('|' + tit + ' rhs|')
# Transform pair rhs
for f in fCI:
if tit == 'fC':
plt.loglog(r, np.abs(f.rhs), lw=2, label=f.name)
else:
plt.loglog(r, np.abs(f.rhs(r)), lw=2, label=f.name)
# Transform with Key
for f in fCI:
if f.name[1] in ['0', '1', '2']:
filt = j0j1filt()
else:
filt = sincosfilt()
kk = filt.base/r[:, None]
if f.name == 'j2':
lhs = f.lhs(kk)
kr0 = np.dot(lhs[0], getattr(filt, 'j0'))/r
kr1 = np.dot(lhs[1], getattr(filt, 'j1'))/r**2
kr = kr0+kr1
else:
kr = np.dot(f.lhs(kk), getattr(filt, f.name))/r
plt.loglog(r, np.abs(kr), '-.', lw=2, label=filt.name)
if tit != 'fC':
plt.xlabel('r')
plt.legend(loc='best') | [
"def",
"_plot_transform_pairs",
"(",
"fCI",
",",
"r",
",",
"k",
",",
"axes",
",",
"tit",
")",
":",
"# Plot lhs",
"plt",
".",
"sca",
"(",
"axes",
"[",
"0",
"]",
")",
"plt",
".",
"title",
"(",
"'|'",
"+",
"tit",
"+",
"' lhs|'",
")",
"for",
"f",
"... | r"""Plot the input transform pairs. | [
"r",
"Plot",
"the",
"input",
"transform",
"pairs",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/fdesign.py#L730-L778 |
26,108 | empymod/empymod | empymod/scripts/fdesign.py | _plot_inversion | def _plot_inversion(f, rhs, r, k, imin, spacing, shift, cvar):
r"""QC the resulting filter."""
# Check matplotlib (soft dependency)
if not plt:
print(plt_msg)
return
plt.figure("Inversion result "+f.name, figsize=(9.5, 4))
plt.subplots_adjust(wspace=.3, bottom=0.2)
plt.clf()
tk = np.logspace(np.log10(k.min()), np.log10(k.max()), r.size)
plt.suptitle(f.name+'; Spacing ::'+str(spacing)+'; Shift ::'+str(shift))
# Plot lhs
plt.subplot(121)
plt.title('|lhs|')
if f.name == 'j2':
lhs = f.lhs(tk)
plt.loglog(tk, np.abs(lhs[0]), lw=2, label='Theoretical J0')
plt.loglog(tk, np.abs(lhs[1]), lw=2, label='Theoretical J1')
else:
plt.loglog(tk, np.abs(f.lhs(tk)), lw=2, label='Theoretical')
plt.xlabel('l')
plt.legend(loc='best')
# Plot rhs
plt.subplot(122)
plt.title('|rhs|')
# Transform pair rhs
plt.loglog(r, np.abs(f.rhs), lw=2, label='Theoretical')
# Transform with filter
plt.loglog(r, np.abs(rhs), '-.', lw=2, label='This filter')
# Plot minimum amplitude or max r, respectively
if cvar == 'amp':
label = 'Min. Amp'
else:
label = 'Max. r'
plt.loglog(r[imin], np.abs(rhs[imin]), 'go', label=label)
plt.xlabel('r')
plt.legend(loc='best')
plt.gcf().canvas.draw() # To force draw in notebook while running
plt.show() | python | def _plot_inversion(f, rhs, r, k, imin, spacing, shift, cvar):
r"""QC the resulting filter."""
# Check matplotlib (soft dependency)
if not plt:
print(plt_msg)
return
plt.figure("Inversion result "+f.name, figsize=(9.5, 4))
plt.subplots_adjust(wspace=.3, bottom=0.2)
plt.clf()
tk = np.logspace(np.log10(k.min()), np.log10(k.max()), r.size)
plt.suptitle(f.name+'; Spacing ::'+str(spacing)+'; Shift ::'+str(shift))
# Plot lhs
plt.subplot(121)
plt.title('|lhs|')
if f.name == 'j2':
lhs = f.lhs(tk)
plt.loglog(tk, np.abs(lhs[0]), lw=2, label='Theoretical J0')
plt.loglog(tk, np.abs(lhs[1]), lw=2, label='Theoretical J1')
else:
plt.loglog(tk, np.abs(f.lhs(tk)), lw=2, label='Theoretical')
plt.xlabel('l')
plt.legend(loc='best')
# Plot rhs
plt.subplot(122)
plt.title('|rhs|')
# Transform pair rhs
plt.loglog(r, np.abs(f.rhs), lw=2, label='Theoretical')
# Transform with filter
plt.loglog(r, np.abs(rhs), '-.', lw=2, label='This filter')
# Plot minimum amplitude or max r, respectively
if cvar == 'amp':
label = 'Min. Amp'
else:
label = 'Max. r'
plt.loglog(r[imin], np.abs(rhs[imin]), 'go', label=label)
plt.xlabel('r')
plt.legend(loc='best')
plt.gcf().canvas.draw() # To force draw in notebook while running
plt.show() | [
"def",
"_plot_inversion",
"(",
"f",
",",
"rhs",
",",
"r",
",",
"k",
",",
"imin",
",",
"spacing",
",",
"shift",
",",
"cvar",
")",
":",
"# Check matplotlib (soft dependency)",
"if",
"not",
"plt",
":",
"print",
"(",
"plt_msg",
")",
"return",
"plt",
".",
"... | r"""QC the resulting filter. | [
"r",
"QC",
"the",
"resulting",
"filter",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/fdesign.py#L781-L829 |
26,109 | empymod/empymod | empymod/scripts/fdesign.py | empy_hankel | def empy_hankel(ftype, zsrc, zrec, res, freqtime, depth=None, aniso=None,
epermH=None, epermV=None, mpermH=None, mpermV=None,
htarg=None, verblhs=0, verbrhs=0):
r"""Numerical transform pair with empymod.
All parameters except ``ftype``, ``verblhs``, and ``verbrhs`` correspond to
the input parameters to ``empymod.dipole``. See there for more information.
Note that if depth=None or [], the analytical full-space solutions will be
used (much faster).
Parameters
----------
ftype : str or list of strings
Either of: {'j0', 'j1', 'j2', ['j0', 'j1']}
- 'j0': Analyze J0-term with ab=11, angle=45°
- 'j1': Analyze J1-term with ab=31, angle=0°
- 'j2': Analyze J0- and J1-terms jointly with ab=12, angle=45°
- ['j0', 'j1']: Same as calling empy_hankel twice, once with 'j0' and
one with 'j1'; can be provided like this to
fdesign.design.
verblhs, verbrhs: int
verb-values provided to empymod for lhs and rhs.
Note that ftype='j2' only works for fC, not for fI.
"""
# Loop over ftypes, if there are several
if isinstance(ftype, list):
out = []
for f in ftype:
out.append(empy_hankel(f, zsrc, zrec, res, freqtime, depth, aniso,
epermH, epermV, mpermH, mpermV, htarg,
verblhs, verbrhs))
return out
# Collect model
model = {'src': [0, 0, zsrc],
'depth': depth,
'res': res,
'aniso': aniso,
'epermH': epermH,
'epermV': epermV,
'mpermH': mpermH,
'mpermV': mpermV}
# Finalize model depending on ftype
if ftype == 'j0': # J0: 11, 45°
model['ab'] = 11
x = 1/np.sqrt(2)
y = 1/np.sqrt(2)
elif ftype == 'j1': # J1: 31, 0°
model['ab'] = 31
x = 1
y = 0
elif ftype == 'j2': # J2: 12, 45°
model['ab'] = 12
x = 1/np.sqrt(2)
y = 1/np.sqrt(2)
# rhs: empymod.model.dipole
# If depth=[], the analytical full-space solution will be used internally
def rhs(r):
out = dipole(rec=[r*x, r*y, zrec], ht='qwe', xdirect=True,
verb=verbrhs, htarg=htarg, freqtime=freqtime, **model)
return out
# lhs: empymod.model.dipole_k
def lhs(k):
lhs0, lhs1 = dipole_k(rec=[x, y, zrec], wavenumber=k, verb=verblhs,
freq=freqtime, **model)
if ftype == 'j0':
return lhs0
elif ftype == 'j1':
return lhs1
elif ftype == 'j2':
return (lhs0, lhs1)
def empy_hankel(ftype, zsrc, zrec, res, freqtime, depth=None, aniso=None,
                epermH=None, epermV=None, mpermH=None, mpermV=None,
                htarg=None, verblhs=0, verbrhs=0):
    r"""Numerical transform pair with empymod.

    All parameters except ``ftype``, ``verblhs``, and ``verbrhs`` correspond
    to the input parameters to ``empymod.dipole``; see there for more
    information. Note that if depth=None or [], the analytical full-space
    solutions will be used (much faster).

    Parameters
    ----------
    ftype : str or list of strings
        Either of: {'j0', 'j1', 'j2', ['j0', 'j1']}

        - 'j0': Analyze J0-term with ab=11, angle=45°
        - 'j1': Analyze J1-term with ab=31, angle=0°
        - 'j2': Analyze J0- and J1-terms jointly with ab=12, angle=45°
        - a list is the same as calling empy_hankel once per entry; can be
          provided like this to fdesign.design.

    verblhs, verbrhs: int
        verb-values provided to empymod for lhs and rhs.

    Note that ftype='j2' only works for fC, not for fI.
    """
    # Several ftypes given: recurse once per entry and collect the results.
    if isinstance(ftype, list):
        return [empy_hankel(single, zsrc, zrec, res, freqtime, depth, aniso,
                            epermH, epermV, mpermH, mpermV, htarg,
                            verblhs, verbrhs) for single in ftype]

    # Model parameters shared by the lhs and the rhs calls.
    model = {'src': [0, 0, zsrc],
             'depth': depth,
             'res': res,
             'aniso': aniso,
             'epermH': epermH,
             'epermV': epermV,
             'mpermH': mpermH,
             'mpermV': mpermV}

    # Source-receiver configuration (ab) and unit offset direction (x, y)
    # depend on the requested ftype.
    if ftype == 'j0':    # J0: ab=11, 45°
        model['ab'] = 11
        x = y = 1/np.sqrt(2)
    elif ftype == 'j1':  # J1: ab=31, 0°
        model['ab'] = 31
        x, y = 1, 0
    elif ftype == 'j2':  # J2: ab=12, 45°
        model['ab'] = 12
        x = y = 1/np.sqrt(2)

    # rhs: space-frequency domain solution via empymod.model.dipole.
    # If depth=[], the analytical full-space solution is used internally.
    def rhs(r):
        return dipole(rec=[r*x, r*y, zrec], ht='qwe', xdirect=True,
                      verb=verbrhs, htarg=htarg, freqtime=freqtime, **model)

    # lhs: wavenumber-domain solution via empymod.model.dipole_k.
    def lhs(k):
        lhs0, lhs1 = dipole_k(rec=[x, y, zrec], wavenumber=k, verb=verblhs,
                              freq=freqtime, **model)
        if ftype == 'j0':
            return lhs0
        elif ftype == 'j1':
            return lhs1
        elif ftype == 'j2':
            return (lhs0, lhs1)

    return Ghosh(ftype, lhs, rhs)
"def",
"empy_hankel",
"(",
"ftype",
",",
"zsrc",
",",
"zrec",
",",
"res",
",",
"freqtime",
",",
"depth",
"=",
"None",
",",
"aniso",
"=",
"None",
",",
"epermH",
"=",
"None",
",",
"epermV",
"=",
"None",
",",
"mpermH",
"=",
"None",
",",
"mpermV",
"=",... | r"""Numerical transform pair with empymod.
All parameters except ``ftype``, ``verblhs``, and ``verbrhs`` correspond to
the input parameters to ``empymod.dipole``. See there for more information.
Note that if depth=None or [], the analytical full-space solutions will be
used (much faster).
Parameters
----------
ftype : str or list of strings
Either of: {'j0', 'j1', 'j2', ['j0', 'j1']}
- 'j0': Analyze J0-term with ab=11, angle=45°
- 'j1': Analyze J1-term with ab=31, angle=0°
- 'j2': Analyze J0- and J1-terms jointly with ab=12, angle=45°
- ['j0', 'j1']: Same as calling empy_hankel twice, once with 'j0' and
one with 'j1'; can be provided like this to
fdesign.design.
verblhs, verbrhs: int
verb-values provided to empymod for lhs and rhs.
Note that ftype='j2' only works for fC, not for fI. | [
"r",
"Numerical",
"transform",
"pair",
"with",
"empymod",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/fdesign.py#L1109-L1192 |
26,110 | empymod/empymod | empymod/scripts/fdesign.py | _get_min_val | def _get_min_val(spaceshift, *params):
r"""Calculate minimum resolved amplitude or maximum r."""
# Get parameters from tuples
spacing, shift = spaceshift
n, fI, fC, r, r_def, error, reim, cvar, verb, plot, log = params
# Get filter for these parameters
dlf = _calculate_filter(n, spacing, shift, fI, r_def, reim, 'filt')
# Calculate rhs-response with this filter
k = dlf.base/r[:, None]
# Loop over transforms
for i, f in enumerate(fC):
# Calculate lhs and rhs; rhs depends on ftype
lhs = f.lhs(k)
if f.name == 'j2':
rhs0 = np.dot(lhs[0], getattr(dlf, 'j0'))/r
rhs1 = np.dot(lhs[1], getattr(dlf, 'j1'))/r**2
rhs = rhs0 + rhs1
else:
rhs = np.dot(lhs, getattr(dlf, f.name))/r
# Get relative error
rel_error = np.abs((rhs - f.rhs)/f.rhs)
# Get indices where relative error is bigger than error
imin0 = np.where(rel_error > error)[0]
# Find first occurrence of failure
if np.all(rhs == 0) or np.all(np.isnan(rhs)):
# if all rhs are zeros or nans, the filter is useless
imin0 = 0
elif imin0.size == 0:
# if imin0.size == 0: # empty array, all rel_error < error.
imin0 = rhs.size-1 # set to last r
if verb > 0 and log['warn-r'] == 0:
print('* WARNING :: all data have error < ' + str(error) +
'; choose larger r or set error-level higher.')
log['warn-r'] = 1 # Only do this once
else:
# Kind of a dirty hack: Permit to jump up to four bad values,
# resulting for instance from high rel_error from zero crossings
# of the transform pair. Should be made an input argument or
# generally improved.
if imin0.size > 4:
imin0 = np.max([0, imin0[4]-5])
else: # just take the first one (no jumping allowed; normal case)
imin0 = np.max([0, imin0[0]-1])
# Note that both version yield the same result if the failure is
# consistent.
# Depending on cvar, store minimum amplitude or 1/maxr
if cvar == 'amp':
min_val0 = np.abs(rhs[imin0])
else:
min_val0 = 1/r[imin0]
# Check if this inversion is better than previous ones
if i == 0: # First run, store these values
imin = dc(imin0)
min_val = dc(min_val0)
else: # Replace imin, min_val if this one is better
if min_val0 > min_val:
min_val = dc(min_val0)
imin = dc(imin0)
# QC plot
if plot > 2:
_plot_inversion(f, rhs, r, k, imin0, spacing, shift, cvar)
# If verbose, print progress
if verb > 1:
log = _print_count(log)
# If there is no point with rel_error < error (imin=0) it returns np.inf.
def _get_min_val(spaceshift, *params):
    r"""Calculate minimum resolved amplitude or maximum r.

    Cost function for the filter design: for one (spacing, shift)
    candidate it computes a filter, applies it to the offsets ``r``, and
    measures how far out (or how small an amplitude) the filter still
    reproduces the analytical right-hand side within ``error``.

    Parameters
    ----------
    spaceshift : tuple (spacing, shift)
        Log-spacing and shift of the filter base to evaluate.
    params : tuple
        ``(n, fI, fC, r, r_def, error, reim, cvar, verb, plot, log)``,
        as assembled by the calling design routine.

    Returns
    -------
    min_val : float or ndarray
        Minimum resolved amplitude if ``cvar == 'amp'``, else 1/max(r);
        ``np.inf`` if no point reaches the required accuracy.
    """
    # Get parameters from tuples
    spacing, shift = spaceshift
    n, fI, fC, r, r_def, error, reim, cvar, verb, plot, log = params

    # Get filter for these parameters
    dlf = _calculate_filter(n, spacing, shift, fI, r_def, reim, 'filt')

    # Calculate rhs-response with this filter; k has shape (r.size, n).
    k = dlf.base/r[:, None]

    # Loop over transforms
    for i, f in enumerate(fC):

        # Calculate lhs and rhs; rhs depends on ftype.
        # NOTE(review): f.rhs is used below as a precomputed array, not
        # called — presumably evaluated upstream; confirm in the caller.
        lhs = f.lhs(k)
        if f.name == 'j2':
            # j2 combines the J0- and J1-filtered parts.
            rhs0 = np.dot(lhs[0], getattr(dlf, 'j0'))/r
            rhs1 = np.dot(lhs[1], getattr(dlf, 'j1'))/r**2
            rhs = rhs0 + rhs1
        else:
            rhs = np.dot(lhs, getattr(dlf, f.name))/r

        # Get relative error
        rel_error = np.abs((rhs - f.rhs)/f.rhs)

        # Get indices where relative error is bigger than error
        imin0 = np.where(rel_error > error)[0]

        # Find first occurrence of failure
        if np.all(rhs == 0) or np.all(np.isnan(rhs)):
            # if all rhs are zeros or nans, the filter is useless
            imin0 = 0
        elif imin0.size == 0:
            # if imin0.size == 0:  # empty array, all rel_error < error.
            imin0 = rhs.size-1  # set to last r
            if verb > 0 and log['warn-r'] == 0:
                print('* WARNING :: all data have error < ' + str(error) +
                      '; choose larger r or set error-level higher.')
                log['warn-r'] = 1  # Only do this once
        else:
            # Kind of a dirty hack: Permit to jump up to four bad values,
            # resulting for instance from high rel_error from zero crossings
            # of the transform pair. Should be made an input argument or
            # generally improved.
            if imin0.size > 4:
                imin0 = np.max([0, imin0[4]-5])
            else:  # just take the first one (no jumping allowed; normal case)
                imin0 = np.max([0, imin0[0]-1])
            # Note that both version yield the same result if the failure is
            # consistent.

        # Depending on cvar, store minimum amplitude or 1/maxr
        if cvar == 'amp':
            min_val0 = np.abs(rhs[imin0])
        else:
            min_val0 = 1/r[imin0]

        # Check if this inversion is better than previous ones
        if i == 0:  # First run, store these values
            imin = dc(imin0)
            min_val = dc(min_val0)
        else:  # Replace imin, min_val if this one is better
            if min_val0 > min_val:
                min_val = dc(min_val0)
                imin = dc(imin0)

        # QC plot
        if plot > 2:
            _plot_inversion(f, rhs, r, k, imin0, spacing, shift, cvar)

    # If verbose, print progress
    if verb > 1:
        log = _print_count(log)

    # If there is no point with rel_error < error (imin=0) it returns np.inf.
    return np.where(imin == 0, np.inf, min_val)
"def",
"_get_min_val",
"(",
"spaceshift",
",",
"*",
"params",
")",
":",
"# Get parameters from tuples",
"spacing",
",",
"shift",
"=",
"spaceshift",
"n",
",",
"fI",
",",
"fC",
",",
"r",
",",
"r_def",
",",
"error",
",",
"reim",
",",
"cvar",
",",
"verb",
... | r"""Calculate minimum resolved amplitude or maximum r. | [
"r",
"Calculate",
"minimum",
"resolved",
"amplitude",
"or",
"maximum",
"r",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/fdesign.py#L1197-L1276 |
def _calculate_filter(n, spacing, shift, fI, r_def, reim, name):
    r"""Design a digital filter for the given n, spacing, and shift.

    Builds the log-spaced filter base, sets up an (over-determined)
    linear system relating the wavenumber-domain lhs to the space-domain
    rhs of every transform pair in ``fI``, and solves it via QR for the
    filter values, which are attached to the returned ``DigitalFilter``.
    """
    # Filter base for this n/spacing/shift: n log-spaced points.
    indices = np.arange(n) - n//2
    base = np.exp(spacing*indices + shift)

    # Offsets r: span defined by the base, extended by r_def[0]/r_def[1]
    # decades; r_def[2]*n samples (over-determined system if r_def[2] > 1).
    start = np.log10(1/np.max(base)) - r_def[0]
    stop = np.log10(1/np.min(base)) + r_def[1]
    r = np.logspace(start, stop, r_def[2]*n)

    # Required k-values; matrix of shape (r.size, base.size).
    k = base/r[:, None]

    # Filter instance holding base, factor, and the filter values.
    dlf = DigitalFilter(name.split('.')[0])
    dlf.base = base
    dlf.factor = np.around(np.average(base[1:]/base[:-1]), 15)

    # One set of filter values per transform pair.
    for transform in fI:

        # lhs and rhs of the inversion (reim selects real or imag part).
        lhs = reim(transform.lhs(k))
        rhs = reim(transform.rhs(r)*r)

        # Solve lhs*J = rhs using a QR factorization. If factoring fails
        # (qr) or the matrix is singular or square (solve), numpy raises a
        # LinAlgError; the error is ignored and zeros are returned instead.
        try:
            qq, rr = np.linalg.qr(lhs)
            values = np.linalg.solve(rr, rhs.dot(qq))
        except np.linalg.LinAlgError:
            values = np.zeros((base.size,))

        setattr(dlf, transform.name, values)

    return dlf
r"""Calculate filter for this spacing, shift, n."""
# Base :: For this n/spacing/shift
base = np.exp(spacing*(np.arange(n)-n//2) + shift)
# r :: Start/end is defined by base AND r_def[0]/r_def[1]
# Overdetermined system if r_def[2] > 1
r = np.logspace(np.log10(1/np.max(base)) - r_def[0],
np.log10(1/np.min(base)) + r_def[1], r_def[2]*n)
# k :: Get required k-values (matrix of shape (r.size, base.size))
k = base/r[:, None]
# Create filter instance
dlf = DigitalFilter(name.split('.')[0])
dlf.base = base
dlf.factor = np.around(np.average(base[1:]/base[:-1]), 15)
# Loop over transforms
for f in fI:
# Calculate lhs and rhs for inversion
lhs = reim(f.lhs(k))
rhs = reim(f.rhs(r)*r)
# Calculate filter values: Solve lhs*J=rhs using linalg.qr.
# If factoring fails (qr) or if matrix is singular or square (solve) it
# will raise a LinAlgError. Error is ignored and zeros are returned
# instead.
try:
qq, rr = np.linalg.qr(lhs)
J = np.linalg.solve(rr, rhs.dot(qq))
except np.linalg.LinAlgError:
J = np.zeros((base.size,))
setattr(dlf, f.name, J)
return dlf | [
"def",
"_calculate_filter",
"(",
"n",
",",
"spacing",
",",
"shift",
",",
"fI",
",",
"r_def",
",",
"reim",
",",
"name",
")",
":",
"# Base :: For this n/spacing/shift",
"base",
"=",
"np",
".",
"exp",
"(",
"spacing",
"*",
"(",
"np",
".",
"arange",
"(",
"n... | r"""Calculate filter for this spacing, shift, n. | [
"r",
"Calculate",
"filter",
"for",
"this",
"spacing",
"shift",
"n",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/fdesign.py#L1279-L1316 |
26,112 | empymod/empymod | empymod/scripts/fdesign.py | _print_count | def _print_count(log):
r"""Print run-count information."""
log['cnt2'] += 1 # Current number
cp = log['cnt2']/log['totnr']*100 # Percentage
if log['cnt2'] == 0: # Not sure about this; brute seems to call the
pass # function with the first arguments twice...
elif log['cnt2'] > log['totnr']: # fmin-status
print(" fmin fct calls : %d" % (log['cnt2']-log['totnr']), end='\r')
elif int(cp) > log['cnt1'] or cp < 1 or log['cnt2'] == log['totnr']:
# Get seconds since start
sec = int(default_timer() - log['time'])
# Get estimate of remaining time, as string
tleft = str(timedelta(seconds=int(100*sec/cp - sec)))
# Print progress
pstr = (" brute fct calls : %d/%d"
% (log['cnt2'], log['totnr']))
if log['totnr'] > 100:
pstr += (" (%d %%); est: %s " % (cp, tleft))
print(pstr, end='\r')
if log['cnt2'] == log['totnr']:
# Empty previous line
print(" "*len(pstr), end='\r')
# Print final brute-message
print(" brute fct calls : %d" % log['totnr'])
# Update percentage cnt1
log['cnt1'] = cp
return log | python | def _print_count(log):
r"""Print run-count information."""
log['cnt2'] += 1 # Current number
cp = log['cnt2']/log['totnr']*100 # Percentage
if log['cnt2'] == 0: # Not sure about this; brute seems to call the
pass # function with the first arguments twice...
elif log['cnt2'] > log['totnr']: # fmin-status
print(" fmin fct calls : %d" % (log['cnt2']-log['totnr']), end='\r')
elif int(cp) > log['cnt1'] or cp < 1 or log['cnt2'] == log['totnr']:
# Get seconds since start
sec = int(default_timer() - log['time'])
# Get estimate of remaining time, as string
tleft = str(timedelta(seconds=int(100*sec/cp - sec)))
# Print progress
pstr = (" brute fct calls : %d/%d"
% (log['cnt2'], log['totnr']))
if log['totnr'] > 100:
pstr += (" (%d %%); est: %s " % (cp, tleft))
print(pstr, end='\r')
if log['cnt2'] == log['totnr']:
# Empty previous line
print(" "*len(pstr), end='\r')
# Print final brute-message
print(" brute fct calls : %d" % log['totnr'])
# Update percentage cnt1
log['cnt1'] = cp
return log | [
"def",
"_print_count",
"(",
"log",
")",
":",
"log",
"[",
"'cnt2'",
"]",
"+=",
"1",
"# Current number",
"cp",
"=",
"log",
"[",
"'cnt2'",
"]",
"/",
"log",
"[",
"'totnr'",
"]",
"*",
"100",
"# Percentage",
"if",
"log",
"[",
"'cnt2'",
"]",
"==",
"0",
":... | r"""Print run-count information. | [
"r",
"Print",
"run",
"-",
"count",
"information",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/fdesign.py#L1345-L1380 |
def wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH, zetaV, lambd,
               ab, xdirect, msrc, mrec, use_ne_eval):
    r"""Calculate wavenumber domain solution.

    Return the wavenumber domain solutions ``PJ0``, ``PJ1``, and ``PJ0b``,
    which have to be transformed with a Hankel transform to the frequency
    domain. ``PJ0``/``PJ0b`` and ``PJ1`` have to be transformed with Bessel
    functions of order 0 (:math:`J_0`) and 1 (:math:`J_1`), respectively.

    This function corresponds loosely to equations 105--107, 111--116,
    119--121, and 123--128 in [HuTS15]_, and equally loosely to the file
    ``kxwmod.c``.

    [HuTS15]_ uses Bessel functions of orders 0, 1, and 2 (:math:`J_0, J_1,
    J_2`). The implementations of the *Fast Hankel Transform* and the
    *Quadrature-with-Extrapolation* in ``transform`` are set-up with Bessel
    functions of order 0 and 1 only. This is achieved by applying the
    recurrence formula

    .. math:: J_2(kr) = \frac{2}{kr} J_1(kr) - J_0(kr) \ .

    .. note::

        ``PJ0`` and ``PJ0b`` could theoretically be added here into one, and
        then be transformed in one go. However, ``PJ0b`` has to be multiplied
        by ``factAng`` later. This has to be done after the Hankel transform
        for methods which make use of spline interpolation, in order to work
        for offsets that are not in line with each other.

    This function is called from one of the Hankel functions in
    :mod:`transform`. Consult the modelling routines in :mod:`model` for a
    description of the input and output parameters.

    If you are solely interested in the wavenumber-domain solution you can
    call this function directly. However, you have to make sure all input
    arguments are correct, as no checks are carried out here.
    """
    # ** CALCULATE GREEN'S FUNCTIONS
    # Shape of PTM, PTE: (nfreq, noffs, nfilt)
    PTM, PTE = greenfct(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH,
                        zetaV, lambd, ab, xdirect, msrc, mrec, use_ne_eval)

    # ** AB-SPECIFIC COLLECTION OF PJ0, PJ1, AND PJ0b
    # Pre-allocate output; entries not set for this ab remain None.
    PJ0 = None
    PJ1 = None
    PJ0b = None

    # Calculate Ptot which is used in all cases
    Ptot = (PTM + PTE)/(4*np.pi)

    # If rec is magnetic switch sign (reciprocity MM/ME => EE/EM).
    if mrec:
        sign = -1
    else:
        sign = 1

    # Group into PJ0 and PJ1 for J0/J1 Hankel Transform.
    # Note: `sign` is mutated inside the branches; the order of the
    # assignments below therefore matters.
    if ab in [11, 12, 21, 22, 14, 24, 15, 25]:  # Eqs 105, 106, 111, 112,
        # J2(kr) = 2/(kr)*J1(kr) - J0(kr)       # 119, 120, 123, 124
        if ab in [14, 22]:
            sign *= -1
        PJ0b = sign/2*Ptot*lambd
        PJ1 = -sign*Ptot
        if ab in [11, 22, 24, 15]:
            if ab in [22, 24]:
                sign *= -1
            PJ0 = sign*(PTM - PTE)/(8*np.pi)*lambd
    elif ab in [13, 23, 31, 32, 34, 35, 16, 26]:  # Eqs 107, 113, 114, 115,
        PJ1 = sign*Ptot*lambd*lambd  # . 121, 125, 126, 127
        if ab in [34, 26]:
            PJ1 *= -1
    elif ab in [33, ]:  # Eq 116
        PJ0 = sign*Ptot*lambd*lambd*lambd

    # Return PJ0, PJ1, PJ0b
    return PJ0, PJ1, PJ0b
ab, xdirect, msrc, mrec, use_ne_eval):
r"""Calculate wavenumber domain solution.
Return the wavenumber domain solutions ``PJ0``, ``PJ1``, and ``PJ0b``,
which have to be transformed with a Hankel transform to the frequency
domain. ``PJ0``/``PJ0b`` and ``PJ1`` have to be transformed with Bessel
functions of order 0 (:math:`J_0`) and 1 (:math:`J_1`), respectively.
This function corresponds loosely to equations 105--107, 111--116,
119--121, and 123--128 in [HuTS15]_, and equally loosely to the file
``kxwmod.c``.
[HuTS15]_ uses Bessel functions of orders 0, 1, and 2 (:math:`J_0, J_1,
J_2`). The implementations of the *Fast Hankel Transform* and the
*Quadrature-with-Extrapolation* in ``transform`` are set-up with Bessel
functions of order 0 and 1 only. This is achieved by applying the
recurrence formula
.. math:: J_2(kr) = \frac{2}{kr} J_1(kr) - J_0(kr) \ .
.. note::
``PJ0`` and ``PJ0b`` could theoretically be added here into one, and
then be transformed in one go. However, ``PJ0b`` has to be multiplied
by ``factAng`` later. This has to be done after the Hankel transform
for methods which make use of spline interpolation, in order to work
for offsets that are not in line with each other.
This function is called from one of the Hankel functions in
:mod:`transform`. Consult the modelling routines in :mod:`model` for a
description of the input and output parameters.
If you are solely interested in the wavenumber-domain solution you can call
this function directly. However, you have to make sure all input arguments
are correct, as no checks are carried out here.
"""
# ** CALCULATE GREEN'S FUNCTIONS
# Shape of PTM, PTE: (nfreq, noffs, nfilt)
PTM, PTE = greenfct(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH,
zetaV, lambd, ab, xdirect, msrc, mrec, use_ne_eval)
# ** AB-SPECIFIC COLLECTION OF PJ0, PJ1, AND PJ0b
# Pre-allocate output
PJ0 = None
PJ1 = None
PJ0b = None
# Calculate Ptot which is used in all cases
Ptot = (PTM + PTE)/(4*np.pi)
# If rec is magnetic switch sign (reciprocity MM/ME => EE/EM).
if mrec:
sign = -1
else:
sign = 1
# Group into PJ0 and PJ1 for J0/J1 Hankel Transform
if ab in [11, 12, 21, 22, 14, 24, 15, 25]: # Eqs 105, 106, 111, 112,
# J2(kr) = 2/(kr)*J1(kr) - J0(kr) # 119, 120, 123, 124
if ab in [14, 22]:
sign *= -1
PJ0b = sign/2*Ptot*lambd
PJ1 = -sign*Ptot
if ab in [11, 22, 24, 15]:
if ab in [22, 24]:
sign *= -1
PJ0 = sign*(PTM - PTE)/(8*np.pi)*lambd
elif ab in [13, 23, 31, 32, 34, 35, 16, 26]: # Eqs 107, 113, 114, 115,
PJ1 = sign*Ptot*lambd*lambd # . 121, 125, 126, 127
if ab in [34, 26]:
PJ1 *= -1
elif ab in [33, ]: # Eq 116
PJ0 = sign*Ptot*lambd*lambd*lambd
# Return PJ0, PJ1, PJ0b
return PJ0, PJ1, PJ0b | [
"def",
"wavenumber",
"(",
"zsrc",
",",
"zrec",
",",
"lsrc",
",",
"lrec",
",",
"depth",
",",
"etaH",
",",
"etaV",
",",
"zetaH",
",",
"zetaV",
",",
"lambd",
",",
"ab",
",",
"xdirect",
",",
"msrc",
",",
"mrec",
",",
"use_ne_eval",
")",
":",
"# ** CALC... | r"""Calculate wavenumber domain solution.
Return the wavenumber domain solutions ``PJ0``, ``PJ1``, and ``PJ0b``,
which have to be transformed with a Hankel transform to the frequency
domain. ``PJ0``/``PJ0b`` and ``PJ1`` have to be transformed with Bessel
functions of order 0 (:math:`J_0`) and 1 (:math:`J_1`), respectively.
This function corresponds loosely to equations 105--107, 111--116,
119--121, and 123--128 in [HuTS15]_, and equally loosely to the file
``kxwmod.c``.
[HuTS15]_ uses Bessel functions of orders 0, 1, and 2 (:math:`J_0, J_1,
J_2`). The implementations of the *Fast Hankel Transform* and the
*Quadrature-with-Extrapolation* in ``transform`` are set-up with Bessel
functions of order 0 and 1 only. This is achieved by applying the
recurrence formula
.. math:: J_2(kr) = \frac{2}{kr} J_1(kr) - J_0(kr) \ .
.. note::
``PJ0`` and ``PJ0b`` could theoretically be added here into one, and
then be transformed in one go. However, ``PJ0b`` has to be multiplied
by ``factAng`` later. This has to be done after the Hankel transform
for methods which make use of spline interpolation, in order to work
for offsets that are not in line with each other.
This function is called from one of the Hankel functions in
:mod:`transform`. Consult the modelling routines in :mod:`model` for a
description of the input and output parameters.
If you are solely interested in the wavenumber-domain solution you can call
this function directly. However, you have to make sure all input arguments
are correct, as no checks are carried out here. | [
"r",
"Calculate",
"wavenumber",
"domain",
"solution",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/kernel.py#L47-L129 |
def reflections(depth, e_zH, Gam, lrec, lsrc, use_ne_eval):
    r"""Calculate Rp, Rm.

    .. math:: R^\pm_n, \bar{R}^\pm_n

    This function corresponds to equations 64/65 and A-11/A-12 in
    [HuTS15]_, and loosely to the corresponding files ``Rmin.F90`` and
    ``Rplus.F90``.

    This function is called from the function :mod:`kernel.greenfct`.

    .. note::

        The ``use_ne_eval`` (numexpr) strings below evaluate against the
        surrounding local variable NAMES (``e_zHa``, ``Gamb``, ``tRef``,
        ...); renaming these locals would silently break the numexpr path.
    """
    # Loop over Rp, Rm: plus=True computes R^+ (accumulating downward),
    # plus=False computes R^- (accumulating upward).
    for plus in [True, False]:

        # Switches depending if plus or minus
        if plus:
            pm = 1
            layer_count = np.arange(depth.size-2, min(lrec, lsrc)-1, -1)
            izout = abs(lsrc-lrec)
            minmax = max(lrec, lsrc)
        else:
            pm = -1
            layer_count = np.arange(1, max(lrec, lsrc)+1, 1)
            izout = 0
            minmax = -min(lrec, lsrc)

        # If rec in last and rec below src (plus) or
        # if rec in first and rec above src (minus), shift izout
        shiftplus = lrec < lsrc and lrec == 0 and not plus
        shiftminus = lrec > lsrc and lrec == depth.size-1 and plus
        if shiftplus or shiftminus:
            izout -= pm

        # Pre-allocate Ref; one slab per layer between src- and rec-layer.
        Ref = np.zeros((Gam.shape[0], Gam.shape[1], abs(lsrc-lrec)+1,
                        Gam.shape[3]), dtype=complex)

        # Calculate the reflection, recursing from the outermost interface
        # towards the src/rec layers.
        for iz in layer_count:

            # Local reflection coefficient at this interface; Eqs 65, A-12.
            e_zHa = e_zH[:, None, iz+pm, None]
            Gama = Gam[:, :, iz, :]
            e_zHb = e_zH[:, None, iz, None]
            Gamb = Gam[:, :, iz+pm, :]
            if use_ne_eval:
                rlocstr = "(e_zHa*Gama - e_zHb*Gamb)/(e_zHa*Gama + e_zHb*Gamb)"
                rloc = use_ne_eval(rlocstr)
            else:
                rloca = e_zHa*Gama
                rlocb = e_zHb*Gamb
                rloc = (rloca - rlocb)/(rloca + rlocb)

            # In first layer tRef = rloc
            if iz == layer_count[0]:
                tRef = rloc.copy()
            else:
                ddepth = depth[iz+1+pm]-depth[iz+pm]

                # Recursion for the total reflection; Eqs 64, A-11.
                if use_ne_eval:
                    term = use_ne_eval("tRef*exp(-2*Gamb*ddepth)")
                    tRef = use_ne_eval("(rloc + term)/(1 + rloc*term)")
                else:
                    term = tRef*np.exp(-2*Gamb*ddepth)  # NOQA
                    tRef = (rloc + term)/(1 + rloc*term)

            # The global reflection coefficient is given back for all layers
            # between and including src- and rec-layer
            if lrec != lsrc and pm*iz <= minmax:
                Ref[:, :, izout, :] = tRef[:]
                izout -= pm

        # If lsrc = lrec, we just store the last values
        if lsrc == lrec and layer_count.size > 0:
            Ref = tRef

        # Store Ref in Rm/Rp
        if plus:
            Rm = Ref
        else:
            Rp = Ref

    # Return reflections (minus and plus)
    return Rm, Rp
r"""Calculate Rp, Rm.
.. math:: R^\pm_n, \bar{R}^\pm_n
This function corresponds to equations 64/65 and A-11/A-12 in
[HuTS15]_, and loosely to the corresponding files ``Rmin.F90`` and
``Rplus.F90``.
This function is called from the function :mod:`kernel.greenfct`.
"""
# Loop over Rp, Rm
for plus in [True, False]:
# Switches depending if plus or minus
if plus:
pm = 1
layer_count = np.arange(depth.size-2, min(lrec, lsrc)-1, -1)
izout = abs(lsrc-lrec)
minmax = max(lrec, lsrc)
else:
pm = -1
layer_count = np.arange(1, max(lrec, lsrc)+1, 1)
izout = 0
minmax = -min(lrec, lsrc)
# If rec in last and rec below src (plus) or
# if rec in first and rec above src (minus), shift izout
shiftplus = lrec < lsrc and lrec == 0 and not plus
shiftminus = lrec > lsrc and lrec == depth.size-1 and plus
if shiftplus or shiftminus:
izout -= pm
# Pre-allocate Ref
Ref = np.zeros((Gam.shape[0], Gam.shape[1], abs(lsrc-lrec)+1,
Gam.shape[3]), dtype=complex)
# Calculate the reflection
for iz in layer_count:
# Eqs 65, A-12
e_zHa = e_zH[:, None, iz+pm, None]
Gama = Gam[:, :, iz, :]
e_zHb = e_zH[:, None, iz, None]
Gamb = Gam[:, :, iz+pm, :]
if use_ne_eval:
rlocstr = "(e_zHa*Gama - e_zHb*Gamb)/(e_zHa*Gama + e_zHb*Gamb)"
rloc = use_ne_eval(rlocstr)
else:
rloca = e_zHa*Gama
rlocb = e_zHb*Gamb
rloc = (rloca - rlocb)/(rloca + rlocb)
# In first layer tRef = rloc
if iz == layer_count[0]:
tRef = rloc.copy()
else:
ddepth = depth[iz+1+pm]-depth[iz+pm]
# Eqs 64, A-11
if use_ne_eval:
term = use_ne_eval("tRef*exp(-2*Gamb*ddepth)")
tRef = use_ne_eval("(rloc + term)/(1 + rloc*term)")
else:
term = tRef*np.exp(-2*Gamb*ddepth) # NOQA
tRef = (rloc + term)/(1 + rloc*term)
# The global reflection coefficient is given back for all layers
# between and including src- and rec-layer
if lrec != lsrc and pm*iz <= minmax:
Ref[:, :, izout, :] = tRef[:]
izout -= pm
# If lsrc = lrec, we just store the last values
if lsrc == lrec and layer_count.size > 0:
Ref = tRef
# Store Ref in Rm/Rp
if plus:
Rm = Ref
else:
Rp = Ref
# Return reflections (minus and plus)
return Rm, Rp | [
"def",
"reflections",
"(",
"depth",
",",
"e_zH",
",",
"Gam",
",",
"lrec",
",",
"lsrc",
",",
"use_ne_eval",
")",
":",
"# Loop over Rp, Rm",
"for",
"plus",
"in",
"[",
"True",
",",
"False",
"]",
":",
"# Switches depending if plus or minus",
"if",
"plus",
":",
... | r"""Calculate Rp, Rm.
.. math:: R^\pm_n, \bar{R}^\pm_n
This function corresponds to equations 64/65 and A-11/A-12 in
[HuTS15]_, and loosely to the corresponding files ``Rmin.F90`` and
``Rplus.F90``.
This function is called from the function :mod:`kernel.greenfct`. | [
"r",
"Calculate",
"Rp",
"Rm",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/kernel.py#L316-L402 |
def angle_factor(angle, ab, msrc, mrec):
    r"""Return the angle-dependent factor.

    The whole calculation in the wavenumber domain is only a function of
    the distance between the source and the receiver, it is independent
    of the angle. The angle-dependency is this factor, which can be
    applied to the corresponding parts in the wavenumber or in the
    frequency domain.

    The ``angle_factor`` corresponds to the sine and cosine-functions in
    Eqs 105-107, 111-116, 119-121, 123-128 of [HuTS15]_.

    This function is called from one of the Hankel functions in
    :mod:`transform`. Consult the modelling routines in :mod:`model` for
    a description of the input and output parameters.
    """
    # ab=33/66 is completely symmetric: no angle dependency at all.
    if ab in [33, ]:
        return np.ones(angle.size)

    # Work on a copy so the caller's array is never modified.
    eval_angle = angle.copy()

    # Reciprocity for magnetic receiver with electric source (ME => EM):
    # G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z); rotate by pi. Not done
    # if the source is magnetic too, as src and rec are then swapped.
    if mrec and not msrc:
        eval_angle += np.pi

    # Cosine- or sine-type factor, together with the two angles at which
    # the result must come out as an exact zero.
    if ab in [11, 22, 15, 24, 13, 31, 26, 35]:
        fct, zero_a, zero_b = np.cos, np.pi/2, 3*np.pi/2
    else:
        fct, zero_a, zero_b = np.sin, np.pi, 2*np.pi

    # These configurations use the double angle.
    if ab in [11, 22, 15, 24, 12, 21, 14, 25]:
        eval_angle *= 2

    factAng = fct(eval_angle)

    # Force exact zeros at cos([pi/2, 3pi/2]) resp. sin([pi, 2pi]), which
    # would otherwise be tiny non-zero values (floating point issue).
    for zero_ang in (zero_a, zero_b):
        factAng[np.isclose(np.abs(eval_angle), zero_ang, 1e-10, 1e-14)] = 0

    return factAng
r"""Return the angle-dependent factor.
The whole calculation in the wavenumber domain is only a function of the
distance between the source and the receiver, it is independent of the
angel. The angle-dependency is this factor, which can be applied to the
corresponding parts in the wavenumber or in the frequency domain.
The ``angle_factor`` corresponds to the sine and cosine-functions in Eqs
105-107, 111-116, 119-121, 123-128.
This function is called from one of the Hankel functions in
:mod:`transform`. Consult the modelling routines in :mod:`model` for a
description of the input and output parameters.
"""
# 33/66 are completely symmetric and hence independent of angle
if ab in [33, ]:
return np.ones(angle.size)
# Evaluation angle
eval_angle = angle.copy()
# Add pi if receiver is magnetic (reciprocity), but not if source is
# electric, because then source and receiver are swapped, ME => EM:
# G^me_ab(s, r, e, z) = -G^em_ba(r, s, e, z).
if mrec and not msrc:
eval_angle += np.pi
# Define fct (cos/sin) and angles to be tested
if ab in [11, 22, 15, 24, 13, 31, 26, 35]:
fct = np.cos
test_ang_1 = np.pi/2
test_ang_2 = 3*np.pi/2
else:
fct = np.sin
test_ang_1 = np.pi
test_ang_2 = 2*np.pi
if ab in [11, 22, 15, 24, 12, 21, 14, 25]:
eval_angle *= 2
# Get factor
factAng = fct(eval_angle)
# Ensure cos([pi/2, 3pi/2]) and sin([pi, 2pi]) are zero (floating pt issue)
factAng[np.isclose(np.abs(eval_angle), test_ang_1, 1e-10, 1e-14)] = 0
factAng[np.isclose(np.abs(eval_angle), test_ang_2, 1e-10, 1e-14)] = 0
return factAng | [
"def",
"angle_factor",
"(",
"angle",
",",
"ab",
",",
"msrc",
",",
"mrec",
")",
":",
"# 33/66 are completely symmetric and hence independent of angle",
"if",
"ab",
"in",
"[",
"33",
",",
"]",
":",
"return",
"np",
".",
"ones",
"(",
"angle",
".",
"size",
")",
... | r"""Return the angle-dependent factor.
The whole calculation in the wavenumber domain is only a function of the
distance between the source and the receiver, it is independent of the
angel. The angle-dependency is this factor, which can be applied to the
corresponding parts in the wavenumber or in the frequency domain.
The ``angle_factor`` corresponds to the sine and cosine-functions in Eqs
105-107, 111-116, 119-121, 123-128.
This function is called from one of the Hankel functions in
:mod:`transform`. Consult the modelling routines in :mod:`model` for a
description of the input and output parameters. | [
"r",
"Return",
"the",
"angle",
"-",
"dependent",
"factor",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/kernel.py#L570-L620 |
def versions(mode=None, add_pckg=None, ncol=4):
    r"""Old func-way of class `Versions`, here for backwards compatibility.

    ``mode`` is not used any longer, dummy here.
    """
    # Warn about the deprecation, then delegate to the class.
    mesg = ("\n Func `versions` is deprecated and will "
            "be removed; use Class `Versions` instead.")
    warnings.warn(mesg, DeprecationWarning)
    return Versions(add_pckg, ncol)
r"""Old func-way of class `Versions`, here for backwards compatibility.
``mode`` is not used any longer, dummy here.
"""
# Issue warning
mesg = ("\n Func `versions` is deprecated and will " +
"be removed; use Class `Versions` instead.")
warnings.warn(mesg, DeprecationWarning)
return Versions(add_pckg, ncol) | [
"def",
"versions",
"(",
"mode",
"=",
"None",
",",
"add_pckg",
"=",
"None",
",",
"ncol",
"=",
"4",
")",
":",
"# Issue warning",
"mesg",
"=",
"(",
"\"\\n Func `versions` is deprecated and will \"",
"+",
"\"be removed; use Class `Versions` instead.\"",
")",
"warnings"... | r"""Old func-way of class `Versions`, here for backwards compatibility.
``mode`` is not used any longer, dummy here. | [
"r",
"Old",
"func",
"-",
"way",
"of",
"class",
"Versions",
"here",
"for",
"backwards",
"compatibility",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/printinfo.py#L254-L264 |
def _repr_html_(self):
    """HTML-rendered versions information.

    Builds an HTML table (for Jupyter's rich display) with: date/time
    title row, OS and CPU count, one two-cell entry per package returned
    by ``self._get_packages``, the Python version, and — if the
    module-level ``mklinfo`` is truthy — the MKL version.
    """
    # Check ncol: number of (version, name) pairs per table row.
    ncol = int(self.ncol)

    # Define html-styles; note the trailing quote closes each style attr.
    border = "border: 2px solid #fff;'"

    def colspan(html, txt, ncol, nrow):
        r"""Print txt in a row spanning whole table."""
        html += " <tr>\n"
        html += " <td style='text-align: center; "
        # nrow selects the styling: 0 = bold title, even = shaded band.
        if nrow == 0:
            html += "font-weight: bold; font-size: 1.2em; "
        elif nrow % 2 == 0:
            html += "background-color: #ddd;"
        html += border + " colspan='"
        html += str(2*ncol)+"'>%s</td>\n" % txt
        html += " </tr>\n"
        return html

    def cols(html, version, name, ncol, i):
        r"""Print package information in two cells."""
        # Check if we have to start a new row
        if i > 0 and i % ncol == 0:
            html += " </tr>\n"
            html += " <tr>\n"
        html += " <td style='text-align: right; background-color: "
        html += "#ccc; " + border + ">%s</td>\n" % version
        html += " <td style='text-align: left; "
        html += border + ">%s</td>\n" % name
        return html, i+1

    # Start html-table
    html = "<table style='border: 3px solid #ddd;'>\n"

    # Date and time info as title
    html = colspan(html, time.strftime('%a %b %d %H:%M:%S %Y %Z'), ncol, 0)

    # OS and CPUs
    html += " <tr>\n"
    html, i = cols(html, platform.system(), 'OS', ncol, 0)
    html, i = cols(html, multiprocessing.cpu_count(), 'CPU(s)', ncol, i)

    # Loop over packages
    for pckg in self._get_packages(self.add_pckg):
        html, i = cols(html, pckg.__version__, pckg.__name__, ncol, i)

    # Fill up the row with empty cells so the table stays rectangular.
    while i % ncol != 0:
        html += " <td style= " + border + "></td>\n"
        html += " <td style= " + border + "></td>\n"
        i += 1

    # Finish row
    html += " </tr>\n"

    # sys.version
    html = colspan(html, sys.version, ncol, 1)

    # mkl version (module-level; only shown when available)
    if mklinfo:
        html = colspan(html, mklinfo, ncol, 2)

    # Finish table
    html += "</table>"

    return html
# Check ncol
ncol = int(self.ncol)
# Define html-styles
border = "border: 2px solid #fff;'"
def colspan(html, txt, ncol, nrow):
r"""Print txt in a row spanning whole table."""
html += " <tr>\n"
html += " <td style='text-align: center; "
if nrow == 0:
html += "font-weight: bold; font-size: 1.2em; "
elif nrow % 2 == 0:
html += "background-color: #ddd;"
html += border + " colspan='"
html += str(2*ncol)+"'>%s</td>\n" % txt
html += " </tr>\n"
return html
def cols(html, version, name, ncol, i):
r"""Print package information in two cells."""
# Check if we have to start a new row
if i > 0 and i % ncol == 0:
html += " </tr>\n"
html += " <tr>\n"
html += " <td style='text-align: right; background-color: "
html += "#ccc; " + border + ">%s</td>\n" % version
html += " <td style='text-align: left; "
html += border + ">%s</td>\n" % name
return html, i+1
# Start html-table
html = "<table style='border: 3px solid #ddd;'>\n"
# Date and time info as title
html = colspan(html, time.strftime('%a %b %d %H:%M:%S %Y %Z'), ncol, 0)
# OS and CPUs
html += " <tr>\n"
html, i = cols(html, platform.system(), 'OS', ncol, 0)
html, i = cols(html, multiprocessing.cpu_count(), 'CPU(s)', ncol, i)
# Loop over packages
for pckg in self._get_packages(self.add_pckg):
html, i = cols(html, pckg.__version__, pckg.__name__, ncol, i)
# Fill up the row
while i % ncol != 0:
html += " <td style= " + border + "></td>\n"
html += " <td style= " + border + "></td>\n"
i += 1
# Finish row
html += " </tr>\n"
# sys.version
html = colspan(html, sys.version, ncol, 1)
# mkl version
if mklinfo:
html = colspan(html, mklinfo, ncol, 2)
# Finish table
html += "</table>"
return html | [
"def",
"_repr_html_",
"(",
"self",
")",
":",
"# Check ncol",
"ncol",
"=",
"int",
"(",
"self",
".",
"ncol",
")",
"# Define html-styles",
"border",
"=",
"\"border: 2px solid #fff;'\"",
"def",
"colspan",
"(",
"html",
",",
"txt",
",",
"ncol",
",",
"nrow",
")",
... | HTML-rendered versions information. | [
"HTML",
"-",
"rendered",
"versions",
"information",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/printinfo.py#L155-L224 |
26,118 | empymod/empymod | empymod/scripts/printinfo.py | Versions._get_packages | def _get_packages(add_pckg):
r"""Create list of packages."""
# Mandatory packages
pckgs = [numpy, scipy, empymod]
# Optional packages
for module in [IPython, numexpr, matplotlib]:
if module:
pckgs += [module]
# Cast and add add_pckg
if add_pckg is not None:
# Cast add_pckg
if isinstance(add_pckg, tuple):
add_pckg = list(add_pckg)
if not isinstance(add_pckg, list):
add_pckg = [add_pckg, ]
# Add add_pckg
pckgs += add_pckg
return pckgs | python | def _get_packages(add_pckg):
r"""Create list of packages."""
# Mandatory packages
pckgs = [numpy, scipy, empymod]
# Optional packages
for module in [IPython, numexpr, matplotlib]:
if module:
pckgs += [module]
# Cast and add add_pckg
if add_pckg is not None:
# Cast add_pckg
if isinstance(add_pckg, tuple):
add_pckg = list(add_pckg)
if not isinstance(add_pckg, list):
add_pckg = [add_pckg, ]
# Add add_pckg
pckgs += add_pckg
return pckgs | [
"def",
"_get_packages",
"(",
"add_pckg",
")",
":",
"# Mandatory packages",
"pckgs",
"=",
"[",
"numpy",
",",
"scipy",
",",
"empymod",
"]",
"# Optional packages",
"for",
"module",
"in",
"[",
"IPython",
",",
"numexpr",
",",
"matplotlib",
"]",
":",
"if",
"module... | r"""Create list of packages. | [
"r",
"Create",
"list",
"of",
"packages",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/scripts/printinfo.py#L227-L251 |
26,119 | empymod/empymod | empymod/filters.py | DigitalFilter.tofile | def tofile(self, path='filters'):
r"""Save filter values to ascii-files.
Store the filter base and the filter coefficients in separate files
in the directory `path`; `path` can be a relative or absolute path.
Examples
--------
>>> import empymod
>>> # Load a filter
>>> filt = empymod.filters.wer_201_2018()
>>> # Save it to pure ascii-files
>>> filt.tofile()
>>> # This will save the following three files:
>>> # ./filters/wer_201_2018_base.txt
>>> # ./filters/wer_201_2018_j0.txt
>>> # ./filters/wer_201_2018_j1.txt
"""
# Get name of filter
name = self.savename
# Get absolute path, create if it doesn't exist
path = os.path.abspath(path)
os.makedirs(path, exist_ok=True)
# Save filter base
basefile = os.path.join(path, name + '_base.txt')
with open(basefile, 'w') as f:
self.base.tofile(f, sep="\n")
# Save filter coefficients
for val in ['j0', 'j1', 'sin', 'cos']:
if hasattr(self, val):
attrfile = os.path.join(path, name + '_' + val + '.txt')
with open(attrfile, 'w') as f:
getattr(self, val).tofile(f, sep="\n") | python | def tofile(self, path='filters'):
r"""Save filter values to ascii-files.
Store the filter base and the filter coefficients in separate files
in the directory `path`; `path` can be a relative or absolute path.
Examples
--------
>>> import empymod
>>> # Load a filter
>>> filt = empymod.filters.wer_201_2018()
>>> # Save it to pure ascii-files
>>> filt.tofile()
>>> # This will save the following three files:
>>> # ./filters/wer_201_2018_base.txt
>>> # ./filters/wer_201_2018_j0.txt
>>> # ./filters/wer_201_2018_j1.txt
"""
# Get name of filter
name = self.savename
# Get absolute path, create if it doesn't exist
path = os.path.abspath(path)
os.makedirs(path, exist_ok=True)
# Save filter base
basefile = os.path.join(path, name + '_base.txt')
with open(basefile, 'w') as f:
self.base.tofile(f, sep="\n")
# Save filter coefficients
for val in ['j0', 'j1', 'sin', 'cos']:
if hasattr(self, val):
attrfile = os.path.join(path, name + '_' + val + '.txt')
with open(attrfile, 'w') as f:
getattr(self, val).tofile(f, sep="\n") | [
"def",
"tofile",
"(",
"self",
",",
"path",
"=",
"'filters'",
")",
":",
"# Get name of filter",
"name",
"=",
"self",
".",
"savename",
"# Get absolute path, create if it doesn't exist",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"os",
".",... | r"""Save filter values to ascii-files.
Store the filter base and the filter coefficients in separate files
in the directory `path`; `path` can be a relative or absolute path.
Examples
--------
>>> import empymod
>>> # Load a filter
>>> filt = empymod.filters.wer_201_2018()
>>> # Save it to pure ascii-files
>>> filt.tofile()
>>> # This will save the following three files:
>>> # ./filters/wer_201_2018_base.txt
>>> # ./filters/wer_201_2018_j0.txt
>>> # ./filters/wer_201_2018_j1.txt | [
"r",
"Save",
"filter",
"values",
"to",
"ascii",
"-",
"files",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/filters.py#L77-L114 |
26,120 | empymod/empymod | empymod/filters.py | DigitalFilter.fromfile | def fromfile(self, path='filters'):
r"""Load filter values from ascii-files.
Load filter base and filter coefficients from ascii files in the
directory `path`; `path` can be a relative or absolute path.
Examples
--------
>>> import empymod
>>> # Create an empty filter;
>>> # Name has to be the base of the text files
>>> filt = empymod.filters.DigitalFilter('my-filter')
>>> # Load the ascii-files
>>> filt.fromfile()
>>> # This will load the following three files:
>>> # ./filters/my-filter_base.txt
>>> # ./filters/my-filter_j0.txt
>>> # ./filters/my-filter_j1.txt
>>> # and store them in filt.base, filt.j0, and filt.j1.
"""
# Get name of filter
name = self.savename
# Get absolute path
path = os.path.abspath(path)
# Get filter base
basefile = os.path.join(path, name + '_base.txt')
with open(basefile, 'r') as f:
self.base = np.fromfile(f, sep="\n")
# Get filter coefficients
for val in ['j0', 'j1', 'sin', 'cos']:
attrfile = os.path.join(path, name + '_' + val + '.txt')
if os.path.isfile(attrfile):
with open(attrfile, 'r') as f:
setattr(self, val, np.fromfile(f, sep="\n"))
# Add factor
self.factor = np.around(np.average(self.base[1:]/self.base[:-1]), 15) | python | def fromfile(self, path='filters'):
r"""Load filter values from ascii-files.
Load filter base and filter coefficients from ascii files in the
directory `path`; `path` can be a relative or absolute path.
Examples
--------
>>> import empymod
>>> # Create an empty filter;
>>> # Name has to be the base of the text files
>>> filt = empymod.filters.DigitalFilter('my-filter')
>>> # Load the ascii-files
>>> filt.fromfile()
>>> # This will load the following three files:
>>> # ./filters/my-filter_base.txt
>>> # ./filters/my-filter_j0.txt
>>> # ./filters/my-filter_j1.txt
>>> # and store them in filt.base, filt.j0, and filt.j1.
"""
# Get name of filter
name = self.savename
# Get absolute path
path = os.path.abspath(path)
# Get filter base
basefile = os.path.join(path, name + '_base.txt')
with open(basefile, 'r') as f:
self.base = np.fromfile(f, sep="\n")
# Get filter coefficients
for val in ['j0', 'j1', 'sin', 'cos']:
attrfile = os.path.join(path, name + '_' + val + '.txt')
if os.path.isfile(attrfile):
with open(attrfile, 'r') as f:
setattr(self, val, np.fromfile(f, sep="\n"))
# Add factor
self.factor = np.around(np.average(self.base[1:]/self.base[:-1]), 15) | [
"def",
"fromfile",
"(",
"self",
",",
"path",
"=",
"'filters'",
")",
":",
"# Get name of filter",
"name",
"=",
"self",
".",
"savename",
"# Get absolute path",
"path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
"# Get filter base",
"basefile",
"... | r"""Load filter values from ascii-files.
Load filter base and filter coefficients from ascii files in the
directory `path`; `path` can be a relative or absolute path.
Examples
--------
>>> import empymod
>>> # Create an empty filter;
>>> # Name has to be the base of the text files
>>> filt = empymod.filters.DigitalFilter('my-filter')
>>> # Load the ascii-files
>>> filt.fromfile()
>>> # This will load the following three files:
>>> # ./filters/my-filter_base.txt
>>> # ./filters/my-filter_j0.txt
>>> # ./filters/my-filter_j1.txt
>>> # and store them in filt.base, filt.j0, and filt.j1. | [
"r",
"Load",
"filter",
"values",
"from",
"ascii",
"-",
"files",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/filters.py#L116-L157 |
26,121 | empymod/empymod | empymod/transform.py | fht | def fht(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV, zetaH,
zetaV, xdirect, fhtarg, use_ne_eval, msrc, mrec):
r"""Hankel Transform using the Digital Linear Filter method.
The *Digital Linear Filter* method was introduced to geophysics by
[Ghos70]_, and made popular and wide-spread by [Ande75]_, [Ande79]_,
[Ande82]_. The DLF is sometimes referred to as the *Fast Hankel Transform*
FHT, from which this routine has its name.
This implementation of the DLF follows [Key12]_, equation 6. Without going
into the mathematical details (which can be found in any of the above
papers) and following [Key12]_, the DLF method rewrites the Hankel
transform of the form
.. math:: F(r) = \int^\infty_0 f(\lambda)J_v(\lambda r)\
\mathrm{d}\lambda
as
.. math:: F(r) = \sum^n_{i=1} f(b_i/r)h_i/r \ ,
where :math:`h` is the digital filter.The Filter abscissae b is given by
.. math:: b_i = \lambda_ir = e^{ai}, \qquad i = -l, -l+1, \cdots, l \ ,
with :math:`l=(n-1)/2`, and :math:`a` is the spacing coefficient.
This function is loosely based on ``get_CSEM1D_FD_FHT.m`` from the source
code distributed with [Key12]_.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
fEM : array
Returns frequency-domain EM response.
kcount : int
Kernel count. For DLF, this is 1.
conv : bool
Only relevant for QWE/QUAD.
"""
# 1. Get fhtargs
fhtfilt = fhtarg[0]
pts_per_dec = fhtarg[1]
lambd = fhtarg[2]
int_pts = fhtarg[3]
# 2. Call the kernel
PJ = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH,
zetaV, lambd, ab, xdirect, msrc, mrec, use_ne_eval)
# 3. Carry out the dlf
fEM = dlf(PJ, lambd, off, fhtfilt, pts_per_dec, factAng=factAng, ab=ab,
int_pts=int_pts)
return fEM, 1, True | python | def fht(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV, zetaH,
zetaV, xdirect, fhtarg, use_ne_eval, msrc, mrec):
r"""Hankel Transform using the Digital Linear Filter method.
The *Digital Linear Filter* method was introduced to geophysics by
[Ghos70]_, and made popular and wide-spread by [Ande75]_, [Ande79]_,
[Ande82]_. The DLF is sometimes referred to as the *Fast Hankel Transform*
FHT, from which this routine has its name.
This implementation of the DLF follows [Key12]_, equation 6. Without going
into the mathematical details (which can be found in any of the above
papers) and following [Key12]_, the DLF method rewrites the Hankel
transform of the form
.. math:: F(r) = \int^\infty_0 f(\lambda)J_v(\lambda r)\
\mathrm{d}\lambda
as
.. math:: F(r) = \sum^n_{i=1} f(b_i/r)h_i/r \ ,
where :math:`h` is the digital filter.The Filter abscissae b is given by
.. math:: b_i = \lambda_ir = e^{ai}, \qquad i = -l, -l+1, \cdots, l \ ,
with :math:`l=(n-1)/2`, and :math:`a` is the spacing coefficient.
This function is loosely based on ``get_CSEM1D_FD_FHT.m`` from the source
code distributed with [Key12]_.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
fEM : array
Returns frequency-domain EM response.
kcount : int
Kernel count. For DLF, this is 1.
conv : bool
Only relevant for QWE/QUAD.
"""
# 1. Get fhtargs
fhtfilt = fhtarg[0]
pts_per_dec = fhtarg[1]
lambd = fhtarg[2]
int_pts = fhtarg[3]
# 2. Call the kernel
PJ = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH,
zetaV, lambd, ab, xdirect, msrc, mrec, use_ne_eval)
# 3. Carry out the dlf
fEM = dlf(PJ, lambd, off, fhtfilt, pts_per_dec, factAng=factAng, ab=ab,
int_pts=int_pts)
return fEM, 1, True | [
"def",
"fht",
"(",
"zsrc",
",",
"zrec",
",",
"lsrc",
",",
"lrec",
",",
"off",
",",
"factAng",
",",
"depth",
",",
"ab",
",",
"etaH",
",",
"etaV",
",",
"zetaH",
",",
"zetaV",
",",
"xdirect",
",",
"fhtarg",
",",
"use_ne_eval",
",",
"msrc",
",",
"mre... | r"""Hankel Transform using the Digital Linear Filter method.
The *Digital Linear Filter* method was introduced to geophysics by
[Ghos70]_, and made popular and wide-spread by [Ande75]_, [Ande79]_,
[Ande82]_. The DLF is sometimes referred to as the *Fast Hankel Transform*
FHT, from which this routine has its name.
This implementation of the DLF follows [Key12]_, equation 6. Without going
into the mathematical details (which can be found in any of the above
papers) and following [Key12]_, the DLF method rewrites the Hankel
transform of the form
.. math:: F(r) = \int^\infty_0 f(\lambda)J_v(\lambda r)\
\mathrm{d}\lambda
as
.. math:: F(r) = \sum^n_{i=1} f(b_i/r)h_i/r \ ,
where :math:`h` is the digital filter.The Filter abscissae b is given by
.. math:: b_i = \lambda_ir = e^{ai}, \qquad i = -l, -l+1, \cdots, l \ ,
with :math:`l=(n-1)/2`, and :math:`a` is the spacing coefficient.
This function is loosely based on ``get_CSEM1D_FD_FHT.m`` from the source
code distributed with [Key12]_.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
fEM : array
Returns frequency-domain EM response.
kcount : int
Kernel count. For DLF, this is 1.
conv : bool
Only relevant for QWE/QUAD. | [
"r",
"Hankel",
"Transform",
"using",
"the",
"Digital",
"Linear",
"Filter",
"method",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/transform.py#L47-L107 |
26,122 | empymod/empymod | empymod/transform.py | hquad | def hquad(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV, zetaH,
zetaV, xdirect, quadargs, use_ne_eval, msrc, mrec):
r"""Hankel Transform using the ``QUADPACK`` library.
This routine uses the ``scipy.integrate.quad`` module, which in turn makes
use of the Fortran library ``QUADPACK`` (``qagse``).
It is massively (orders of magnitudes) slower than either ``fht`` or
``hqwe``, and is mainly here for completeness and comparison purposes. It
always uses interpolation in the wavenumber domain, hence it generally will
not be as precise as the other methods. However, it might work in some
areas where the others fail.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
fEM : array
Returns frequency-domain EM response.
kcount : int
Kernel count. For HQUAD, this is 1.
conv : bool
If true, QUAD converged. If not, <htarg> might have to be adjusted.
"""
# Get quadargs
rtol, atol, limit, a, b, pts_per_dec = quadargs
# Get required lambdas
la = np.log10(a)
lb = np.log10(b)
ilambd = np.logspace(la, lb, (lb-la)*pts_per_dec + 1)
# Call the kernel
PJ0, PJ1, PJ0b = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH,
etaV, zetaH, zetaV,
np.atleast_2d(ilambd), ab, xdirect,
msrc, mrec, use_ne_eval)
# Interpolation in wavenumber domain: Has to be done separately on each PJ,
# in order to work with multiple offsets which have different angles.
# We check if the kernels are zero, to avoid unnecessary calculations.
if PJ0 is not None:
sPJ0r = iuSpline(np.log(ilambd), PJ0.real)
sPJ0i = iuSpline(np.log(ilambd), PJ0.imag)
else:
sPJ0r = None
sPJ0i = None
if PJ1 is not None:
sPJ1r = iuSpline(np.log(ilambd), PJ1.real)
sPJ1i = iuSpline(np.log(ilambd), PJ1.imag)
else:
sPJ1r = None
sPJ1i = None
if PJ0b is not None:
sPJ0br = iuSpline(np.log(ilambd), PJ0b.real)
sPJ0bi = iuSpline(np.log(ilambd), PJ0b.imag)
else:
sPJ0br = None
sPJ0bi = None
# Pre-allocate output array
fEM = np.zeros(off.size, dtype=complex)
conv = True
# Input-dictionary for quad
iinp = {'a': a, 'b': b, 'epsabs': atol, 'epsrel': rtol, 'limit': limit}
# Loop over offsets
for i in range(off.size):
fEM[i], tc = quad(sPJ0r, sPJ0i, sPJ1r, sPJ1i, sPJ0br, sPJ0bi, ab,
off[i], factAng[i], iinp)
conv *= tc
# Return the electromagnetic field
# Second argument (1) is the kernel count, last argument is only for QWE.
return fEM, 1, conv | python | def hquad(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV, zetaH,
zetaV, xdirect, quadargs, use_ne_eval, msrc, mrec):
r"""Hankel Transform using the ``QUADPACK`` library.
This routine uses the ``scipy.integrate.quad`` module, which in turn makes
use of the Fortran library ``QUADPACK`` (``qagse``).
It is massively (orders of magnitudes) slower than either ``fht`` or
``hqwe``, and is mainly here for completeness and comparison purposes. It
always uses interpolation in the wavenumber domain, hence it generally will
not be as precise as the other methods. However, it might work in some
areas where the others fail.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
fEM : array
Returns frequency-domain EM response.
kcount : int
Kernel count. For HQUAD, this is 1.
conv : bool
If true, QUAD converged. If not, <htarg> might have to be adjusted.
"""
# Get quadargs
rtol, atol, limit, a, b, pts_per_dec = quadargs
# Get required lambdas
la = np.log10(a)
lb = np.log10(b)
ilambd = np.logspace(la, lb, (lb-la)*pts_per_dec + 1)
# Call the kernel
PJ0, PJ1, PJ0b = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH,
etaV, zetaH, zetaV,
np.atleast_2d(ilambd), ab, xdirect,
msrc, mrec, use_ne_eval)
# Interpolation in wavenumber domain: Has to be done separately on each PJ,
# in order to work with multiple offsets which have different angles.
# We check if the kernels are zero, to avoid unnecessary calculations.
if PJ0 is not None:
sPJ0r = iuSpline(np.log(ilambd), PJ0.real)
sPJ0i = iuSpline(np.log(ilambd), PJ0.imag)
else:
sPJ0r = None
sPJ0i = None
if PJ1 is not None:
sPJ1r = iuSpline(np.log(ilambd), PJ1.real)
sPJ1i = iuSpline(np.log(ilambd), PJ1.imag)
else:
sPJ1r = None
sPJ1i = None
if PJ0b is not None:
sPJ0br = iuSpline(np.log(ilambd), PJ0b.real)
sPJ0bi = iuSpline(np.log(ilambd), PJ0b.imag)
else:
sPJ0br = None
sPJ0bi = None
# Pre-allocate output array
fEM = np.zeros(off.size, dtype=complex)
conv = True
# Input-dictionary for quad
iinp = {'a': a, 'b': b, 'epsabs': atol, 'epsrel': rtol, 'limit': limit}
# Loop over offsets
for i in range(off.size):
fEM[i], tc = quad(sPJ0r, sPJ0i, sPJ1r, sPJ1i, sPJ0br, sPJ0bi, ab,
off[i], factAng[i], iinp)
conv *= tc
# Return the electromagnetic field
# Second argument (1) is the kernel count, last argument is only for QWE.
return fEM, 1, conv | [
"def",
"hquad",
"(",
"zsrc",
",",
"zrec",
",",
"lsrc",
",",
"lrec",
",",
"off",
",",
"factAng",
",",
"depth",
",",
"ab",
",",
"etaH",
",",
"etaV",
",",
"zetaH",
",",
"zetaV",
",",
"xdirect",
",",
"quadargs",
",",
"use_ne_eval",
",",
"msrc",
",",
... | r"""Hankel Transform using the ``QUADPACK`` library.
This routine uses the ``scipy.integrate.quad`` module, which in turn makes
use of the Fortran library ``QUADPACK`` (``qagse``).
It is massively (orders of magnitudes) slower than either ``fht`` or
``hqwe``, and is mainly here for completeness and comparison purposes. It
always uses interpolation in the wavenumber domain, hence it generally will
not be as precise as the other methods. However, it might work in some
areas where the others fail.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
fEM : array
Returns frequency-domain EM response.
kcount : int
Kernel count. For HQUAD, this is 1.
conv : bool
If true, QUAD converged. If not, <htarg> might have to be adjusted. | [
"r",
"Hankel",
"Transform",
"using",
"the",
"QUADPACK",
"library",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/transform.py#L399-L482 |
26,123 | empymod/empymod | empymod/transform.py | ffht | def ffht(fEM, time, freq, ftarg):
r"""Fourier Transform using the Digital Linear Filter method.
It follows the Filter methodology [Ande75]_, using Cosine- and
Sine-filters; see ``fht`` for more information.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
This function is based on ``get_CSEM1D_TD_FHT.m`` from the source code
distributed with [Key12]_.
Returns
-------
tEM : array
Returns time-domain EM response of ``fEM`` for given ``time``.
conv : bool
Only relevant for QWE/QUAD.
"""
# Get ffhtargs
ffhtfilt = ftarg[0]
pts_per_dec = ftarg[1]
kind = ftarg[2] # Sine (`sin`) or cosine (`cos`)
# Cast into Standard DLF format
if pts_per_dec == 0:
fEM = fEM.reshape(time.size, -1)
# Carry out DLF
tEM = dlf(fEM, 2*np.pi*freq, time, ffhtfilt, pts_per_dec, kind=kind)
# Return the electromagnetic time domain field
# (Second argument is only for QWE)
return tEM, True | python | def ffht(fEM, time, freq, ftarg):
r"""Fourier Transform using the Digital Linear Filter method.
It follows the Filter methodology [Ande75]_, using Cosine- and
Sine-filters; see ``fht`` for more information.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
This function is based on ``get_CSEM1D_TD_FHT.m`` from the source code
distributed with [Key12]_.
Returns
-------
tEM : array
Returns time-domain EM response of ``fEM`` for given ``time``.
conv : bool
Only relevant for QWE/QUAD.
"""
# Get ffhtargs
ffhtfilt = ftarg[0]
pts_per_dec = ftarg[1]
kind = ftarg[2] # Sine (`sin`) or cosine (`cos`)
# Cast into Standard DLF format
if pts_per_dec == 0:
fEM = fEM.reshape(time.size, -1)
# Carry out DLF
tEM = dlf(fEM, 2*np.pi*freq, time, ffhtfilt, pts_per_dec, kind=kind)
# Return the electromagnetic time domain field
# (Second argument is only for QWE)
return tEM, True | [
"def",
"ffht",
"(",
"fEM",
",",
"time",
",",
"freq",
",",
"ftarg",
")",
":",
"# Get ffhtargs",
"ffhtfilt",
"=",
"ftarg",
"[",
"0",
"]",
"pts_per_dec",
"=",
"ftarg",
"[",
"1",
"]",
"kind",
"=",
"ftarg",
"[",
"2",
"]",
"# Sine (`sin`) or cosine (`cos`)",
... | r"""Fourier Transform using the Digital Linear Filter method.
It follows the Filter methodology [Ande75]_, using Cosine- and
Sine-filters; see ``fht`` for more information.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
This function is based on ``get_CSEM1D_TD_FHT.m`` from the source code
distributed with [Key12]_.
Returns
-------
tEM : array
Returns time-domain EM response of ``fEM`` for given ``time``.
conv : bool
Only relevant for QWE/QUAD. | [
"r",
"Fourier",
"Transform",
"using",
"the",
"Digital",
"Linear",
"Filter",
"method",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/transform.py#L487-L524 |
26,124 | empymod/empymod | empymod/transform.py | fft | def fft(fEM, time, freq, ftarg):
r"""Fourier Transform using the Fast Fourier Transform.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
tEM : array
Returns time-domain EM response of ``fEM`` for given ``time``.
conv : bool
Only relevant for QWE/QUAD.
"""
# Get ftarg values
dfreq, nfreq, ntot, pts_per_dec = ftarg
# If pts_per_dec, we have first to interpolate fEM to required freqs
if pts_per_dec:
sfEMr = iuSpline(np.log(freq), fEM.real)
sfEMi = iuSpline(np.log(freq), fEM.imag)
freq = np.arange(1, nfreq+1)*dfreq
fEM = sfEMr(np.log(freq)) + 1j*sfEMi(np.log(freq))
# Pad the frequency result
fEM = np.pad(fEM, (0, ntot-nfreq), 'linear_ramp')
# Carry out FFT
ifftEM = fftpack.ifft(np.r_[fEM[1:], 0, fEM[::-1].conj()]).real
stEM = 2*ntot*fftpack.fftshift(ifftEM*dfreq, 0)
# Interpolate in time domain
dt = 1/(2*ntot*dfreq)
ifEM = iuSpline(np.linspace(-ntot, ntot-1, 2*ntot)*dt, stEM)
tEM = ifEM(time)/2*np.pi # (Multiplication of 2/pi in model.tem)
# Return the electromagnetic time domain field
# (Second argument is only for QWE)
return tEM, True | python | def fft(fEM, time, freq, ftarg):
r"""Fourier Transform using the Fast Fourier Transform.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
tEM : array
Returns time-domain EM response of ``fEM`` for given ``time``.
conv : bool
Only relevant for QWE/QUAD.
"""
# Get ftarg values
dfreq, nfreq, ntot, pts_per_dec = ftarg
# If pts_per_dec, we have first to interpolate fEM to required freqs
if pts_per_dec:
sfEMr = iuSpline(np.log(freq), fEM.real)
sfEMi = iuSpline(np.log(freq), fEM.imag)
freq = np.arange(1, nfreq+1)*dfreq
fEM = sfEMr(np.log(freq)) + 1j*sfEMi(np.log(freq))
# Pad the frequency result
fEM = np.pad(fEM, (0, ntot-nfreq), 'linear_ramp')
# Carry out FFT
ifftEM = fftpack.ifft(np.r_[fEM[1:], 0, fEM[::-1].conj()]).real
stEM = 2*ntot*fftpack.fftshift(ifftEM*dfreq, 0)
# Interpolate in time domain
dt = 1/(2*ntot*dfreq)
ifEM = iuSpline(np.linspace(-ntot, ntot-1, 2*ntot)*dt, stEM)
tEM = ifEM(time)/2*np.pi # (Multiplication of 2/pi in model.tem)
# Return the electromagnetic time domain field
# (Second argument is only for QWE)
return tEM, True | [
"def",
"fft",
"(",
"fEM",
",",
"time",
",",
"freq",
",",
"ftarg",
")",
":",
"# Get ftarg values",
"dfreq",
",",
"nfreq",
",",
"ntot",
",",
"pts_per_dec",
"=",
"ftarg",
"# If pts_per_dec, we have first to interpolate fEM to required freqs",
"if",
"pts_per_dec",
":",
... | r"""Fourier Transform using the Fast Fourier Transform.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
tEM : array
Returns time-domain EM response of ``fEM`` for given ``time``.
conv : bool
Only relevant for QWE/QUAD. | [
"r",
"Fourier",
"Transform",
"using",
"the",
"Fast",
"Fourier",
"Transform",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/transform.py#L766-L806 |
26,125 | empymod/empymod | empymod/transform.py | quad | def quad(sPJ0r, sPJ0i, sPJ1r, sPJ1i, sPJ0br, sPJ0bi, ab, off, factAng, iinp):
r"""Quadrature for Hankel transform.
This is the kernel of the QUAD method, used for the Hankel transforms
``hquad`` and ``hqwe`` (where the integral is not suited for QWE).
"""
# Define the quadrature kernels
def quad_PJ0(klambd, sPJ0, koff):
r"""Quadrature for PJ0."""
return sPJ0(np.log(klambd))*special.j0(koff*klambd)
def quad_PJ1(klambd, sPJ1, ab, koff, kang):
r"""Quadrature for PJ1."""
tP1 = kang*sPJ1(np.log(klambd))
if ab in [11, 12, 21, 22, 14, 24, 15, 25]: # Because of J2
# J2(kr) = 2/(kr)*J1(kr) - J0(kr)
tP1 /= koff
return tP1*special.j1(koff*klambd)
def quad_PJ0b(klambd, sPJ0b, koff, kang):
r"""Quadrature for PJ0b."""
return kang*sPJ0b(np.log(klambd))*special.j0(koff*klambd)
# Pre-allocate output
conv = True
out = np.array(0.0+0.0j)
# Carry out quadrature for required kernels
iinp['full_output'] = 1
if sPJ0r is not None:
re = integrate.quad(quad_PJ0, args=(sPJ0r, off), **iinp)
im = integrate.quad(quad_PJ0, args=(sPJ0i, off), **iinp)
out += re[0] + 1j*im[0]
# If there is a fourth output from QUAD, it means it did not converge
if (len(re) or len(im)) > 3:
conv = False
if sPJ1r is not None:
re = integrate.quad(quad_PJ1, args=(sPJ1r, ab, off, factAng), **iinp)
im = integrate.quad(quad_PJ1, args=(sPJ1i, ab, off, factAng), **iinp)
out += re[0] + 1j*im[0]
# If there is a fourth output from QUAD, it means it did not converge
if (len(re) or len(im)) > 3:
conv = False
if sPJ0br is not None:
re = integrate.quad(quad_PJ0b, args=(sPJ0br, off, factAng), **iinp)
im = integrate.quad(quad_PJ0b, args=(sPJ0bi, off, factAng), **iinp)
out += re[0] + 1j*im[0]
# If there is a fourth output from QUAD, it means it did not converge
if (len(re) or len(im)) > 3:
conv = False
# Collect the results
return out, conv | python | def quad(sPJ0r, sPJ0i, sPJ1r, sPJ1i, sPJ0br, sPJ0bi, ab, off, factAng, iinp):
r"""Quadrature for Hankel transform.
This is the kernel of the QUAD method, used for the Hankel transforms
``hquad`` and ``hqwe`` (where the integral is not suited for QWE).
"""
# Define the quadrature kernels
def quad_PJ0(klambd, sPJ0, koff):
r"""Quadrature for PJ0."""
return sPJ0(np.log(klambd))*special.j0(koff*klambd)
def quad_PJ1(klambd, sPJ1, ab, koff, kang):
r"""Quadrature for PJ1."""
tP1 = kang*sPJ1(np.log(klambd))
if ab in [11, 12, 21, 22, 14, 24, 15, 25]: # Because of J2
# J2(kr) = 2/(kr)*J1(kr) - J0(kr)
tP1 /= koff
return tP1*special.j1(koff*klambd)
def quad_PJ0b(klambd, sPJ0b, koff, kang):
r"""Quadrature for PJ0b."""
return kang*sPJ0b(np.log(klambd))*special.j0(koff*klambd)
# Pre-allocate output
conv = True
out = np.array(0.0+0.0j)
# Carry out quadrature for required kernels
iinp['full_output'] = 1
if sPJ0r is not None:
re = integrate.quad(quad_PJ0, args=(sPJ0r, off), **iinp)
im = integrate.quad(quad_PJ0, args=(sPJ0i, off), **iinp)
out += re[0] + 1j*im[0]
# If there is a fourth output from QUAD, it means it did not converge
if (len(re) or len(im)) > 3:
conv = False
if sPJ1r is not None:
re = integrate.quad(quad_PJ1, args=(sPJ1r, ab, off, factAng), **iinp)
im = integrate.quad(quad_PJ1, args=(sPJ1i, ab, off, factAng), **iinp)
out += re[0] + 1j*im[0]
# If there is a fourth output from QUAD, it means it did not converge
if (len(re) or len(im)) > 3:
conv = False
if sPJ0br is not None:
re = integrate.quad(quad_PJ0b, args=(sPJ0br, off, factAng), **iinp)
im = integrate.quad(quad_PJ0b, args=(sPJ0bi, off, factAng), **iinp)
out += re[0] + 1j*im[0]
# If there is a fourth output from QUAD, it means it did not converge
if (len(re) or len(im)) > 3:
conv = False
# Collect the results
return out, conv | [
"def",
"quad",
"(",
"sPJ0r",
",",
"sPJ0i",
",",
"sPJ1r",
",",
"sPJ1i",
",",
"sPJ0br",
",",
"sPJ0bi",
",",
"ab",
",",
"off",
",",
"factAng",
",",
"iinp",
")",
":",
"# Define the quadrature kernels",
"def",
"quad_PJ0",
"(",
"klambd",
",",
"sPJ0",
",",
"k... | r"""Quadrature for Hankel transform.
This is the kernel of the QUAD method, used for the Hankel transforms
``hquad`` and ``hqwe`` (where the integral is not suited for QWE). | [
"r",
"Quadrature",
"for",
"Hankel",
"transform",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/transform.py#L1097-L1156 |
26,126 | empymod/empymod | empymod/transform.py | get_spline_values | def get_spline_values(filt, inp, nr_per_dec=None):
r"""Return required calculation points."""
# Standard DLF
if nr_per_dec == 0:
return filt.base/inp[:, None], inp
# Get min and max required out-values (depends on filter and inp-value)
outmax = filt.base[-1]/inp.min()
outmin = filt.base[0]/inp.max()
# Get pts_per_dec and define number of out-values, depending on pts_per_dec
if nr_per_dec < 0: # Lagged Convolution DLF
pts_per_dec = 1/np.log(filt.factor)
# Calculate number of output values
nout = int(np.ceil(np.log(outmax/outmin)*pts_per_dec) + 1)
else: # Splined DLF
pts_per_dec = nr_per_dec
# Calculate number of output values
nout = int(np.ceil(np.log10(outmax/outmin)*pts_per_dec) + 1)
# Min-nout check, becaus the cubic InterpolatedUnivariateSpline needs at
# least 4 points.
if nr_per_dec < 0: # Lagged Convolution DLF
# Lagged Convolution DLF interpolates in output domain, so `new_inp`
# needs to have at least 4 points.
if nout-filt.base.size < 3:
nout = filt.base.size+3
else: # Splined DLF
# Splined DLF interpolates in input domain, so `out` needs to have at
# least 4 points. This should always be the case, we're just overly
# cautious here.
if nout < 4:
nout = 4
if nr_per_dec < 0:
# Calculate output values
out = np.exp(np.arange(np.log(outmin), np.log(outmin) +
nout/pts_per_dec, 1/pts_per_dec))
# If lagged convolution is used, we calculate the new input values, as
# spline is carried out in the input domain.
new_inp = inp.max()*np.exp(-np.arange(nout - filt.base.size + 1) /
pts_per_dec)
else:
# Calculate output values
out = 10**np.arange(np.log10(outmin), np.log10(outmin) +
nout/pts_per_dec, 1/pts_per_dec)
# If spline is used, interpolation is carried out in output domain and
# we calculate the intermediate values.
new_inp = filt.base/inp[:, None]
# Return output values
return np.atleast_2d(out), new_inp | python | def get_spline_values(filt, inp, nr_per_dec=None):
r"""Return required calculation points."""
# Standard DLF
if nr_per_dec == 0:
return filt.base/inp[:, None], inp
# Get min and max required out-values (depends on filter and inp-value)
outmax = filt.base[-1]/inp.min()
outmin = filt.base[0]/inp.max()
# Get pts_per_dec and define number of out-values, depending on pts_per_dec
if nr_per_dec < 0: # Lagged Convolution DLF
pts_per_dec = 1/np.log(filt.factor)
# Calculate number of output values
nout = int(np.ceil(np.log(outmax/outmin)*pts_per_dec) + 1)
else: # Splined DLF
pts_per_dec = nr_per_dec
# Calculate number of output values
nout = int(np.ceil(np.log10(outmax/outmin)*pts_per_dec) + 1)
# Min-nout check, becaus the cubic InterpolatedUnivariateSpline needs at
# least 4 points.
if nr_per_dec < 0: # Lagged Convolution DLF
# Lagged Convolution DLF interpolates in output domain, so `new_inp`
# needs to have at least 4 points.
if nout-filt.base.size < 3:
nout = filt.base.size+3
else: # Splined DLF
# Splined DLF interpolates in input domain, so `out` needs to have at
# least 4 points. This should always be the case, we're just overly
# cautious here.
if nout < 4:
nout = 4
if nr_per_dec < 0:
# Calculate output values
out = np.exp(np.arange(np.log(outmin), np.log(outmin) +
nout/pts_per_dec, 1/pts_per_dec))
# If lagged convolution is used, we calculate the new input values, as
# spline is carried out in the input domain.
new_inp = inp.max()*np.exp(-np.arange(nout - filt.base.size + 1) /
pts_per_dec)
else:
# Calculate output values
out = 10**np.arange(np.log10(outmin), np.log10(outmin) +
nout/pts_per_dec, 1/pts_per_dec)
# If spline is used, interpolation is carried out in output domain and
# we calculate the intermediate values.
new_inp = filt.base/inp[:, None]
# Return output values
return np.atleast_2d(out), new_inp | [
"def",
"get_spline_values",
"(",
"filt",
",",
"inp",
",",
"nr_per_dec",
"=",
"None",
")",
":",
"# Standard DLF",
"if",
"nr_per_dec",
"==",
"0",
":",
"return",
"filt",
".",
"base",
"/",
"inp",
"[",
":",
",",
"None",
"]",
",",
"inp",
"# Get min and max req... | r"""Return required calculation points. | [
"r",
"Return",
"required",
"calculation",
"points",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/transform.py#L1159-L1216 |
26,127 | empymod/empymod | empymod/transform.py | fhti | def fhti(rmin, rmax, n, q, mu):
r"""Return parameters required for FFTLog."""
# Central point log10(r_c) of periodic interval
logrc = (rmin + rmax)/2
# Central index (1/2 integral if n is even)
nc = (n + 1)/2.
# Log spacing of points
dlogr = (rmax - rmin)/n
dlnr = dlogr*np.log(10.)
# Get low-ringing kr
y = 1j*np.pi/(2.0*dlnr)
zp = special.loggamma((mu + 1.0 + q)/2.0 + y)
zm = special.loggamma((mu + 1.0 - q)/2.0 + y)
arg = np.log(2.0)/dlnr + (zp.imag + zm.imag)/np.pi
kr = np.exp((arg - np.round(arg))*dlnr)
# Calculate required input x-values (freq); angular freq -> freq
freq = 10**(logrc + (np.arange(1, n+1) - nc)*dlogr)/(2*np.pi)
# Calculate tcalc with adjusted kr
logkc = np.log10(kr) - logrc
tcalc = 10**(logkc + (np.arange(1, n+1) - nc)*dlogr)
# rk = r_c/k_r; adjust for Fourier transform scaling
rk = 10**(logrc - logkc)*np.pi/2
return freq, tcalc, dlnr, kr, rk | python | def fhti(rmin, rmax, n, q, mu):
r"""Return parameters required for FFTLog."""
# Central point log10(r_c) of periodic interval
logrc = (rmin + rmax)/2
# Central index (1/2 integral if n is even)
nc = (n + 1)/2.
# Log spacing of points
dlogr = (rmax - rmin)/n
dlnr = dlogr*np.log(10.)
# Get low-ringing kr
y = 1j*np.pi/(2.0*dlnr)
zp = special.loggamma((mu + 1.0 + q)/2.0 + y)
zm = special.loggamma((mu + 1.0 - q)/2.0 + y)
arg = np.log(2.0)/dlnr + (zp.imag + zm.imag)/np.pi
kr = np.exp((arg - np.round(arg))*dlnr)
# Calculate required input x-values (freq); angular freq -> freq
freq = 10**(logrc + (np.arange(1, n+1) - nc)*dlogr)/(2*np.pi)
# Calculate tcalc with adjusted kr
logkc = np.log10(kr) - logrc
tcalc = 10**(logkc + (np.arange(1, n+1) - nc)*dlogr)
# rk = r_c/k_r; adjust for Fourier transform scaling
rk = 10**(logrc - logkc)*np.pi/2
return freq, tcalc, dlnr, kr, rk | [
"def",
"fhti",
"(",
"rmin",
",",
"rmax",
",",
"n",
",",
"q",
",",
"mu",
")",
":",
"# Central point log10(r_c) of periodic interval",
"logrc",
"=",
"(",
"rmin",
"+",
"rmax",
")",
"/",
"2",
"# Central index (1/2 integral if n is even)",
"nc",
"=",
"(",
"n",
"+... | r"""Return parameters required for FFTLog. | [
"r",
"Return",
"parameters",
"required",
"for",
"FFTLog",
"."
] | 4a78ca4191ed4b4d42d019ce715a9a3889dba1bc | https://github.com/empymod/empymod/blob/4a78ca4191ed4b4d42d019ce715a9a3889dba1bc/empymod/transform.py#L1219-L1249 |
26,128 | workhorsy/py-cpuinfo | cpuinfo/cpuinfo.py | _actual_get_cpu_info_from_cpuid | def _actual_get_cpu_info_from_cpuid(queue):
'''
Warning! This function has the potential to crash the Python runtime.
Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
It will safely call this function in another process.
'''
# Pipe all output to nothing
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
# Return none if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
queue.put(_obj_to_b64({}))
return
# Return none if SE Linux is in enforcing mode
cpuid = CPUID()
if cpuid.is_selinux_enforcing:
queue.put(_obj_to_b64({}))
return
# Get the cpu info from the CPUID register
max_extension_support = cpuid.get_max_extension_support()
cache_info = cpuid.get_cache(max_extension_support)
info = cpuid.get_info()
processor_brand = cpuid.get_processor_brand(max_extension_support)
# Get the Hz and scale
hz_actual = cpuid.get_raw_hz()
hz_actual = _to_decimal_string(hz_actual)
# Get the Hz and scale
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
info = {
'vendor_id_raw' : cpuid.get_vendor_id(),
'hardware_raw' : '',
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
'l2_cache_line_size' : cache_info['line_size_b'],
'l2_cache_associativity' : hex(cache_info['associativity']),
'stepping' : info['stepping'],
'model' : info['model'],
'family' : info['family'],
'processor_type' : info['processor_type'],
'extended_model' : info['extended_model'],
'extended_family' : info['extended_family'],
'flags' : cpuid.get_flags(max_extension_support)
}
info = {k: v for k, v in info.items() if v}
queue.put(_obj_to_b64(info)) | python | def _actual_get_cpu_info_from_cpuid(queue):
'''
Warning! This function has the potential to crash the Python runtime.
Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
It will safely call this function in another process.
'''
# Pipe all output to nothing
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
# Return none if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
queue.put(_obj_to_b64({}))
return
# Return none if SE Linux is in enforcing mode
cpuid = CPUID()
if cpuid.is_selinux_enforcing:
queue.put(_obj_to_b64({}))
return
# Get the cpu info from the CPUID register
max_extension_support = cpuid.get_max_extension_support()
cache_info = cpuid.get_cache(max_extension_support)
info = cpuid.get_info()
processor_brand = cpuid.get_processor_brand(max_extension_support)
# Get the Hz and scale
hz_actual = cpuid.get_raw_hz()
hz_actual = _to_decimal_string(hz_actual)
# Get the Hz and scale
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
info = {
'vendor_id_raw' : cpuid.get_vendor_id(),
'hardware_raw' : '',
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'l2_cache_size' : _to_friendly_bytes(cache_info['size_kb']),
'l2_cache_line_size' : cache_info['line_size_b'],
'l2_cache_associativity' : hex(cache_info['associativity']),
'stepping' : info['stepping'],
'model' : info['model'],
'family' : info['family'],
'processor_type' : info['processor_type'],
'extended_model' : info['extended_model'],
'extended_family' : info['extended_family'],
'flags' : cpuid.get_flags(max_extension_support)
}
info = {k: v for k, v in info.items() if v}
queue.put(_obj_to_b64(info)) | [
"def",
"_actual_get_cpu_info_from_cpuid",
"(",
"queue",
")",
":",
"# Pipe all output to nothing",
"sys",
".",
"stdout",
"=",
"open",
"(",
"os",
".",
"devnull",
",",
"'w'",
")",
"sys",
".",
"stderr",
"=",
"open",
"(",
"os",
".",
"devnull",
",",
"'w'",
")",
... | Warning! This function has the potential to crash the Python runtime.
Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
It will safely call this function in another process. | [
"Warning!",
"This",
"function",
"has",
"the",
"potential",
"to",
"crash",
"the",
"Python",
"runtime",
".",
"Do",
"not",
"call",
"it",
"directly",
".",
"Use",
"the",
"_get_cpu_info_from_cpuid",
"function",
"instead",
".",
"It",
"will",
"safely",
"call",
"this",... | c15afb770c1139bf76215852e17eb4f677ca3d2f | https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L1294-L1356 |
26,129 | workhorsy/py-cpuinfo | cpuinfo/cpuinfo.py | get_cpu_info_json | def get_cpu_info_json():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a json string
'''
import json
output = None
# If running under pyinstaller, run normally
if getattr(sys, 'frozen', False):
info = _get_cpu_info_internal()
output = json.dumps(info)
output = "{0}".format(output)
# if not running under pyinstaller, run in another process.
# This is done because multiprocesing has a design flaw that
# causes non main programs to run multiple times on Windows.
else:
from subprocess import Popen, PIPE
command = [sys.executable, __file__, '--json']
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
output = p1.communicate()[0]
if p1.returncode != 0:
return "{}"
if not IS_PY2:
output = output.decode(encoding='UTF-8')
return output | python | def get_cpu_info_json():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a json string
'''
import json
output = None
# If running under pyinstaller, run normally
if getattr(sys, 'frozen', False):
info = _get_cpu_info_internal()
output = json.dumps(info)
output = "{0}".format(output)
# if not running under pyinstaller, run in another process.
# This is done because multiprocesing has a design flaw that
# causes non main programs to run multiple times on Windows.
else:
from subprocess import Popen, PIPE
command = [sys.executable, __file__, '--json']
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
output = p1.communicate()[0]
if p1.returncode != 0:
return "{}"
if not IS_PY2:
output = output.decode(encoding='UTF-8')
return output | [
"def",
"get_cpu_info_json",
"(",
")",
":",
"import",
"json",
"output",
"=",
"None",
"# If running under pyinstaller, run normally",
"if",
"getattr",
"(",
"sys",
",",
"'frozen'",
",",
"False",
")",
":",
"info",
"=",
"_get_cpu_info_internal",
"(",
")",
"output",
"... | Returns the CPU info by using the best sources of information for your OS.
Returns the result in a json string | [
"Returns",
"the",
"CPU",
"info",
"by",
"using",
"the",
"best",
"sources",
"of",
"information",
"for",
"your",
"OS",
".",
"Returns",
"the",
"result",
"in",
"a",
"json",
"string"
] | c15afb770c1139bf76215852e17eb4f677ca3d2f | https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L2275-L2306 |
26,130 | workhorsy/py-cpuinfo | cpuinfo/cpuinfo.py | get_cpu_info | def get_cpu_info():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a dict
'''
import json
output = get_cpu_info_json()
# Convert JSON to Python with non unicode strings
output = json.loads(output, object_hook = _utf_to_str)
return output | python | def get_cpu_info():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a dict
'''
import json
output = get_cpu_info_json()
# Convert JSON to Python with non unicode strings
output = json.loads(output, object_hook = _utf_to_str)
return output | [
"def",
"get_cpu_info",
"(",
")",
":",
"import",
"json",
"output",
"=",
"get_cpu_info_json",
"(",
")",
"# Convert JSON to Python with non unicode strings",
"output",
"=",
"json",
".",
"loads",
"(",
"output",
",",
"object_hook",
"=",
"_utf_to_str",
")",
"return",
"o... | Returns the CPU info by using the best sources of information for your OS.
Returns the result in a dict | [
"Returns",
"the",
"CPU",
"info",
"by",
"using",
"the",
"best",
"sources",
"of",
"information",
"for",
"your",
"OS",
".",
"Returns",
"the",
"result",
"in",
"a",
"dict"
] | c15afb770c1139bf76215852e17eb4f677ca3d2f | https://github.com/workhorsy/py-cpuinfo/blob/c15afb770c1139bf76215852e17eb4f677ca3d2f/cpuinfo/cpuinfo.py#L2308-L2321 |
26,131 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qmangle/qmangle/__init__.py | _verbs_with_subjects | def _verbs_with_subjects(doc):
"""Given a spacy document return the verbs that have subjects"""
# TODO: UNUSED
verb_subj = []
for possible_subject in doc:
if (possible_subject.dep_ == 'nsubj' and possible_subject.head.pos_ ==
'VERB'):
verb_subj.append([possible_subject.head, possible_subject])
return verb_subj | python | def _verbs_with_subjects(doc):
# TODO: UNUSED
verb_subj = []
for possible_subject in doc:
if (possible_subject.dep_ == 'nsubj' and possible_subject.head.pos_ ==
'VERB'):
verb_subj.append([possible_subject.head, possible_subject])
return verb_subj | [
"def",
"_verbs_with_subjects",
"(",
"doc",
")",
":",
"# TODO: UNUSED",
"verb_subj",
"=",
"[",
"]",
"for",
"possible_subject",
"in",
"doc",
":",
"if",
"(",
"possible_subject",
".",
"dep_",
"==",
"'nsubj'",
"and",
"possible_subject",
".",
"head",
".",
"pos_",
... | Given a spacy document return the verbs that have subjects | [
"Given",
"a",
"spacy",
"document",
"return",
"the",
"verbs",
"that",
"have",
"subjects"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qmangle/qmangle/__init__.py#L19-L27 |
26,132 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qmangle/qmangle/__init__.py | mangle_agreement | def mangle_agreement(correct_sentence):
"""Given a correct sentence, return a sentence or sentences with a subject
verb agreement error"""
# # Examples
#
# Back in the 1800s, people were much shorter and much stronger.
# This sentence begins with the introductory phrase, 'back in the 1800s'
# which means that it should have the past tense verb. Any other verb would
# be incorrect.
#
#
# Jack and jill went up the hill.
# This sentence is different; 'go' would also be correct. If it began with
# 'Yesterday', a single-word introductory phrase requiring no comma, only
# 'went' would be acceptable.
#
#
# The man in the checkered shirt danced his warrior dance to show that
# he was the most dominant male in the room.
# This sentence has multiple verbs. If the sentence ended at the word dance,
# changing 'danced' to 'dances' would be acceptable, but since the sentence
# continues we cannot make this change -- 'was' agrees with 'danced' but not
# with 'dances'. This is a shifty tense error, a classic subject verb
# agreement error.
#
# # Our Method
#
# Right now, we will assume that any change in verb form of a single verb in
# a sentence is incorrect. As demonstrated above, this is not always true.
# We hope that since any model created off of this data will use a
# confidence interval to determine likelihood of a subject-verb agreement
# error, that some number can be found for which the model excels.
#
# It would also be possible to use a rule based learner to evaluate single
# verb sentences, and only evaluating more complex sentences with the
# tensorflow model.
bad_sents = []
doc = nlp(correct_sentence)
verbs = [(i, v) for (i, v) in enumerate(doc) if v.tag_.startswith('VB')]
for i, v in verbs:
for alt_verb in lexeme(doc[i].text):
if alt_verb == doc[i].text:
continue # Same as the original, skip it
if (tenses(alt_verb) == tenses(v.text) or
(alt_verb.startswith(v.text) and alt_verb.endswith("n't"))):
continue # Negated version of the original, skip it
new_sent = str(doc[:i]) + " {} ".format(alt_verb) + str(doc[i+1:])
new_sent = new_sent.replace(' ,', ',') # fix space before comma
bad_sents.append(new_sent)
return bad_sents | python | def mangle_agreement(correct_sentence):
# # Examples
#
# Back in the 1800s, people were much shorter and much stronger.
# This sentence begins with the introductory phrase, 'back in the 1800s'
# which means that it should have the past tense verb. Any other verb would
# be incorrect.
#
#
# Jack and jill went up the hill.
# This sentence is different; 'go' would also be correct. If it began with
# 'Yesterday', a single-word introductory phrase requiring no comma, only
# 'went' would be acceptable.
#
#
# The man in the checkered shirt danced his warrior dance to show that
# he was the most dominant male in the room.
# This sentence has multiple verbs. If the sentence ended at the word dance,
# changing 'danced' to 'dances' would be acceptable, but since the sentence
# continues we cannot make this change -- 'was' agrees with 'danced' but not
# with 'dances'. This is a shifty tense error, a classic subject verb
# agreement error.
#
# # Our Method
#
# Right now, we will assume that any change in verb form of a single verb in
# a sentence is incorrect. As demonstrated above, this is not always true.
# We hope that since any model created off of this data will use a
# confidence interval to determine likelihood of a subject-verb agreement
# error, that some number can be found for which the model excels.
#
# It would also be possible to use a rule based learner to evaluate single
# verb sentences, and only evaluating more complex sentences with the
# tensorflow model.
bad_sents = []
doc = nlp(correct_sentence)
verbs = [(i, v) for (i, v) in enumerate(doc) if v.tag_.startswith('VB')]
for i, v in verbs:
for alt_verb in lexeme(doc[i].text):
if alt_verb == doc[i].text:
continue # Same as the original, skip it
if (tenses(alt_verb) == tenses(v.text) or
(alt_verb.startswith(v.text) and alt_verb.endswith("n't"))):
continue # Negated version of the original, skip it
new_sent = str(doc[:i]) + " {} ".format(alt_verb) + str(doc[i+1:])
new_sent = new_sent.replace(' ,', ',') # fix space before comma
bad_sents.append(new_sent)
return bad_sents | [
"def",
"mangle_agreement",
"(",
"correct_sentence",
")",
":",
"# # Examples",
"#",
"# Back in the 1800s, people were much shorter and much stronger.",
"# This sentence begins with the introductory phrase, 'back in the 1800s'",
"# which means that it should have the past tense verb. Any other ver... | Given a correct sentence, return a sentence or sentences with a subject
verb agreement error | [
"Given",
"a",
"correct",
"sentence",
"return",
"a",
"sentence",
"or",
"sentences",
"with",
"a",
"subject",
"verb",
"agreement",
"error"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qmangle/qmangle/__init__.py#L83-L133 |
26,133 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/__init__.py | _build_trigram_indices | def _build_trigram_indices(trigram_index):
"""Build a dictionary of trigrams and their indices from a csv"""
result = {}
trigram_count = 0
for key, val in csv.reader(open(trigram_index)):
result[key] = int(val)
trigram_count += 1
return result, trigram_count | python | def _build_trigram_indices(trigram_index):
result = {}
trigram_count = 0
for key, val in csv.reader(open(trigram_index)):
result[key] = int(val)
trigram_count += 1
return result, trigram_count | [
"def",
"_build_trigram_indices",
"(",
"trigram_index",
")",
":",
"result",
"=",
"{",
"}",
"trigram_count",
"=",
"0",
"for",
"key",
",",
"val",
"in",
"csv",
".",
"reader",
"(",
"open",
"(",
"trigram_index",
")",
")",
":",
"result",
"[",
"key",
"]",
"=",... | Build a dictionary of trigrams and their indices from a csv | [
"Build",
"a",
"dictionary",
"of",
"trigrams",
"and",
"their",
"indices",
"from",
"a",
"csv"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/__init__.py#L48-L55 |
26,134 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/__init__.py | _begins_with_one_of | def _begins_with_one_of(sentence, parts_of_speech):
"""Return True if the sentence or fragment begins with one of the parts of
speech in the list, else False"""
doc = nlp(sentence)
if doc[0].tag_ in parts_of_speech:
return True
return False | python | def _begins_with_one_of(sentence, parts_of_speech):
doc = nlp(sentence)
if doc[0].tag_ in parts_of_speech:
return True
return False | [
"def",
"_begins_with_one_of",
"(",
"sentence",
",",
"parts_of_speech",
")",
":",
"doc",
"=",
"nlp",
"(",
"sentence",
")",
"if",
"doc",
"[",
"0",
"]",
".",
"tag_",
"in",
"parts_of_speech",
":",
"return",
"True",
"return",
"False"
] | Return True if the sentence or fragment begins with one of the parts of
speech in the list, else False | [
"Return",
"True",
"if",
"the",
"sentence",
"or",
"fragment",
"begins",
"with",
"one",
"of",
"the",
"parts",
"of",
"speech",
"in",
"the",
"list",
"else",
"False"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/__init__.py#L102-L108 |
26,135 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/__init__.py | get_language_tool_feedback | def get_language_tool_feedback(sentence):
"""Get matches from languagetool"""
payload = {'language':'en-US', 'text':sentence}
try:
r = requests.post(LT_SERVER, data=payload)
except requests.exceptions.ConnectionError as e:
raise requests.exceptions.ConnectionError('''The languagetool server is
not running. Try starting it with "ltserver" ''')
if r.status_code >= 200 and r.status_code < 300:
return r.json().get('matches', [])
return [] | python | def get_language_tool_feedback(sentence):
payload = {'language':'en-US', 'text':sentence}
try:
r = requests.post(LT_SERVER, data=payload)
except requests.exceptions.ConnectionError as e:
raise requests.exceptions.ConnectionError('''The languagetool server is
not running. Try starting it with "ltserver" ''')
if r.status_code >= 200 and r.status_code < 300:
return r.json().get('matches', [])
return [] | [
"def",
"get_language_tool_feedback",
"(",
"sentence",
")",
":",
"payload",
"=",
"{",
"'language'",
":",
"'en-US'",
",",
"'text'",
":",
"sentence",
"}",
"try",
":",
"r",
"=",
"requests",
".",
"post",
"(",
"LT_SERVER",
",",
"data",
"=",
"payload",
")",
"ex... | Get matches from languagetool | [
"Get",
"matches",
"from",
"languagetool"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/__init__.py#L134-L144 |
26,136 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/__init__.py | is_participle_clause_fragment | def is_participle_clause_fragment(sentence):
"""Supply a sentence or fragment and recieve a confidence interval"""
# short circuit if sentence or fragment doesn't start with a participle
# past participles can sometimes look like adjectives -- ie, Tired
if not _begins_with_one_of(sentence, ['VBG', 'VBN', 'JJ']):
return 0.0
if _begins_with_one_of(sentence, ['JJ']):
doc = nlp(sentence)
fw = [w for w in doc][0]
# Beautiful toy birds
if fw.dep_ == 'amod':
return 0.0
# short circuit if sentence starts with a gerund and the gerund is the
# subject.
if _begins_with_one_of(sentence, ['VBG']):
doc = nlp(sentence)
fw = [w for w in doc][0]
# Running is fun
if fw.dep_.endswith('subj'):
return 0.0
fc = [c for c in doc.noun_chunks]
# Dancing boys can never sing
if str(fw) in str(fc):
return 0.0
positive_prob = models['participle'].predict([_text_to_vector(sentence,
trigram2idx['participle'], trigram_count['participle'])])[0][1]
return float(positive_prob) | python | def is_participle_clause_fragment(sentence):
# short circuit if sentence or fragment doesn't start with a participle
# past participles can sometimes look like adjectives -- ie, Tired
if not _begins_with_one_of(sentence, ['VBG', 'VBN', 'JJ']):
return 0.0
if _begins_with_one_of(sentence, ['JJ']):
doc = nlp(sentence)
fw = [w for w in doc][0]
# Beautiful toy birds
if fw.dep_ == 'amod':
return 0.0
# short circuit if sentence starts with a gerund and the gerund is the
# subject.
if _begins_with_one_of(sentence, ['VBG']):
doc = nlp(sentence)
fw = [w for w in doc][0]
# Running is fun
if fw.dep_.endswith('subj'):
return 0.0
fc = [c for c in doc.noun_chunks]
# Dancing boys can never sing
if str(fw) in str(fc):
return 0.0
positive_prob = models['participle'].predict([_text_to_vector(sentence,
trigram2idx['participle'], trigram_count['participle'])])[0][1]
return float(positive_prob) | [
"def",
"is_participle_clause_fragment",
"(",
"sentence",
")",
":",
"# short circuit if sentence or fragment doesn't start with a participle",
"# past participles can sometimes look like adjectives -- ie, Tired",
"if",
"not",
"_begins_with_one_of",
"(",
"sentence",
",",
"[",
"'VBG'",
... | Supply a sentence or fragment and recieve a confidence interval | [
"Supply",
"a",
"sentence",
"or",
"fragment",
"and",
"recieve",
"a",
"confidence",
"interval"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/__init__.py#L147-L178 |
26,137 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/__init__.py | check | def check(sentence):
"""Supply a sentence or fragment and recieve feedback"""
# How we decide what to put as the human readable feedback
#
# Our order of prefence is,
#
# 1. Spelling errors.
# - A spelling error can change the sentence meaning
# 2. Subject-verb agreement errors
# 3. Subordinate conjunction starting a sentence
# 4. Participle phrase fragment
# 5. Other errors
result = Feedback()
is_missing_verb = detect_missing_verb(sentence)
is_infinitive = detect_infinitive_phrase(sentence)
is_participle = is_participle_clause_fragment(sentence)
lang_tool_feedback = get_language_tool_feedback(sentence)
subject_and_verb_agree = get_subject_verb_agreement_feedback(sentence)
####
if is_missing_verb: # Lowest priority
result.matches['missing_verb'] = True
result.human_readable = MISSING_VERB_ADVICE.replace('\n', '')
result.primary_error = 'MISSING_VERB_ERROR'
result.specific_error = 'MISSING_VERB'
if is_participle > .5:
result.matches['participle_phrase'] = is_participle
result.human_readable = PARTICIPLE_FRAGMENT_ADVICE.replace('\n', '')
result.primary_error = 'FRAGMENT_ERROR'
result.specific_error = 'PARTICIPLE_PHRASE'
if lang_tool_feedback:
result.matches['lang_tool'] = lang_tool_feedback
for ltf in lang_tool_feedback:
if ltf['rule']['id'] == 'SENTENCE_FRAGMENT':
result.human_readable = lang_tool_feedback[0]['message']
result.primary_error = 'FRAGMENT_ERROR'
result.specific_error = 'SUBORDINATE_CLAUSE'
if is_infinitive:
result.matches['infinitive_phrase'] = True
result.human_readable = INFINITIVE_PHRASE_ADVICE.replace('\n', '')
result.primary_error = 'INFINITIVE_PHRASE_ERROR'
result.specific_error = 'INFINITIVE_PHRASE'
if not subject_and_verb_agree:
result.matches['subject_verb_agreement'] = subject_and_verb_agree
result.human_readable = SUBJECT_VERB_AGREEMENT_ADVICE.replace('\n', '')
result.primary_error = 'SUBJECT_VERB_AGREEMENT_ERROR'
result.specific_error = 'SUBJECT_VERB_AGREEMENT'
if lang_tool_feedback: # Highest priority (spelling, other lang tool errors)
result.matches['lang_tool'] = lang_tool_feedback
for ltf in lang_tool_feedback:
if ltf['rule']['id'] == 'MORFOLOGIK_RULE_EN_US':
result.human_readable = ltf['message']
result.primary_error = 'SPELLING_ERROR'
result.specific_error = 'SPELLING_ERROR'
if not result.primary_error:
result.human_readable = ltf['message']
result.primary_error = 'OTHER_ERROR'
result.specific_error = ltf['rule']['id']
####
if not result.matches:
result.human_readable = STRONG_SENTENCE_ADVICE.replace('\n', '')
return result | python | def check(sentence):
# How we decide what to put as the human readable feedback
#
# Our order of prefence is,
#
# 1. Spelling errors.
# - A spelling error can change the sentence meaning
# 2. Subject-verb agreement errors
# 3. Subordinate conjunction starting a sentence
# 4. Participle phrase fragment
# 5. Other errors
result = Feedback()
is_missing_verb = detect_missing_verb(sentence)
is_infinitive = detect_infinitive_phrase(sentence)
is_participle = is_participle_clause_fragment(sentence)
lang_tool_feedback = get_language_tool_feedback(sentence)
subject_and_verb_agree = get_subject_verb_agreement_feedback(sentence)
####
if is_missing_verb: # Lowest priority
result.matches['missing_verb'] = True
result.human_readable = MISSING_VERB_ADVICE.replace('\n', '')
result.primary_error = 'MISSING_VERB_ERROR'
result.specific_error = 'MISSING_VERB'
if is_participle > .5:
result.matches['participle_phrase'] = is_participle
result.human_readable = PARTICIPLE_FRAGMENT_ADVICE.replace('\n', '')
result.primary_error = 'FRAGMENT_ERROR'
result.specific_error = 'PARTICIPLE_PHRASE'
if lang_tool_feedback:
result.matches['lang_tool'] = lang_tool_feedback
for ltf in lang_tool_feedback:
if ltf['rule']['id'] == 'SENTENCE_FRAGMENT':
result.human_readable = lang_tool_feedback[0]['message']
result.primary_error = 'FRAGMENT_ERROR'
result.specific_error = 'SUBORDINATE_CLAUSE'
if is_infinitive:
result.matches['infinitive_phrase'] = True
result.human_readable = INFINITIVE_PHRASE_ADVICE.replace('\n', '')
result.primary_error = 'INFINITIVE_PHRASE_ERROR'
result.specific_error = 'INFINITIVE_PHRASE'
if not subject_and_verb_agree:
result.matches['subject_verb_agreement'] = subject_and_verb_agree
result.human_readable = SUBJECT_VERB_AGREEMENT_ADVICE.replace('\n', '')
result.primary_error = 'SUBJECT_VERB_AGREEMENT_ERROR'
result.specific_error = 'SUBJECT_VERB_AGREEMENT'
if lang_tool_feedback: # Highest priority (spelling, other lang tool errors)
result.matches['lang_tool'] = lang_tool_feedback
for ltf in lang_tool_feedback:
if ltf['rule']['id'] == 'MORFOLOGIK_RULE_EN_US':
result.human_readable = ltf['message']
result.primary_error = 'SPELLING_ERROR'
result.specific_error = 'SPELLING_ERROR'
if not result.primary_error:
result.human_readable = ltf['message']
result.primary_error = 'OTHER_ERROR'
result.specific_error = ltf['rule']['id']
####
if not result.matches:
result.human_readable = STRONG_SENTENCE_ADVICE.replace('\n', '')
return result | [
"def",
"check",
"(",
"sentence",
")",
":",
"# How we decide what to put as the human readable feedback",
"#",
"# Our order of prefence is,",
"#",
"# 1. Spelling errors.",
"# - A spelling error can change the sentence meaning",
"# 2. Subject-verb agreement errors",
"# 3. Subordinate conju... | Supply a sentence or fragment and recieve feedback | [
"Supply",
"a",
"sentence",
"or",
"fragment",
"and",
"recieve",
"feedback"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/__init__.py#L181-L246 |
26,138 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/examples/porcupine/app.py | list_submissions | def list_submissions():
"""List the past submissions with information about them"""
submissions = []
try:
submissions = session.query(Submission).all()
except SQLAlchemyError as e:
session.rollback()
return render_template('list_submissions.html', submissions=submissions) | python | def list_submissions():
submissions = []
try:
submissions = session.query(Submission).all()
except SQLAlchemyError as e:
session.rollback()
return render_template('list_submissions.html', submissions=submissions) | [
"def",
"list_submissions",
"(",
")",
":",
"submissions",
"=",
"[",
"]",
"try",
":",
"submissions",
"=",
"session",
".",
"query",
"(",
"Submission",
")",
".",
"all",
"(",
")",
"except",
"SQLAlchemyError",
"as",
"e",
":",
"session",
".",
"rollback",
"(",
... | List the past submissions with information about them | [
"List",
"the",
"past",
"submissions",
"with",
"information",
"about",
"them"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/examples/porcupine/app.py#L49-L56 |
26,139 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/examples/porcupine/app.py | get_submissions | def get_submissions():
"""API endpoint to get submissions in JSON format"""
print(request.args.to_dict())
print(request.args.get('search[value]'))
print(request.args.get('draw', 1))
# submissions = session.query(Submission).all()
if request.args.get('correct_filter', 'all') == 'all':
correct_filter = [True, False]
elif request.args['correct_filter'] == 'correct':
correct_filter = [True]
else:
correct_filter = [False]
if request.args.get('order[0][column]', '0') == '0':
column = 'id'
elif request.args['order[0][column]'] == '1':
column = 'text'
else:
column = 'primary_error'
order_str = "{} {}".format(column, request.args.get('order[0][dir]', 'desc'))
search_val = request.args.get('search[value]')
draw = request.args.get('draw', 1)
filtered_len = session.query(Submission)\
.filter(Submission.text.startswith(search_val))\
.filter(Submission.correct.in_(correct_filter))\
.count()
subs = \
session.query(Submission).filter(Submission.text.startswith(search_val))\
.filter(Submission.correct.in_(correct_filter))\
.order_by(order_str)\
.offset(request.args.get('start', 0))\
.limit(request.args.get('length', 10))\
.all()
submissions = {'draw': draw, 'recordsTotal':0, 'recordsFiltered':0, 'data':[]}
i = 0
for i, submission in enumerate(subs):
submissions['data'].append([submission.id, submission.text,
submission.primary_error, submission.correct])
submissions['recordsTotal'] = session.query(Submission).count()
submissions['recordsFiltered'] = filtered_len
return jsonify(submissions) | python | def get_submissions():
print(request.args.to_dict())
print(request.args.get('search[value]'))
print(request.args.get('draw', 1))
# submissions = session.query(Submission).all()
if request.args.get('correct_filter', 'all') == 'all':
correct_filter = [True, False]
elif request.args['correct_filter'] == 'correct':
correct_filter = [True]
else:
correct_filter = [False]
if request.args.get('order[0][column]', '0') == '0':
column = 'id'
elif request.args['order[0][column]'] == '1':
column = 'text'
else:
column = 'primary_error'
order_str = "{} {}".format(column, request.args.get('order[0][dir]', 'desc'))
search_val = request.args.get('search[value]')
draw = request.args.get('draw', 1)
filtered_len = session.query(Submission)\
.filter(Submission.text.startswith(search_val))\
.filter(Submission.correct.in_(correct_filter))\
.count()
subs = \
session.query(Submission).filter(Submission.text.startswith(search_val))\
.filter(Submission.correct.in_(correct_filter))\
.order_by(order_str)\
.offset(request.args.get('start', 0))\
.limit(request.args.get('length', 10))\
.all()
submissions = {'draw': draw, 'recordsTotal':0, 'recordsFiltered':0, 'data':[]}
i = 0
for i, submission in enumerate(subs):
submissions['data'].append([submission.id, submission.text,
submission.primary_error, submission.correct])
submissions['recordsTotal'] = session.query(Submission).count()
submissions['recordsFiltered'] = filtered_len
return jsonify(submissions) | [
"def",
"get_submissions",
"(",
")",
":",
"print",
"(",
"request",
".",
"args",
".",
"to_dict",
"(",
")",
")",
"print",
"(",
"request",
".",
"args",
".",
"get",
"(",
"'search[value]'",
")",
")",
"print",
"(",
"request",
".",
"args",
".",
"get",
"(",
... | API endpoint to get submissions in JSON format | [
"API",
"endpoint",
"to",
"get",
"submissions",
"in",
"JSON",
"format"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/examples/porcupine/app.py#L59-L102 |
26,140 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/examples/porcupine/app.py | check_sentence | def check_sentence():
"""Sole porcupine endpoint"""
text = ''
if request.method == 'POST':
text = request.form['text']
if not text:
error = 'No input'
flash_message = error
else:
fb = check(request.form['text'])
correct = False
if request.form.get('is_correct') and not fb.primary_error:
correct = True
elif not request.form.get('is_correct') and fb.primary_error:
correct = True
sub = Submission(text=text, correct=correct, primary_error=fb.primary_error,
specific_error=fb.specific_error)
session.add(sub)
session.commit()
# TODO: remove the hack below
if not fb.primary_error:
fb.human_readable = "No errors were found."
flash_message = fb.human_readable
flash(flash_message)
return render_template('check_sentence.html', text=text) | python | def check_sentence():
text = ''
if request.method == 'POST':
text = request.form['text']
if not text:
error = 'No input'
flash_message = error
else:
fb = check(request.form['text'])
correct = False
if request.form.get('is_correct') and not fb.primary_error:
correct = True
elif not request.form.get('is_correct') and fb.primary_error:
correct = True
sub = Submission(text=text, correct=correct, primary_error=fb.primary_error,
specific_error=fb.specific_error)
session.add(sub)
session.commit()
# TODO: remove the hack below
if not fb.primary_error:
fb.human_readable = "No errors were found."
flash_message = fb.human_readable
flash(flash_message)
return render_template('check_sentence.html', text=text) | [
"def",
"check_sentence",
"(",
")",
":",
"text",
"=",
"''",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"text",
"=",
"request",
".",
"form",
"[",
"'text'",
"]",
"if",
"not",
"text",
":",
"error",
"=",
"'No input'",
"flash_message",
"=",
"error"... | Sole porcupine endpoint | [
"Sole",
"porcupine",
"endpoint"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/examples/porcupine/app.py#L106-L131 |
26,141 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/sva_rb2.py | raise_double_modal_error | def raise_double_modal_error(verb_phrase_doc):
"""A modal auxilary verb should not follow another modal auxilary verb"""
prev_word = None
for word in verb_phrase:
if word.tag_ == 'MD' and prev_word.tag == 'MD':
raise('DoubleModalError')
prev_word = word | python | def raise_double_modal_error(verb_phrase_doc):
prev_word = None
for word in verb_phrase:
if word.tag_ == 'MD' and prev_word.tag == 'MD':
raise('DoubleModalError')
prev_word = word | [
"def",
"raise_double_modal_error",
"(",
"verb_phrase_doc",
")",
":",
"prev_word",
"=",
"None",
"for",
"word",
"in",
"verb_phrase",
":",
"if",
"word",
".",
"tag_",
"==",
"'MD'",
"and",
"prev_word",
".",
"tag",
"==",
"'MD'",
":",
"raise",
"(",
"'DoubleModalErr... | A modal auxilary verb should not follow another modal auxilary verb | [
"A",
"modal",
"auxilary",
"verb",
"should",
"not",
"follow",
"another",
"modal",
"auxilary",
"verb"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/sva_rb2.py#L97-L103 |
26,142 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/sva_rb2.py | raise_modal_error | def raise_modal_error(verb_phrase_doc):
"""Given a verb phrase, raise an error if the modal auxilary has an issue
with it"""
verb_phrase = verb_phrase_doc.text.lower()
bad_strings = ['should had', 'should has', 'could had', 'could has', 'would '
'had', 'would has'] ["should", "could", "would"]
for bs in bad_strings:
if bs in verb_phrase:
raise('ShouldCouldWouldError') | python | def raise_modal_error(verb_phrase_doc):
verb_phrase = verb_phrase_doc.text.lower()
bad_strings = ['should had', 'should has', 'could had', 'could has', 'would '
'had', 'would has'] ["should", "could", "would"]
for bs in bad_strings:
if bs in verb_phrase:
raise('ShouldCouldWouldError') | [
"def",
"raise_modal_error",
"(",
"verb_phrase_doc",
")",
":",
"verb_phrase",
"=",
"verb_phrase_doc",
".",
"text",
".",
"lower",
"(",
")",
"bad_strings",
"=",
"[",
"'should had'",
",",
"'should has'",
",",
"'could had'",
",",
"'could has'",
",",
"'would '",
"'had... | Given a verb phrase, raise an error if the modal auxilary has an issue
with it | [
"Given",
"a",
"verb",
"phrase",
"raise",
"an",
"error",
"if",
"the",
"modal",
"auxilary",
"has",
"an",
"issue",
"with",
"it"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/sva_rb2.py#L106-L114 |
26,143 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/sva_rb2.py | split_infinitive_warning | def split_infinitive_warning(sentence_str):
"""Return a warning for a split infinitive, else, None"""
sent_doc = textacy.Doc(sentence_str, lang='en_core_web_lg')
inf_pattern = r'<PART><ADV><VERB>' # To aux/auxpass* csubj
infinitives = textacy.extract.pos_regex_matches(sent_doc, inf_pattern)
for inf in infinitives:
if inf[0].text.lower() != 'to':
continue
if inf[-1].tag_ != 'VB':
continue
return 'SplitInfinitiveWarning' | python | def split_infinitive_warning(sentence_str):
sent_doc = textacy.Doc(sentence_str, lang='en_core_web_lg')
inf_pattern = r'<PART><ADV><VERB>' # To aux/auxpass* csubj
infinitives = textacy.extract.pos_regex_matches(sent_doc, inf_pattern)
for inf in infinitives:
if inf[0].text.lower() != 'to':
continue
if inf[-1].tag_ != 'VB':
continue
return 'SplitInfinitiveWarning' | [
"def",
"split_infinitive_warning",
"(",
"sentence_str",
")",
":",
"sent_doc",
"=",
"textacy",
".",
"Doc",
"(",
"sentence_str",
",",
"lang",
"=",
"'en_core_web_lg'",
")",
"inf_pattern",
"=",
"r'<PART><ADV><VERB>'",
"# To aux/auxpass* csubj",
"infinitives",
"=",
"textac... | Return a warning for a split infinitive, else, None | [
"Return",
"a",
"warning",
"for",
"a",
"split",
"infinitive",
"else",
"None"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/sva_rb2.py#L234-L244 |
26,144 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/sva_rb2.py | raise_infinitive_error | def raise_infinitive_error(sentence_str):
"""Given a string, check that all infinitives are properly formatted"""
sent_doc = textacy.Doc(sentence_str, lang='en_core_web_lg')
inf_pattern = r'<PART|ADP><VERB>' # To aux/auxpass* csubj
infinitives = textacy.extract.pos_regex_matches(sent_doc, inf_pattern)
for inf in infinitives:
if inf[0].text.lower() != 'to':
continue
if inf[-1].tag_ != 'VB':
raise Exception('InfinitivePhraseError') | python | def raise_infinitive_error(sentence_str):
sent_doc = textacy.Doc(sentence_str, lang='en_core_web_lg')
inf_pattern = r'<PART|ADP><VERB>' # To aux/auxpass* csubj
infinitives = textacy.extract.pos_regex_matches(sent_doc, inf_pattern)
for inf in infinitives:
if inf[0].text.lower() != 'to':
continue
if inf[-1].tag_ != 'VB':
raise Exception('InfinitivePhraseError') | [
"def",
"raise_infinitive_error",
"(",
"sentence_str",
")",
":",
"sent_doc",
"=",
"textacy",
".",
"Doc",
"(",
"sentence_str",
",",
"lang",
"=",
"'en_core_web_lg'",
")",
"inf_pattern",
"=",
"r'<PART|ADP><VERB>'",
"# To aux/auxpass* csubj",
"infinitives",
"=",
"textacy",... | Given a string, check that all infinitives are properly formatted | [
"Given",
"a",
"string",
"check",
"that",
"all",
"infinitives",
"are",
"properly",
"formatted"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/sva_rb2.py#L246-L255 |
26,145 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/sva_rb2.py | drop_modifiers | def drop_modifiers(sentence_str):
"""Given a string, drop the modifiers and return a string
without them"""
tdoc = textacy.Doc(sentence_str, lang='en_core_web_lg')
new_sent = tdoc.text
unusual_char = '形'
for tag in tdoc:
if tag.dep_.endswith('mod'):
# Replace the tag
new_sent = new_sent[:tag.idx] + unusual_char * len(tag.text) +\
new_sent[tag.idx + len(tag.text):]
new_sent = new_sent.replace(unusual_char, '')
new_sent = textacy.preprocess.normalize_whitespace(new_sent)
return new_sent | python | def drop_modifiers(sentence_str):
tdoc = textacy.Doc(sentence_str, lang='en_core_web_lg')
new_sent = tdoc.text
unusual_char = '形'
for tag in tdoc:
if tag.dep_.endswith('mod'):
# Replace the tag
new_sent = new_sent[:tag.idx] + unusual_char * len(tag.text) +\
new_sent[tag.idx + len(tag.text):]
new_sent = new_sent.replace(unusual_char, '')
new_sent = textacy.preprocess.normalize_whitespace(new_sent)
return new_sent | [
"def",
"drop_modifiers",
"(",
"sentence_str",
")",
":",
"tdoc",
"=",
"textacy",
".",
"Doc",
"(",
"sentence_str",
",",
"lang",
"=",
"'en_core_web_lg'",
")",
"new_sent",
"=",
"tdoc",
".",
"text",
"unusual_char",
"=",
"'形'",
"for",
"tag",
"in",
"tdoc",
":",
... | Given a string, drop the modifiers and return a string
without them | [
"Given",
"a",
"string",
"drop",
"the",
"modifiers",
"and",
"return",
"a",
"string",
"without",
"them"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/sva_rb2.py#L258-L271 |
26,146 | empirical-org/Quill-NLP-Tools-and-Datasets | quillnlp/cluster.py | cluster | def cluster(list_of_texts, num_clusters=3):
"""
Cluster a list of texts into a predefined number of clusters.
:param list_of_texts: a list of untokenized texts
:param num_clusters: the predefined number of clusters
:return: a list with the cluster id for each text, e.g. [0,1,0,0,2,2,1]
"""
pipeline = Pipeline([
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("clust", KMeans(n_clusters=num_clusters))
])
try:
clusters = pipeline.fit_predict(list_of_texts)
except ValueError:
clusters = list(range(len(list_of_texts)))
return clusters | python | def cluster(list_of_texts, num_clusters=3):
pipeline = Pipeline([
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("clust", KMeans(n_clusters=num_clusters))
])
try:
clusters = pipeline.fit_predict(list_of_texts)
except ValueError:
clusters = list(range(len(list_of_texts)))
return clusters | [
"def",
"cluster",
"(",
"list_of_texts",
",",
"num_clusters",
"=",
"3",
")",
":",
"pipeline",
"=",
"Pipeline",
"(",
"[",
"(",
"\"vect\"",
",",
"CountVectorizer",
"(",
")",
")",
",",
"(",
"\"tfidf\"",
",",
"TfidfTransformer",
"(",
")",
")",
",",
"(",
"\"... | Cluster a list of texts into a predefined number of clusters.
:param list_of_texts: a list of untokenized texts
:param num_clusters: the predefined number of clusters
:return: a list with the cluster id for each text, e.g. [0,1,0,0,2,2,1] | [
"Cluster",
"a",
"list",
"of",
"texts",
"into",
"a",
"predefined",
"number",
"of",
"clusters",
"."
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/quillnlp/cluster.py#L6-L25 |
26,147 | empirical-org/Quill-NLP-Tools-and-Datasets | quillnlp/topics.py | find_topics | def find_topics(token_lists, num_topics=10):
""" Find the topics in a list of texts with Latent Dirichlet Allocation. """
dictionary = Dictionary(token_lists)
print('Number of unique words in original documents:', len(dictionary))
dictionary.filter_extremes(no_below=2, no_above=0.7)
print('Number of unique words after removing rare and common words:', len(dictionary))
corpus = [dictionary.doc2bow(tokens) for tokens in token_lists]
model = LdaModel(corpus=corpus, id2word=dictionary, num_topics=num_topics, chunksize=100, passes=5, random_state=1)
print_topics(model)
return model, dictionary | python | def find_topics(token_lists, num_topics=10):
dictionary = Dictionary(token_lists)
print('Number of unique words in original documents:', len(dictionary))
dictionary.filter_extremes(no_below=2, no_above=0.7)
print('Number of unique words after removing rare and common words:', len(dictionary))
corpus = [dictionary.doc2bow(tokens) for tokens in token_lists]
model = LdaModel(corpus=corpus, id2word=dictionary, num_topics=num_topics, chunksize=100, passes=5, random_state=1)
print_topics(model)
return model, dictionary | [
"def",
"find_topics",
"(",
"token_lists",
",",
"num_topics",
"=",
"10",
")",
":",
"dictionary",
"=",
"Dictionary",
"(",
"token_lists",
")",
"print",
"(",
"'Number of unique words in original documents:'",
",",
"len",
"(",
"dictionary",
")",
")",
"dictionary",
".",... | Find the topics in a list of texts with Latent Dirichlet Allocation. | [
"Find",
"the",
"topics",
"in",
"a",
"list",
"of",
"texts",
"with",
"Latent",
"Dirichlet",
"Allocation",
"."
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/quillnlp/topics.py#L14-L27 |
26,148 | empirical-org/Quill-NLP-Tools-and-Datasets | scrapers/gutenfetch/gutenfetch/__init__.py | fetch_bookshelf | def fetch_bookshelf(start_url, output_dir):
"""Fetch all the books off of a gutenberg project bookshelf page
example bookshelf page,
http://www.gutenberg.org/wiki/Children%27s_Fiction_(Bookshelf)
"""
# make output directory
try:
os.mkdir(OUTPUT_DIR + output_dir)
except OSError as e:
raise(e)
# fetch page
r = requests.get(start_url)
# extract links
soup = bs(r.text, 'html.parser')
book_links = soup.find_all(class_=re.compile("extiw"))
new_links = []
for el in book_links:
link = el['href']
title = el.text
bookid = link.split('/')[-1]
if bookid.isdigit():
new_link = NEW_LINK_BASE.format(bookid, bookid)
new_links.append([title, new_link])
# save links as books
for link_tup in new_links:
time.sleep(.10) # be nice to project gutenberg
r1 = requests.get(link_tup[1])
new_filename = link_tup[0].lower().replace(' ', '-').replace('\n',
'-')
new_new_filename = ''
for char in new_filename:
if char in 'abcdefghijklmnopqrstuvwxyz-':
new_new_filename += char
new_filename = new_new_filename[:MAX_FILENAME_LEN] + '.txt'
with open(OUTPUT_DIR + output_dir + '/' + new_filename, 'w+') as output_file:
output_file.write(r1.text)
return None | python | def fetch_bookshelf(start_url, output_dir):
# make output directory
try:
os.mkdir(OUTPUT_DIR + output_dir)
except OSError as e:
raise(e)
# fetch page
r = requests.get(start_url)
# extract links
soup = bs(r.text, 'html.parser')
book_links = soup.find_all(class_=re.compile("extiw"))
new_links = []
for el in book_links:
link = el['href']
title = el.text
bookid = link.split('/')[-1]
if bookid.isdigit():
new_link = NEW_LINK_BASE.format(bookid, bookid)
new_links.append([title, new_link])
# save links as books
for link_tup in new_links:
time.sleep(.10) # be nice to project gutenberg
r1 = requests.get(link_tup[1])
new_filename = link_tup[0].lower().replace(' ', '-').replace('\n',
'-')
new_new_filename = ''
for char in new_filename:
if char in 'abcdefghijklmnopqrstuvwxyz-':
new_new_filename += char
new_filename = new_new_filename[:MAX_FILENAME_LEN] + '.txt'
with open(OUTPUT_DIR + output_dir + '/' + new_filename, 'w+') as output_file:
output_file.write(r1.text)
return None | [
"def",
"fetch_bookshelf",
"(",
"start_url",
",",
"output_dir",
")",
":",
"# make output directory",
"try",
":",
"os",
".",
"mkdir",
"(",
"OUTPUT_DIR",
"+",
"output_dir",
")",
"except",
"OSError",
"as",
"e",
":",
"raise",
"(",
"e",
")",
"# fetch page",
"r",
... | Fetch all the books off of a gutenberg project bookshelf page
example bookshelf page,
http://www.gutenberg.org/wiki/Children%27s_Fiction_(Bookshelf) | [
"Fetch",
"all",
"the",
"books",
"off",
"of",
"a",
"gutenberg",
"project",
"bookshelf",
"page"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/scrapers/gutenfetch/gutenfetch/__init__.py#L23-L64 |
26,149 | empirical-org/Quill-NLP-Tools-and-Datasets | quillnlp/preprocess.py | lemmatize | def lemmatize(text, lowercase=True, remove_stopwords=True):
""" Return the lemmas of the tokens in a text. """
doc = nlp(text)
if lowercase and remove_stopwords:
lemmas = [t.lemma_.lower() for t in doc if not (t.is_stop or t.orth_.lower() in STOPWORDS)]
elif lowercase:
lemmas = [t.lemma_.lower() for t in doc]
elif remove_stopwords:
lemmas = [t.lemma_ for t in doc if not (t.is_stop or t.orth_.lower() in STOPWORDS)]
else:
lemmas = [t.lemma_ for t in doc]
return lemmas | python | def lemmatize(text, lowercase=True, remove_stopwords=True):
doc = nlp(text)
if lowercase and remove_stopwords:
lemmas = [t.lemma_.lower() for t in doc if not (t.is_stop or t.orth_.lower() in STOPWORDS)]
elif lowercase:
lemmas = [t.lemma_.lower() for t in doc]
elif remove_stopwords:
lemmas = [t.lemma_ for t in doc if not (t.is_stop or t.orth_.lower() in STOPWORDS)]
else:
lemmas = [t.lemma_ for t in doc]
return lemmas | [
"def",
"lemmatize",
"(",
"text",
",",
"lowercase",
"=",
"True",
",",
"remove_stopwords",
"=",
"True",
")",
":",
"doc",
"=",
"nlp",
"(",
"text",
")",
"if",
"lowercase",
"and",
"remove_stopwords",
":",
"lemmas",
"=",
"[",
"t",
".",
"lemma_",
".",
"lower"... | Return the lemmas of the tokens in a text. | [
"Return",
"the",
"lemmas",
"of",
"the",
"tokens",
"in",
"a",
"text",
"."
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/quillnlp/preprocess.py#L8-L20 |
26,150 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/sva.py | inflate | def inflate(deflated_vector):
"""Given a defalated vector, inflate it into a np array and return it"""
dv = json.loads(deflated_vector)
#result = np.zeros(dv['reductions']) # some claim vector length 5555, others
#5530. this could have occurred doing remote computations? or something.
# anyhow, we will use 5555. Let's just hard code it. Gosh darnit.
result = np.zeros(5555) # some claim vector length 5555, others
for n in dv['indices']:
result[int(n)] = dv['indices'][n]
#print("Inflated vector. Length", len(result))
return result | python | def inflate(deflated_vector):
dv = json.loads(deflated_vector)
#result = np.zeros(dv['reductions']) # some claim vector length 5555, others
#5530. this could have occurred doing remote computations? or something.
# anyhow, we will use 5555. Let's just hard code it. Gosh darnit.
result = np.zeros(5555) # some claim vector length 5555, others
for n in dv['indices']:
result[int(n)] = dv['indices'][n]
#print("Inflated vector. Length", len(result))
return result | [
"def",
"inflate",
"(",
"deflated_vector",
")",
":",
"dv",
"=",
"json",
".",
"loads",
"(",
"deflated_vector",
")",
"#result = np.zeros(dv['reductions']) # some claim vector length 5555, others",
"#5530. this could have occurred doing remote computations? or something.",
"# anyhow, we ... | Given a defalated vector, inflate it into a np array and return it | [
"Given",
"a",
"defalated",
"vector",
"inflate",
"it",
"into",
"a",
"np",
"array",
"and",
"return",
"it"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/sva.py#L25-L35 |
26,151 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/sva.py | text_to_vector | def text_to_vector(sent_str):
"""Given a string, get it's defalted vector, inflate it, then return the
inflated vector"""
r = requests.get("{}/sva/vector".format(VECTORIZE_API), params={'s':sent_str})
return inflate(r.text) | python | def text_to_vector(sent_str):
r = requests.get("{}/sva/vector".format(VECTORIZE_API), params={'s':sent_str})
return inflate(r.text) | [
"def",
"text_to_vector",
"(",
"sent_str",
")",
":",
"r",
"=",
"requests",
".",
"get",
"(",
"\"{}/sva/vector\"",
".",
"format",
"(",
"VECTORIZE_API",
")",
",",
"params",
"=",
"{",
"'s'",
":",
"sent_str",
"}",
")",
"return",
"inflate",
"(",
"r",
".",
"te... | Given a string, get it's defalted vector, inflate it, then return the
inflated vector | [
"Given",
"a",
"string",
"get",
"it",
"s",
"defalted",
"vector",
"inflate",
"it",
"then",
"return",
"the",
"inflated",
"vector"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/sva.py#L37-L41 |
26,152 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/infinitive_phrase_detect.py | detect_missing_verb | def detect_missing_verb(sentence):
"""Return True if the sentence appears to be missing a main verb"""
# TODO: should this be relocated?
doc = nlp(sentence)
for w in doc:
if w.tag_.startswith('VB') and w.dep_ == 'ROOT':
return False # looks like there is at least 1 main verb
return True | python | def detect_missing_verb(sentence):
# TODO: should this be relocated?
doc = nlp(sentence)
for w in doc:
if w.tag_.startswith('VB') and w.dep_ == 'ROOT':
return False # looks like there is at least 1 main verb
return True | [
"def",
"detect_missing_verb",
"(",
"sentence",
")",
":",
"# TODO: should this be relocated?",
"doc",
"=",
"nlp",
"(",
"sentence",
")",
"for",
"w",
"in",
"doc",
":",
"if",
"w",
".",
"tag_",
".",
"startswith",
"(",
"'VB'",
")",
"and",
"w",
".",
"dep_",
"==... | Return True if the sentence appears to be missing a main verb | [
"Return",
"True",
"if",
"the",
"sentence",
"appears",
"to",
"be",
"missing",
"a",
"main",
"verb"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/infinitive_phrase_detect.py#L12-L19 |
26,153 | empirical-org/Quill-NLP-Tools-and-Datasets | utils/qfragment/qfragment/infinitive_phrase_detect.py | detect_infinitive_phrase | def detect_infinitive_phrase(sentence):
"""Given a string, return true if it is an infinitive phrase fragment"""
# eliminate sentences without to
if not 'to' in sentence.lower():
return False
doc = nlp(sentence)
prev_word = None
for w in doc:
# if statement will execute exactly once
if prev_word == 'to':
if w.dep_ == 'ROOT' and w.tag_.startswith('VB'):
return True # this is quite likely to be an infinitive phrase
else:
return False
prev_word = w.text.lower() | python | def detect_infinitive_phrase(sentence):
# eliminate sentences without to
if not 'to' in sentence.lower():
return False
doc = nlp(sentence)
prev_word = None
for w in doc:
# if statement will execute exactly once
if prev_word == 'to':
if w.dep_ == 'ROOT' and w.tag_.startswith('VB'):
return True # this is quite likely to be an infinitive phrase
else:
return False
prev_word = w.text.lower() | [
"def",
"detect_infinitive_phrase",
"(",
"sentence",
")",
":",
"# eliminate sentences without to",
"if",
"not",
"'to'",
"in",
"sentence",
".",
"lower",
"(",
")",
":",
"return",
"False",
"doc",
"=",
"nlp",
"(",
"sentence",
")",
"prev_word",
"=",
"None",
"for",
... | Given a string, return true if it is an infinitive phrase fragment | [
"Given",
"a",
"string",
"return",
"true",
"if",
"it",
"is",
"an",
"infinitive",
"phrase",
"fragment"
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/utils/qfragment/qfragment/infinitive_phrase_detect.py#L21-L37 |
26,154 | empirical-org/Quill-NLP-Tools-and-Datasets | quillnlp/srl.py | perform_srl | def perform_srl(responses, prompt):
""" Perform semantic role labeling on a list of responses, given a prompt."""
predictor = Predictor.from_path("https://s3-us-west-2.amazonaws.com/allennlp/models/srl-model-2018.05.25.tar.gz")
sentences = [{"sentence": prompt + " " + response} for response in responses]
output = predictor.predict_batch_json(sentences)
full_output = [{"sentence": prompt + response,
"response": response,
"srl": srl} for (response, srl) in zip(responses, output)]
return full_output | python | def perform_srl(responses, prompt):
predictor = Predictor.from_path("https://s3-us-west-2.amazonaws.com/allennlp/models/srl-model-2018.05.25.tar.gz")
sentences = [{"sentence": prompt + " " + response} for response in responses]
output = predictor.predict_batch_json(sentences)
full_output = [{"sentence": prompt + response,
"response": response,
"srl": srl} for (response, srl) in zip(responses, output)]
return full_output | [
"def",
"perform_srl",
"(",
"responses",
",",
"prompt",
")",
":",
"predictor",
"=",
"Predictor",
".",
"from_path",
"(",
"\"https://s3-us-west-2.amazonaws.com/allennlp/models/srl-model-2018.05.25.tar.gz\"",
")",
"sentences",
"=",
"[",
"{",
"\"sentence\"",
":",
"prompt",
"... | Perform semantic role labeling on a list of responses, given a prompt. | [
"Perform",
"semantic",
"role",
"labeling",
"on",
"a",
"list",
"of",
"responses",
"given",
"a",
"prompt",
"."
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/quillnlp/srl.py#L4-L16 |
26,155 | empirical-org/Quill-NLP-Tools-and-Datasets | quillnlp/utils.py | detokenize | def detokenize(s):
""" Detokenize a string by removing spaces before punctuation."""
print(s)
s = re.sub("\s+([;:,\.\?!])", "\\1", s)
s = re.sub("\s+(n't)", "\\1", s)
return s | python | def detokenize(s):
print(s)
s = re.sub("\s+([;:,\.\?!])", "\\1", s)
s = re.sub("\s+(n't)", "\\1", s)
return s | [
"def",
"detokenize",
"(",
"s",
")",
":",
"print",
"(",
"s",
")",
"s",
"=",
"re",
".",
"sub",
"(",
"\"\\s+([;:,\\.\\?!])\"",
",",
"\"\\\\1\"",
",",
"s",
")",
"s",
"=",
"re",
".",
"sub",
"(",
"\"\\s+(n't)\"",
",",
"\"\\\\1\"",
",",
"s",
")",
"return"... | Detokenize a string by removing spaces before punctuation. | [
"Detokenize",
"a",
"string",
"by",
"removing",
"spaces",
"before",
"punctuation",
"."
] | f2ff579ddf3a556d9cdc47c5f702422fa06863d9 | https://github.com/empirical-org/Quill-NLP-Tools-and-Datasets/blob/f2ff579ddf3a556d9cdc47c5f702422fa06863d9/quillnlp/utils.py#L4-L9 |
26,156 | ejeschke/ginga | ginga/misc/Task.py | Task.start | def start(self):
"""This method starts a task executing and returns immediately.
Subclass should override this method, if it has an asynchronous
way to start the task and return immediately.
"""
if self.threadPool:
self.threadPool.addTask(self)
# Lets other threads have a chance to run
time.sleep(0)
else:
raise TaskError("start(): nothing to start for task %s" % self) | python | def start(self):
if self.threadPool:
self.threadPool.addTask(self)
# Lets other threads have a chance to run
time.sleep(0)
else:
raise TaskError("start(): nothing to start for task %s" % self) | [
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"threadPool",
":",
"self",
".",
"threadPool",
".",
"addTask",
"(",
"self",
")",
"# Lets other threads have a chance to run",
"time",
".",
"sleep",
"(",
"0",
")",
"else",
":",
"raise",
"TaskError",
"... | This method starts a task executing and returns immediately.
Subclass should override this method, if it has an asynchronous
way to start the task and return immediately. | [
"This",
"method",
"starts",
"a",
"task",
"executing",
"and",
"returns",
"immediately",
".",
"Subclass",
"should",
"override",
"this",
"method",
"if",
"it",
"has",
"an",
"asynchronous",
"way",
"to",
"start",
"the",
"task",
"and",
"return",
"immediately",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Task.py#L114-L125 |
26,157 | ejeschke/ginga | ginga/misc/Task.py | Task.init_and_start | def init_and_start(self, taskParent, override={}):
"""Convenience method to initialize and start a task.
"""
tag = self.initialize(taskParent, override=override)
self.start()
return tag | python | def init_and_start(self, taskParent, override={}):
tag = self.initialize(taskParent, override=override)
self.start()
return tag | [
"def",
"init_and_start",
"(",
"self",
",",
"taskParent",
",",
"override",
"=",
"{",
"}",
")",
":",
"tag",
"=",
"self",
".",
"initialize",
"(",
"taskParent",
",",
"override",
"=",
"override",
")",
"self",
".",
"start",
"(",
")",
"return",
"tag"
] | Convenience method to initialize and start a task. | [
"Convenience",
"method",
"to",
"initialize",
"and",
"start",
"a",
"task",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Task.py#L127-L133 |
26,158 | ejeschke/ginga | ginga/misc/Task.py | Task.wait | def wait(self, timeout=None):
"""This method waits for an executing task to finish.
Subclass can override this method if necessary.
"""
self.ev_done.wait(timeout=timeout)
if not self.ev_done.is_set():
raise TaskTimeout("Task %s timed out." % self)
# --> self.result is set
# If it is an exception, then raise it in this waiter
if isinstance(self.result, Exception):
raise self.result
# Release waiters and perform callbacks
# done() has already been called, because of self.ev_done check
# "asynchronous" tasks should could call done() here
#self.done(self.result)
return self.result | python | def wait(self, timeout=None):
self.ev_done.wait(timeout=timeout)
if not self.ev_done.is_set():
raise TaskTimeout("Task %s timed out." % self)
# --> self.result is set
# If it is an exception, then raise it in this waiter
if isinstance(self.result, Exception):
raise self.result
# Release waiters and perform callbacks
# done() has already been called, because of self.ev_done check
# "asynchronous" tasks should could call done() here
#self.done(self.result)
return self.result | [
"def",
"wait",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"self",
".",
"ev_done",
".",
"wait",
"(",
"timeout",
"=",
"timeout",
")",
"if",
"not",
"self",
".",
"ev_done",
".",
"is_set",
"(",
")",
":",
"raise",
"TaskTimeout",
"(",
"\"Task %s ti... | This method waits for an executing task to finish.
Subclass can override this method if necessary. | [
"This",
"method",
"waits",
"for",
"an",
"executing",
"task",
"to",
"finish",
".",
"Subclass",
"can",
"override",
"this",
"method",
"if",
"necessary",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Task.py#L173-L192 |
26,159 | ejeschke/ginga | ginga/misc/Task.py | Task.done | def done(self, result, noraise=False):
"""This method is called when a task has finished executing.
Subclass can override this method if desired, but should call
superclass method at the end.
"""
# [??] Should this be in a critical section?
# Has done() already been called on this task?
if self.ev_done.is_set():
# ??
if isinstance(self.result, Exception) and (not noraise):
raise self.result
return self.result
# calculate running time and other finalization
self.endtime = time.time()
try:
self.totaltime = self.endtime - self.starttime
except AttributeError:
# task was not initialized properly
self.totaltime = 0.0
self.result = result
# Release thread waiters
self.ev_done.set()
# Perform callbacks for event-style waiters
self.make_callback('resolved', self.result)
# If the result is an exception, then our final act is to raise
# it in the caller, unless the caller explicitly supressed that
if isinstance(result, Exception) and (not noraise):
raise result
return result | python | def done(self, result, noraise=False):
# [??] Should this be in a critical section?
# Has done() already been called on this task?
if self.ev_done.is_set():
# ??
if isinstance(self.result, Exception) and (not noraise):
raise self.result
return self.result
# calculate running time and other finalization
self.endtime = time.time()
try:
self.totaltime = self.endtime - self.starttime
except AttributeError:
# task was not initialized properly
self.totaltime = 0.0
self.result = result
# Release thread waiters
self.ev_done.set()
# Perform callbacks for event-style waiters
self.make_callback('resolved', self.result)
# If the result is an exception, then our final act is to raise
# it in the caller, unless the caller explicitly supressed that
if isinstance(result, Exception) and (not noraise):
raise result
return result | [
"def",
"done",
"(",
"self",
",",
"result",
",",
"noraise",
"=",
"False",
")",
":",
"# [??] Should this be in a critical section?",
"# Has done() already been called on this task?",
"if",
"self",
".",
"ev_done",
".",
"is_set",
"(",
")",
":",
"# ??",
"if",
"isinstance... | This method is called when a task has finished executing.
Subclass can override this method if desired, but should call
superclass method at the end. | [
"This",
"method",
"is",
"called",
"when",
"a",
"task",
"has",
"finished",
"executing",
".",
"Subclass",
"can",
"override",
"this",
"method",
"if",
"desired",
"but",
"should",
"call",
"superclass",
"method",
"at",
"the",
"end",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Task.py#L210-L244 |
26,160 | ejeschke/ginga | ginga/misc/Task.py | Task.runTask | def runTask(self, task, timeout=None):
"""Run a child task to completion. Returns the result of
the child task.
"""
# Initialize the task.
task.initialize(self)
# Start the task.
task.start()
# Lets other threads run
time.sleep(0)
# Wait for it to finish.
res = task.wait(timeout=timeout)
# Now we're done
return res | python | def runTask(self, task, timeout=None):
# Initialize the task.
task.initialize(self)
# Start the task.
task.start()
# Lets other threads run
time.sleep(0)
# Wait for it to finish.
res = task.wait(timeout=timeout)
# Now we're done
return res | [
"def",
"runTask",
"(",
"self",
",",
"task",
",",
"timeout",
"=",
"None",
")",
":",
"# Initialize the task.",
"task",
".",
"initialize",
"(",
"self",
")",
"# Start the task.",
"task",
".",
"start",
"(",
")",
"# Lets other threads run",
"time",
".",
"sleep",
"... | Run a child task to completion. Returns the result of
the child task. | [
"Run",
"a",
"child",
"task",
"to",
"completion",
".",
"Returns",
"the",
"result",
"of",
"the",
"child",
"task",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Task.py#L263-L280 |
26,161 | ejeschke/ginga | ginga/misc/Task.py | SequentialTaskset.execute | def execute(self):
"""Run all child tasks, in order, waiting for completion of each.
Return the result of the final child task's execution.
"""
while self.index < len(self.tasklist):
res = self.step()
self.logger.debug('SeqSet task %i has completed with result %s' %
(self.index, res))
# Returns result of last task to quit
return res | python | def execute(self):
while self.index < len(self.tasklist):
res = self.step()
self.logger.debug('SeqSet task %i has completed with result %s' %
(self.index, res))
# Returns result of last task to quit
return res | [
"def",
"execute",
"(",
"self",
")",
":",
"while",
"self",
".",
"index",
"<",
"len",
"(",
"self",
".",
"tasklist",
")",
":",
"res",
"=",
"self",
".",
"step",
"(",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'SeqSet task %i has completed with result %s... | Run all child tasks, in order, waiting for completion of each.
Return the result of the final child task's execution. | [
"Run",
"all",
"child",
"tasks",
"in",
"order",
"waiting",
"for",
"completion",
"of",
"each",
".",
"Return",
"the",
"result",
"of",
"the",
"final",
"child",
"task",
"s",
"execution",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Task.py#L428-L438 |
26,162 | ejeschke/ginga | ginga/misc/Task.py | oldConcurrentAndTaskset.execute | def execute(self):
"""Run all child tasks concurrently in separate threads.
Return 0 after all child tasks have completed execution.
"""
self.count = 0
self.taskset = []
self.results = {}
self.totaltime = time.time()
# Register termination callbacks for all my child tasks.
for task in list(self.taskseq):
self.taskset.append(task)
task.add_callback('resolved', self.child_done, self.count)
self.count += 1
self.numtasks = self.count
# Now start each child task.
with self.regcond:
for task in list(self.taskset):
task.initialize(self)
task.start()
# Account for time needed to start subtasks
self.totaltime = time.time() - self.totaltime
# Now give up the critical section and wait for last child
# task to terminate.
while self.count > 0:
self.regcond.wait()
# Scan results for errors (exceptions) and raise the first one we find
for key in self.results.keys():
value = self.results[key]
if isinstance(value, Exception):
(count, task) = key
self.logger.error("Child task %s terminated with exception: %s" % (
task.tag, str(value)))
raise value
return 0 | python | def execute(self):
self.count = 0
self.taskset = []
self.results = {}
self.totaltime = time.time()
# Register termination callbacks for all my child tasks.
for task in list(self.taskseq):
self.taskset.append(task)
task.add_callback('resolved', self.child_done, self.count)
self.count += 1
self.numtasks = self.count
# Now start each child task.
with self.regcond:
for task in list(self.taskset):
task.initialize(self)
task.start()
# Account for time needed to start subtasks
self.totaltime = time.time() - self.totaltime
# Now give up the critical section and wait for last child
# task to terminate.
while self.count > 0:
self.regcond.wait()
# Scan results for errors (exceptions) and raise the first one we find
for key in self.results.keys():
value = self.results[key]
if isinstance(value, Exception):
(count, task) = key
self.logger.error("Child task %s terminated with exception: %s" % (
task.tag, str(value)))
raise value
return 0 | [
"def",
"execute",
"(",
"self",
")",
":",
"self",
".",
"count",
"=",
"0",
"self",
".",
"taskset",
"=",
"[",
"]",
"self",
".",
"results",
"=",
"{",
"}",
"self",
".",
"totaltime",
"=",
"time",
".",
"time",
"(",
")",
"# Register termination callbacks for a... | Run all child tasks concurrently in separate threads.
Return 0 after all child tasks have completed execution. | [
"Run",
"all",
"child",
"tasks",
"concurrently",
"in",
"separate",
"threads",
".",
"Return",
"0",
"after",
"all",
"child",
"tasks",
"have",
"completed",
"execution",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Task.py#L473-L514 |
26,163 | ejeschke/ginga | ginga/misc/Task.py | newConcurrentAndTaskset.execute | def execute(self):
"""Run all child tasks concurrently in separate threads.
Return last result after all child tasks have completed execution.
"""
with self._lock_c:
self.count = 0
self.numtasks = 0
self.taskset = []
self.results = {}
self.totaltime = time.time()
# Start all tasks
for task in self.taskseq:
self.taskset.append(task)
self.numtasks += 1
task.init_and_start(self)
num_tasks = self.getNumTasks()
# Wait on each task to clean up results
while num_tasks > 0:
self.check_state()
for i in range(num_tasks):
try:
try:
task = self.getTask(i)
except IndexError:
# A task got deleted from the set. Jump back out
# to outer loop and repoll the number of tasks
break
#self.logger.debug("waiting on %s" % task)
res = task.wait(timeout=self.idletime)
#self.logger.debug("finished: %s" % task)
self.child_done(res, task)
except TaskTimeout:
continue
except Exception as e:
#self.logger.warning("Subtask propagated exception: %s" % str(e))
self.child_done(e, task)
continue
# wait a bit and try again
#self.ev_quit.wait(self.idletime)
# re-get number of tasks, in case some were added or deleted
num_tasks = self.getNumTasks()
# Scan results for errors (exceptions) and raise the first one we find
for key in self.results.keys():
value = self.results[key]
if isinstance(value, Exception):
(count, task) = key
self.logger.error("Child task %s terminated with exception: %s" % (
task.tag, str(value)))
raise value
# Return value of last child to complete
return value | python | def execute(self):
with self._lock_c:
self.count = 0
self.numtasks = 0
self.taskset = []
self.results = {}
self.totaltime = time.time()
# Start all tasks
for task in self.taskseq:
self.taskset.append(task)
self.numtasks += 1
task.init_and_start(self)
num_tasks = self.getNumTasks()
# Wait on each task to clean up results
while num_tasks > 0:
self.check_state()
for i in range(num_tasks):
try:
try:
task = self.getTask(i)
except IndexError:
# A task got deleted from the set. Jump back out
# to outer loop and repoll the number of tasks
break
#self.logger.debug("waiting on %s" % task)
res = task.wait(timeout=self.idletime)
#self.logger.debug("finished: %s" % task)
self.child_done(res, task)
except TaskTimeout:
continue
except Exception as e:
#self.logger.warning("Subtask propagated exception: %s" % str(e))
self.child_done(e, task)
continue
# wait a bit and try again
#self.ev_quit.wait(self.idletime)
# re-get number of tasks, in case some were added or deleted
num_tasks = self.getNumTasks()
# Scan results for errors (exceptions) and raise the first one we find
for key in self.results.keys():
value = self.results[key]
if isinstance(value, Exception):
(count, task) = key
self.logger.error("Child task %s terminated with exception: %s" % (
task.tag, str(value)))
raise value
# Return value of last child to complete
return value | [
"def",
"execute",
"(",
"self",
")",
":",
"with",
"self",
".",
"_lock_c",
":",
"self",
".",
"count",
"=",
"0",
"self",
".",
"numtasks",
"=",
"0",
"self",
".",
"taskset",
"=",
"[",
"]",
"self",
".",
"results",
"=",
"{",
"}",
"self",
".",
"totaltime... | Run all child tasks concurrently in separate threads.
Return last result after all child tasks have completed execution. | [
"Run",
"all",
"child",
"tasks",
"concurrently",
"in",
"separate",
"threads",
".",
"Return",
"last",
"result",
"after",
"all",
"child",
"tasks",
"have",
"completed",
"execution",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Task.py#L572-L634 |
26,164 | ejeschke/ginga | ginga/misc/Task.py | WorkerThread.execute | def execute(self, task):
"""Execute a task.
"""
taskid = str(task)
res = None
try:
# Try to run the task. If we catch an exception, then
# it becomes the result.
self.time_start = time.time()
self.setstatus('executing %s' % taskid)
self.logger.debug("now executing task '%s'" % taskid)
try:
res = task.execute()
except UserTaskException as e:
res = e
except Exception as e:
self.logger.error("Task '%s' raised exception: %s" %
(str(task), str(e)))
res = e
try:
(type, value, tb) = sys.exc_info()
self.logger.debug("Traceback:\n%s" %
"".join(traceback.format_tb(tb)))
# NOTE: to avoid creating a cycle that might cause
# problems for GC--see Python library doc for sys
# module
tb = None
except Exception as e:
self.logger.debug("Traceback information unavailable.")
finally:
self.logger.debug("done executing task '%s'" % str(task))
self.setstatus('cleaning %s' % taskid)
# Wake up waiters on other threads
task.done(res, noraise=True)
self.time_start = 0.0
self.setstatus('idle') | python | def execute(self, task):
taskid = str(task)
res = None
try:
# Try to run the task. If we catch an exception, then
# it becomes the result.
self.time_start = time.time()
self.setstatus('executing %s' % taskid)
self.logger.debug("now executing task '%s'" % taskid)
try:
res = task.execute()
except UserTaskException as e:
res = e
except Exception as e:
self.logger.error("Task '%s' raised exception: %s" %
(str(task), str(e)))
res = e
try:
(type, value, tb) = sys.exc_info()
self.logger.debug("Traceback:\n%s" %
"".join(traceback.format_tb(tb)))
# NOTE: to avoid creating a cycle that might cause
# problems for GC--see Python library doc for sys
# module
tb = None
except Exception as e:
self.logger.debug("Traceback information unavailable.")
finally:
self.logger.debug("done executing task '%s'" % str(task))
self.setstatus('cleaning %s' % taskid)
# Wake up waiters on other threads
task.done(res, noraise=True)
self.time_start = 0.0
self.setstatus('idle') | [
"def",
"execute",
"(",
"self",
",",
"task",
")",
":",
"taskid",
"=",
"str",
"(",
"task",
")",
"res",
"=",
"None",
"try",
":",
"# Try to run the task. If we catch an exception, then",
"# it becomes the result.",
"self",
".",
"time_start",
"=",
"time",
".",
"time... | Execute a task. | [
"Execute",
"a",
"task",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Task.py#L868-L912 |
26,165 | ejeschke/ginga | ginga/misc/Task.py | ThreadPool.startall | def startall(self, wait=False, **kwdargs):
"""Start all of the threads in the thread pool. If _wait_ is True
then don't return until all threads are up and running. Any extra
keyword arguments are passed to the worker thread constructor.
"""
self.logger.debug("startall called")
with self.regcond:
while self.status != 'down':
if self.status in ('start', 'up') or self.ev_quit.is_set():
# For now, abandon additional request to start
self.logger.error("ignoring duplicate request to start thread pool")
return
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
#assert(self.status == 'down')
if self.ev_quit.is_set():
return
self.runningcount = 0
self.status = 'start'
self.workers = []
if wait:
tpool = self
else:
tpool = None
# Start all worker threads
self.logger.debug("starting threads in thread pool")
for i in range(self.numthreads):
t = self.workerClass(self.queue, logger=self.logger,
ev_quit=self.ev_quit, tpool=tpool,
**kwdargs)
self.workers.append(t)
t.start()
# if started with wait=True, then expect that threads will register
# themselves and last one up will set status to "up"
if wait:
# Threads are on the way up. Wait until last one starts.
while self.status != 'up' and not self.ev_quit.is_set():
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
else:
# otherwise, we just assume the pool is up
self.status = 'up'
self.logger.debug("startall done") | python | def startall(self, wait=False, **kwdargs):
self.logger.debug("startall called")
with self.regcond:
while self.status != 'down':
if self.status in ('start', 'up') or self.ev_quit.is_set():
# For now, abandon additional request to start
self.logger.error("ignoring duplicate request to start thread pool")
return
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
#assert(self.status == 'down')
if self.ev_quit.is_set():
return
self.runningcount = 0
self.status = 'start'
self.workers = []
if wait:
tpool = self
else:
tpool = None
# Start all worker threads
self.logger.debug("starting threads in thread pool")
for i in range(self.numthreads):
t = self.workerClass(self.queue, logger=self.logger,
ev_quit=self.ev_quit, tpool=tpool,
**kwdargs)
self.workers.append(t)
t.start()
# if started with wait=True, then expect that threads will register
# themselves and last one up will set status to "up"
if wait:
# Threads are on the way up. Wait until last one starts.
while self.status != 'up' and not self.ev_quit.is_set():
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
else:
# otherwise, we just assume the pool is up
self.status = 'up'
self.logger.debug("startall done") | [
"def",
"startall",
"(",
"self",
",",
"wait",
"=",
"False",
",",
"*",
"*",
"kwdargs",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"startall called\"",
")",
"with",
"self",
".",
"regcond",
":",
"while",
"self",
".",
"status",
"!=",
"'down'",
... | Start all of the threads in the thread pool. If _wait_ is True
then don't return until all threads are up and running. Any extra
keyword arguments are passed to the worker thread constructor. | [
"Start",
"all",
"of",
"the",
"threads",
"in",
"the",
"thread",
"pool",
".",
"If",
"_wait_",
"is",
"True",
"then",
"don",
"t",
"return",
"until",
"all",
"threads",
"are",
"up",
"and",
"running",
".",
"Any",
"extra",
"keyword",
"arguments",
"are",
"passed"... | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Task.py#L999-L1048 |
26,166 | ejeschke/ginga | ginga/misc/Task.py | ThreadPool.stopall | def stopall(self, wait=False):
"""Stop all threads in the worker pool. If _wait_ is True
then don't return until all threads are down.
"""
self.logger.debug("stopall called")
with self.regcond:
while self.status != 'up':
if self.status in ('stop', 'down') or self.ev_quit.is_set():
# For now, silently abandon additional request to stop
self.logger.warning("ignoring duplicate request to stop thread pool.")
return
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
#assert(self.status == 'up')
self.logger.debug("stopping threads in thread pool")
self.status = 'stop'
# Signal to all threads to terminate.
self.ev_quit.set()
if wait:
# Threads are on the way down. Wait until last one quits.
while self.status != 'down':
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
self.logger.debug("stopall done") | python | def stopall(self, wait=False):
self.logger.debug("stopall called")
with self.regcond:
while self.status != 'up':
if self.status in ('stop', 'down') or self.ev_quit.is_set():
# For now, silently abandon additional request to stop
self.logger.warning("ignoring duplicate request to stop thread pool.")
return
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
#assert(self.status == 'up')
self.logger.debug("stopping threads in thread pool")
self.status = 'stop'
# Signal to all threads to terminate.
self.ev_quit.set()
if wait:
# Threads are on the way down. Wait until last one quits.
while self.status != 'down':
self.logger.debug("waiting for threads: count=%d" %
self.runningcount)
self.regcond.wait()
self.logger.debug("stopall done") | [
"def",
"stopall",
"(",
"self",
",",
"wait",
"=",
"False",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"stopall called\"",
")",
"with",
"self",
".",
"regcond",
":",
"while",
"self",
".",
"status",
"!=",
"'up'",
":",
"if",
"self",
".",
"statu... | Stop all threads in the worker pool. If _wait_ is True
then don't return until all threads are down. | [
"Stop",
"all",
"threads",
"in",
"the",
"worker",
"pool",
".",
"If",
"_wait_",
"is",
"True",
"then",
"don",
"t",
"return",
"until",
"all",
"threads",
"are",
"down",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Task.py#L1064-L1093 |
26,167 | ejeschke/ginga | experimental/plugins/IIS_DataListener.py | wcs_pix_transform | def wcs_pix_transform(ct, i, format=0):
"""Computes the WCS corrected pixel value given a coordinate
transformation and the raw pixel value.
Input:
ct coordinate transformation. instance of coord_tran.
i raw pixel intensity.
format format string (optional).
Returns:
WCS corrected pixel value
"""
z1 = float(ct.z1)
z2 = float(ct.z2)
i = float(i)
yscale = 128.0 / (z2 - z1)
if (format == 'T' or format == 't'):
format = 1
if (i == 0):
t = 0.
else:
if (ct.zt == W_LINEAR):
t = ((i - 1) * (z2 - z1) / 199.0) + z1
t = max(z1, min(z2, t))
else:
t = float(i)
if (format > 1):
t = (z2 - t) * yscale
return (t) | python | def wcs_pix_transform(ct, i, format=0):
z1 = float(ct.z1)
z2 = float(ct.z2)
i = float(i)
yscale = 128.0 / (z2 - z1)
if (format == 'T' or format == 't'):
format = 1
if (i == 0):
t = 0.
else:
if (ct.zt == W_LINEAR):
t = ((i - 1) * (z2 - z1) / 199.0) + z1
t = max(z1, min(z2, t))
else:
t = float(i)
if (format > 1):
t = (z2 - t) * yscale
return (t) | [
"def",
"wcs_pix_transform",
"(",
"ct",
",",
"i",
",",
"format",
"=",
"0",
")",
":",
"z1",
"=",
"float",
"(",
"ct",
".",
"z1",
")",
"z2",
"=",
"float",
"(",
"ct",
".",
"z2",
")",
"i",
"=",
"float",
"(",
"i",
")",
"yscale",
"=",
"128.0",
"/",
... | Computes the WCS corrected pixel value given a coordinate
transformation and the raw pixel value.
Input:
ct coordinate transformation. instance of coord_tran.
i raw pixel intensity.
format format string (optional).
Returns:
WCS corrected pixel value | [
"Computes",
"the",
"WCS",
"corrected",
"pixel",
"value",
"given",
"a",
"coordinate",
"transformation",
"and",
"the",
"raw",
"pixel",
"value",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/experimental/plugins/IIS_DataListener.py#L947-L977 |
26,168 | ejeschke/ginga | experimental/plugins/IIS_DataListener.py | IIS_DataListener.handle_request | def handle_request(self):
"""
Handles incoming connections, one at the time.
"""
try:
(request, client_address) = self.get_request()
except socket.error as e:
# Error handling goes here.
self.logger.error("error opening the connection: %s" % (
str(e)))
for exctn in sys.exc_info():
print(exctn)
return
try:
self.RequestHandlerClass(request, client_address, self)
except Exception as e:
# Error handling goes here.
self.logger.error('error handling the request: %s' % (
str(e)))
for exctn in sys.exc_info():
print(exctn)
return | python | def handle_request(self):
try:
(request, client_address) = self.get_request()
except socket.error as e:
# Error handling goes here.
self.logger.error("error opening the connection: %s" % (
str(e)))
for exctn in sys.exc_info():
print(exctn)
return
try:
self.RequestHandlerClass(request, client_address, self)
except Exception as e:
# Error handling goes here.
self.logger.error('error handling the request: %s' % (
str(e)))
for exctn in sys.exc_info():
print(exctn)
return | [
"def",
"handle_request",
"(",
"self",
")",
":",
"try",
":",
"(",
"request",
",",
"client_address",
")",
"=",
"self",
".",
"get_request",
"(",
")",
"except",
"socket",
".",
"error",
"as",
"e",
":",
"# Error handling goes here.",
"self",
".",
"logger",
".",
... | Handles incoming connections, one at the time. | [
"Handles",
"incoming",
"connections",
"one",
"at",
"the",
"time",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/experimental/plugins/IIS_DataListener.py#L144-L167 |
26,169 | ejeschke/ginga | experimental/plugins/IIS_DataListener.py | IIS_DataListener.mainloop | def mainloop(self):
"""main control loop."""
try:
while (not self.ev_quit.is_set()):
try:
self.handle_request()
except socketTimeout:
continue
finally:
self.socket.close() | python | def mainloop(self):
try:
while (not self.ev_quit.is_set()):
try:
self.handle_request()
except socketTimeout:
continue
finally:
self.socket.close() | [
"def",
"mainloop",
"(",
"self",
")",
":",
"try",
":",
"while",
"(",
"not",
"self",
".",
"ev_quit",
".",
"is_set",
"(",
")",
")",
":",
"try",
":",
"self",
".",
"handle_request",
"(",
")",
"except",
"socketTimeout",
":",
"continue",
"finally",
":",
"se... | main control loop. | [
"main",
"control",
"loop",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/experimental/plugins/IIS_DataListener.py#L169-L179 |
26,170 | ejeschke/ginga | experimental/plugins/IIS_DataListener.py | IIS_RequestHandler.handle_feedback | def handle_feedback(self, pkt):
"""This part of the protocol is used by IRAF to erase a frame in
the framebuffers.
"""
self.logger.debug("handle feedback")
self.frame = self.decode_frameno(pkt.z & 0o7777) - 1
# erase the frame buffer
self.server.controller.init_frame(self.frame)
self.server.controller.set_frame(self.frame) | python | def handle_feedback(self, pkt):
self.logger.debug("handle feedback")
self.frame = self.decode_frameno(pkt.z & 0o7777) - 1
# erase the frame buffer
self.server.controller.init_frame(self.frame)
self.server.controller.set_frame(self.frame) | [
"def",
"handle_feedback",
"(",
"self",
",",
"pkt",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"handle feedback\"",
")",
"self",
".",
"frame",
"=",
"self",
".",
"decode_frameno",
"(",
"pkt",
".",
"z",
"&",
"0o7777",
")",
"-",
"1",
"# erase th... | This part of the protocol is used by IRAF to erase a frame in
the framebuffers. | [
"This",
"part",
"of",
"the",
"protocol",
"is",
"used",
"by",
"IRAF",
"to",
"erase",
"a",
"frame",
"in",
"the",
"framebuffers",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/experimental/plugins/IIS_DataListener.py#L379-L388 |
26,171 | ejeschke/ginga | experimental/plugins/IIS_DataListener.py | IIS_RequestHandler.handle_lut | def handle_lut(self, pkt):
"""This part of the protocol is used by IRAF to set the frame number.
"""
self.logger.debug("handle lut")
if pkt.subunit & COMMAND:
data_type = str(pkt.nbytes / 2) + 'h'
#size = struct.calcsize(data_type)
line = pkt.datain.read(pkt.nbytes)
n = len(line)
if (n < pkt.nbytes):
return
try:
x = struct.unpack(data_type, line)
except Exception as e:
self.logger.error("Error unpacking struct: %s" % (str(e)))
return
if len(x) < 14:
# pad it with zeroes
y = []
for i in range(14):
try:
y.append(x[i])
except Exception:
y.append(0)
x = y
del(y)
if len(x) == 14:
z = int(x[0])
# frames start from 1, we start from 0
self.frame = self.decode_frameno(z) - 1
if (self.frame > MAX_FRAMES):
self.logger.error("attempt to select non existing frame.")
return
# init the framebuffer
#self.server.controller.init_frame(self.frame)
try:
self.server.controller.get_frame(self.frame)
except KeyError:
self.server.controller.init_frame(self.frame)
return
self.logger.error("unable to select a frame.")
return
self.logger.error("what shall I do?") | python | def handle_lut(self, pkt):
self.logger.debug("handle lut")
if pkt.subunit & COMMAND:
data_type = str(pkt.nbytes / 2) + 'h'
#size = struct.calcsize(data_type)
line = pkt.datain.read(pkt.nbytes)
n = len(line)
if (n < pkt.nbytes):
return
try:
x = struct.unpack(data_type, line)
except Exception as e:
self.logger.error("Error unpacking struct: %s" % (str(e)))
return
if len(x) < 14:
# pad it with zeroes
y = []
for i in range(14):
try:
y.append(x[i])
except Exception:
y.append(0)
x = y
del(y)
if len(x) == 14:
z = int(x[0])
# frames start from 1, we start from 0
self.frame = self.decode_frameno(z) - 1
if (self.frame > MAX_FRAMES):
self.logger.error("attempt to select non existing frame.")
return
# init the framebuffer
#self.server.controller.init_frame(self.frame)
try:
self.server.controller.get_frame(self.frame)
except KeyError:
self.server.controller.init_frame(self.frame)
return
self.logger.error("unable to select a frame.")
return
self.logger.error("what shall I do?") | [
"def",
"handle_lut",
"(",
"self",
",",
"pkt",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"handle lut\"",
")",
"if",
"pkt",
".",
"subunit",
"&",
"COMMAND",
":",
"data_type",
"=",
"str",
"(",
"pkt",
".",
"nbytes",
"/",
"2",
")",
"+",
"'h'"... | This part of the protocol is used by IRAF to set the frame number. | [
"This",
"part",
"of",
"the",
"protocol",
"is",
"used",
"by",
"IRAF",
"to",
"set",
"the",
"frame",
"number",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/experimental/plugins/IIS_DataListener.py#L390-L438 |
26,172 | ejeschke/ginga | experimental/plugins/IIS_DataListener.py | IIS_RequestHandler.handle_imcursor | def handle_imcursor(self, pkt):
"""This part of the protocol is used by IRAF to read the cursor
position and keystrokes from the display client.
"""
self.logger.debug("handle imcursor")
if pkt.tid & IIS_READ:
if pkt.tid & IMC_SAMPLE:
self.logger.debug("SAMPLE")
# return the cursor position
wcsflag = int(pkt.z)
#wcsflag = 0
res = self.server.controller.get_keystroke()
self.return_cursor(pkt.dataout, res.x, res.y,
res.frame, wcsflag, '0', '')
else:
self.logger.debug("OTHER")
res = self.server.controller.get_keystroke()
self.logger.debug("FRAME=%d X,Y=%f,%f" % (
res.frame, res.x, res.y))
## sx = self.x
self.x = res.x
self.y = res.y
self.frame = res.frame
## sy = self.y
## frame = self.frame
#wcsflag = 1
wcsflag = 0
#self.return_cursor(pkt.dataout, sx, sy, frame, 1, key, '')
self.return_cursor(pkt.dataout, res.x, res.y,
res.frame, wcsflag, res.key, '')
else:
self.logger.debug("READ")
# read the cursor position in logical coordinates
sx = int(pkt.x)
sy = int(pkt.y)
wx = float(pkt.x)
wy = float(pkt.y)
wcs = int(pkt.z)
if wcs:
# decode the WCS info for the current frame
try:
fb = self.server.controller.get_frame(self.frame)
except KeyError:
# the selected frame does not exist, create it
fb = self.server.controller.init_frame(self.frame)
fb.ct = self.wcs_update(fb.wcs)
if fb.ct.valid:
if abs(fb.ct.a) > 0.001:
sx = int((wx - fb.ct.tx) / fb.ct.a)
if abs(fb.ct.d) > 0.001:
sy = int((wy - fb.ct.ty) / fb.ct.d)
self.server.controller.set_cursor(sx, sy) | python | def handle_imcursor(self, pkt):
self.logger.debug("handle imcursor")
if pkt.tid & IIS_READ:
if pkt.tid & IMC_SAMPLE:
self.logger.debug("SAMPLE")
# return the cursor position
wcsflag = int(pkt.z)
#wcsflag = 0
res = self.server.controller.get_keystroke()
self.return_cursor(pkt.dataout, res.x, res.y,
res.frame, wcsflag, '0', '')
else:
self.logger.debug("OTHER")
res = self.server.controller.get_keystroke()
self.logger.debug("FRAME=%d X,Y=%f,%f" % (
res.frame, res.x, res.y))
## sx = self.x
self.x = res.x
self.y = res.y
self.frame = res.frame
## sy = self.y
## frame = self.frame
#wcsflag = 1
wcsflag = 0
#self.return_cursor(pkt.dataout, sx, sy, frame, 1, key, '')
self.return_cursor(pkt.dataout, res.x, res.y,
res.frame, wcsflag, res.key, '')
else:
self.logger.debug("READ")
# read the cursor position in logical coordinates
sx = int(pkt.x)
sy = int(pkt.y)
wx = float(pkt.x)
wy = float(pkt.y)
wcs = int(pkt.z)
if wcs:
# decode the WCS info for the current frame
try:
fb = self.server.controller.get_frame(self.frame)
except KeyError:
# the selected frame does not exist, create it
fb = self.server.controller.init_frame(self.frame)
fb.ct = self.wcs_update(fb.wcs)
if fb.ct.valid:
if abs(fb.ct.a) > 0.001:
sx = int((wx - fb.ct.tx) / fb.ct.a)
if abs(fb.ct.d) > 0.001:
sy = int((wy - fb.ct.ty) / fb.ct.d)
self.server.controller.set_cursor(sx, sy) | [
"def",
"handle_imcursor",
"(",
"self",
",",
"pkt",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\"handle imcursor\"",
")",
"if",
"pkt",
".",
"tid",
"&",
"IIS_READ",
":",
"if",
"pkt",
".",
"tid",
"&",
"IMC_SAMPLE",
":",
"self",
".",
"logger",
... | This part of the protocol is used by IRAF to read the cursor
position and keystrokes from the display client. | [
"This",
"part",
"of",
"the",
"protocol",
"is",
"used",
"by",
"IRAF",
"to",
"read",
"the",
"cursor",
"position",
"and",
"keystrokes",
"from",
"the",
"display",
"client",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/experimental/plugins/IIS_DataListener.py#L632-L689 |
26,173 | ejeschke/ginga | experimental/plugins/IIS_DataListener.py | IIS_RequestHandler.handle | def handle(self):
"""
This is where the action starts.
"""
self.logger = self.server.logger
# create a packet structure
packet = iis()
packet.datain = self.rfile
packet.dataout = self.wfile
# decode the header
size = struct.calcsize('8h')
line = packet.datain.read(size)
n = len(line)
if n < size:
return
while n > 0:
try:
bytes = struct.unpack('8h', line)
except Exception:
self.logger.error('error unpacking the data.')
for exctn in sys.exc_info():
print(exctn)
# TODO: verify checksum
# decode the packet fields
subunit = bytes[2]
subunit077 = subunit & 0o77
tid = bytes[0]
x = bytes[4] & 0o177777
y = bytes[5] & 0o177777
z = bytes[6] & 0o177777
t = bytes[7] & 0o17777
ndatabytes = - bytes[1]
# are the bytes packed?
if (not(tid & PACKED)):
ndatabytes *= 2
# populate the packet structure
packet.subunit = subunit
packet.subunit077 = subunit077
packet.tid = tid
packet.x = x
packet.y = y
packet.z = z
packet.t = t
packet.nbytes = ndatabytes
# decide what to do, depending on the
# value of subunit
self.logger.debug("PACKET IS %o" % packet.subunit)
if packet.subunit077 == FEEDBACK:
self.handle_feedback(packet)
elif packet.subunit077 == LUT:
self.handle_lut(packet)
# read the next packet
line = packet.datain.read(size)
n = len(line)
continue
elif packet.subunit077 == MEMORY:
self.handle_memory(packet)
if self.needs_update:
#self.display_image()
pass
# read the next packet
line = packet.datain.read(size)
n = len(line)
continue
elif packet.subunit077 == WCS:
self.handle_wcs(packet)
line = packet.datain.read(size)
n = len(line)
continue
elif packet.subunit077 == IMCURSOR:
self.handle_imcursor(packet)
line = packet.datain.read(size)
n = len(line)
continue
else:
self.logger.debug('?NO OP (0%o)' % (packet.subunit077))
if not (packet.tid & IIS_READ):
# OK, discard the rest of the data
nbytes = packet.nbytes
while nbytes > 0:
# for (nbytes = ndatabytes; nbytes > 0; nbytes -= n):
if nbytes < SZ_FIFOBUF:
n = nbytes
else:
n = SZ_FIFOBUF
m = self.rfile.read(n)
if m <= 0:
break
nbytes -= n
# read the next packet
line = packet.datain.read(size)
n = len(line)
if n < size:
return
# <--- end of the while (n) loop
if self.needs_update:
self.display_image()
self.needs_update = False | python | def handle(self):
self.logger = self.server.logger
# create a packet structure
packet = iis()
packet.datain = self.rfile
packet.dataout = self.wfile
# decode the header
size = struct.calcsize('8h')
line = packet.datain.read(size)
n = len(line)
if n < size:
return
while n > 0:
try:
bytes = struct.unpack('8h', line)
except Exception:
self.logger.error('error unpacking the data.')
for exctn in sys.exc_info():
print(exctn)
# TODO: verify checksum
# decode the packet fields
subunit = bytes[2]
subunit077 = subunit & 0o77
tid = bytes[0]
x = bytes[4] & 0o177777
y = bytes[5] & 0o177777
z = bytes[6] & 0o177777
t = bytes[7] & 0o17777
ndatabytes = - bytes[1]
# are the bytes packed?
if (not(tid & PACKED)):
ndatabytes *= 2
# populate the packet structure
packet.subunit = subunit
packet.subunit077 = subunit077
packet.tid = tid
packet.x = x
packet.y = y
packet.z = z
packet.t = t
packet.nbytes = ndatabytes
# decide what to do, depending on the
# value of subunit
self.logger.debug("PACKET IS %o" % packet.subunit)
if packet.subunit077 == FEEDBACK:
self.handle_feedback(packet)
elif packet.subunit077 == LUT:
self.handle_lut(packet)
# read the next packet
line = packet.datain.read(size)
n = len(line)
continue
elif packet.subunit077 == MEMORY:
self.handle_memory(packet)
if self.needs_update:
#self.display_image()
pass
# read the next packet
line = packet.datain.read(size)
n = len(line)
continue
elif packet.subunit077 == WCS:
self.handle_wcs(packet)
line = packet.datain.read(size)
n = len(line)
continue
elif packet.subunit077 == IMCURSOR:
self.handle_imcursor(packet)
line = packet.datain.read(size)
n = len(line)
continue
else:
self.logger.debug('?NO OP (0%o)' % (packet.subunit077))
if not (packet.tid & IIS_READ):
# OK, discard the rest of the data
nbytes = packet.nbytes
while nbytes > 0:
# for (nbytes = ndatabytes; nbytes > 0; nbytes -= n):
if nbytes < SZ_FIFOBUF:
n = nbytes
else:
n = SZ_FIFOBUF
m = self.rfile.read(n)
if m <= 0:
break
nbytes -= n
# read the next packet
line = packet.datain.read(size)
n = len(line)
if n < size:
return
# <--- end of the while (n) loop
if self.needs_update:
self.display_image()
self.needs_update = False | [
"def",
"handle",
"(",
"self",
")",
":",
"self",
".",
"logger",
"=",
"self",
".",
"server",
".",
"logger",
"# create a packet structure",
"packet",
"=",
"iis",
"(",
")",
"packet",
".",
"datain",
"=",
"self",
".",
"rfile",
"packet",
".",
"dataout",
"=",
... | This is where the action starts. | [
"This",
"is",
"where",
"the",
"action",
"starts",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/experimental/plugins/IIS_DataListener.py#L691-L804 |
26,174 | ejeschke/ginga | experimental/plugins/IIS_DataListener.py | IIS_RequestHandler.display_image | def display_image(self, reset=1):
"""Utility routine used to display an updated frame from a framebuffer.
"""
try:
fb = self.server.controller.get_frame(self.frame)
except KeyError:
# the selected frame does not exist, create it
fb = self.server.controller.init_frame(self.frame)
if not fb.height:
width = fb.width
height = int(len(fb.buffer) / width)
fb.height = height
# display the image
if (len(fb.buffer) > 0) and (height > 0):
self.server.controller.display(self.frame, width, height,
True)
else:
self.server.controller.display(self.frame, fb.width, fb.height,
False) | python | def display_image(self, reset=1):
try:
fb = self.server.controller.get_frame(self.frame)
except KeyError:
# the selected frame does not exist, create it
fb = self.server.controller.init_frame(self.frame)
if not fb.height:
width = fb.width
height = int(len(fb.buffer) / width)
fb.height = height
# display the image
if (len(fb.buffer) > 0) and (height > 0):
self.server.controller.display(self.frame, width, height,
True)
else:
self.server.controller.display(self.frame, fb.width, fb.height,
False) | [
"def",
"display_image",
"(",
"self",
",",
"reset",
"=",
"1",
")",
":",
"try",
":",
"fb",
"=",
"self",
".",
"server",
".",
"controller",
".",
"get_frame",
"(",
"self",
".",
"frame",
")",
"except",
"KeyError",
":",
"# the selected frame does not exist, create ... | Utility routine used to display an updated frame from a framebuffer. | [
"Utility",
"routine",
"used",
"to",
"display",
"an",
"updated",
"frame",
"from",
"a",
"framebuffer",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/experimental/plugins/IIS_DataListener.py#L806-L826 |
26,175 | ejeschke/ginga | ginga/rv/plugins/Contents.py | Contents._highlight_path | def _highlight_path(self, hl_path, tf):
"""Highlight or unhighlight a single entry.
Examples
--------
>>> hl_path = self._get_hl_key(chname, image)
>>> self._highlight_path(hl_path, True)
"""
fc = self.settings.get('row_font_color', 'green')
try:
self.treeview.highlight_path(hl_path, tf, font_color=fc)
except Exception as e:
self.logger.info('Error changing highlight on treeview path '
'({0}): {1}'.format(hl_path, str(e))) | python | def _highlight_path(self, hl_path, tf):
fc = self.settings.get('row_font_color', 'green')
try:
self.treeview.highlight_path(hl_path, tf, font_color=fc)
except Exception as e:
self.logger.info('Error changing highlight on treeview path '
'({0}): {1}'.format(hl_path, str(e))) | [
"def",
"_highlight_path",
"(",
"self",
",",
"hl_path",
",",
"tf",
")",
":",
"fc",
"=",
"self",
".",
"settings",
".",
"get",
"(",
"'row_font_color'",
",",
"'green'",
")",
"try",
":",
"self",
".",
"treeview",
".",
"highlight_path",
"(",
"hl_path",
",",
"... | Highlight or unhighlight a single entry.
Examples
--------
>>> hl_path = self._get_hl_key(chname, image)
>>> self._highlight_path(hl_path, True) | [
"Highlight",
"or",
"unhighlight",
"a",
"single",
"entry",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Contents.py#L403-L418 |
26,176 | ejeschke/ginga | ginga/rv/plugins/Contents.py | Contents.update_highlights | def update_highlights(self, old_highlight_set, new_highlight_set):
"""Unhighlight the entries represented by ``old_highlight_set``
and highlight the ones represented by ``new_highlight_set``.
Both are sets of keys.
"""
if not self.gui_up:
return
un_hilite_set = old_highlight_set - new_highlight_set
re_hilite_set = new_highlight_set - old_highlight_set
# unhighlight entries that should NOT be highlighted any more
for key in un_hilite_set:
self._highlight_path(key, False)
# highlight new entries that should be
for key in re_hilite_set:
self._highlight_path(key, True) | python | def update_highlights(self, old_highlight_set, new_highlight_set):
if not self.gui_up:
return
un_hilite_set = old_highlight_set - new_highlight_set
re_hilite_set = new_highlight_set - old_highlight_set
# unhighlight entries that should NOT be highlighted any more
for key in un_hilite_set:
self._highlight_path(key, False)
# highlight new entries that should be
for key in re_hilite_set:
self._highlight_path(key, True) | [
"def",
"update_highlights",
"(",
"self",
",",
"old_highlight_set",
",",
"new_highlight_set",
")",
":",
"if",
"not",
"self",
".",
"gui_up",
":",
"return",
"un_hilite_set",
"=",
"old_highlight_set",
"-",
"new_highlight_set",
"re_hilite_set",
"=",
"new_highlight_set",
... | Unhighlight the entries represented by ``old_highlight_set``
and highlight the ones represented by ``new_highlight_set``.
Both are sets of keys. | [
"Unhighlight",
"the",
"entries",
"represented",
"by",
"old_highlight_set",
"and",
"highlight",
"the",
"ones",
"represented",
"by",
"new_highlight_set",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Contents.py#L420-L439 |
26,177 | ejeschke/ginga | ginga/rv/plugins/Catalogs.py | CatalogListing.show_selection | def show_selection(self, star):
"""This method is called when the user clicks on a plotted star in the
fitsviewer.
"""
try:
# NOTE: this works around a quirk of Qt widget set where
# selecting programatically in the table triggers the widget
# selection callback (see select_star_cb() in Catalogs.py for Qt)
self._select_flag = True
self.mark_selection(star)
finally:
self._select_flag = False | python | def show_selection(self, star):
try:
# NOTE: this works around a quirk of Qt widget set where
# selecting programatically in the table triggers the widget
# selection callback (see select_star_cb() in Catalogs.py for Qt)
self._select_flag = True
self.mark_selection(star)
finally:
self._select_flag = False | [
"def",
"show_selection",
"(",
"self",
",",
"star",
")",
":",
"try",
":",
"# NOTE: this works around a quirk of Qt widget set where",
"# selecting programatically in the table triggers the widget",
"# selection callback (see select_star_cb() in Catalogs.py for Qt)",
"self",
".",
"_selec... | This method is called when the user clicks on a plotted star in the
fitsviewer. | [
"This",
"method",
"is",
"called",
"when",
"the",
"user",
"clicks",
"on",
"a",
"plotted",
"star",
"in",
"the",
"fitsviewer",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Catalogs.py#L1028-L1040 |
26,178 | ejeschke/ginga | ginga/rv/plugins/Catalogs.py | CatalogListing.select_star_cb | def select_star_cb(self, widget, res_dict):
"""This method is called when the user selects a star from the table.
"""
keys = list(res_dict.keys())
if len(keys) == 0:
self.selected = []
self.replot_stars()
else:
idx = int(keys[0])
star = self.starlist[idx]
if not self._select_flag:
self.mark_selection(star, fromtable=True)
return True | python | def select_star_cb(self, widget, res_dict):
keys = list(res_dict.keys())
if len(keys) == 0:
self.selected = []
self.replot_stars()
else:
idx = int(keys[0])
star = self.starlist[idx]
if not self._select_flag:
self.mark_selection(star, fromtable=True)
return True | [
"def",
"select_star_cb",
"(",
"self",
",",
"widget",
",",
"res_dict",
")",
":",
"keys",
"=",
"list",
"(",
"res_dict",
".",
"keys",
"(",
")",
")",
"if",
"len",
"(",
"keys",
")",
"==",
"0",
":",
"self",
".",
"selected",
"=",
"[",
"]",
"self",
".",
... | This method is called when the user selects a star from the table. | [
"This",
"method",
"is",
"called",
"when",
"the",
"user",
"selects",
"a",
"star",
"from",
"the",
"table",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Catalogs.py#L1308-L1320 |
26,179 | ejeschke/ginga | ginga/BaseImage.py | BaseImage._calc_order | def _calc_order(self, order):
"""Called to set the order of a multi-channel image.
The order should be determined by the loader, but this will
make a best guess if passed `order` is `None`.
"""
if order is not None and order != '':
self.order = order.upper()
else:
shape = self.shape
if len(shape) <= 2:
self.order = 'M'
else:
depth = shape[-1]
# TODO: need something better here than a guess!
if depth == 1:
self.order = 'M'
elif depth == 2:
self.order = 'AM'
elif depth == 3:
self.order = 'RGB'
elif depth == 4:
self.order = 'RGBA' | python | def _calc_order(self, order):
if order is not None and order != '':
self.order = order.upper()
else:
shape = self.shape
if len(shape) <= 2:
self.order = 'M'
else:
depth = shape[-1]
# TODO: need something better here than a guess!
if depth == 1:
self.order = 'M'
elif depth == 2:
self.order = 'AM'
elif depth == 3:
self.order = 'RGB'
elif depth == 4:
self.order = 'RGBA' | [
"def",
"_calc_order",
"(",
"self",
",",
"order",
")",
":",
"if",
"order",
"is",
"not",
"None",
"and",
"order",
"!=",
"''",
":",
"self",
".",
"order",
"=",
"order",
".",
"upper",
"(",
")",
"else",
":",
"shape",
"=",
"self",
".",
"shape",
"if",
"le... | Called to set the order of a multi-channel image.
The order should be determined by the loader, but this will
make a best guess if passed `order` is `None`. | [
"Called",
"to",
"set",
"the",
"order",
"of",
"a",
"multi",
"-",
"channel",
"image",
".",
"The",
"order",
"should",
"be",
"determined",
"by",
"the",
"loader",
"but",
"this",
"will",
"make",
"a",
"best",
"guess",
"if",
"passed",
"order",
"is",
"None",
".... | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/BaseImage.py#L221-L242 |
26,180 | ejeschke/ginga | ginga/BaseImage.py | BaseImage.cutout_data | def cutout_data(self, x1, y1, x2, y2, xstep=1, ystep=1, astype=None):
"""cut out data area based on coords.
"""
view = np.s_[y1:y2:ystep, x1:x2:xstep]
data = self._slice(view)
if astype:
data = data.astype(astype, copy=False)
return data | python | def cutout_data(self, x1, y1, x2, y2, xstep=1, ystep=1, astype=None):
view = np.s_[y1:y2:ystep, x1:x2:xstep]
data = self._slice(view)
if astype:
data = data.astype(astype, copy=False)
return data | [
"def",
"cutout_data",
"(",
"self",
",",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
",",
"xstep",
"=",
"1",
",",
"ystep",
"=",
"1",
",",
"astype",
"=",
"None",
")",
":",
"view",
"=",
"np",
".",
"s_",
"[",
"y1",
":",
"y2",
":",
"ystep",
",",
"x1"... | cut out data area based on coords. | [
"cut",
"out",
"data",
"area",
"based",
"on",
"coords",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/BaseImage.py#L293-L300 |
26,181 | ejeschke/ginga | ginga/BaseImage.py | BaseImage.get_shape_mask | def get_shape_mask(self, shape_obj):
"""
Return full mask where True marks pixels within the given shape.
"""
wd, ht = self.get_size()
yi = np.mgrid[:ht].reshape(-1, 1)
xi = np.mgrid[:wd].reshape(1, -1)
pts = np.asarray((xi, yi)).T
contains = shape_obj.contains_pts(pts)
return contains | python | def get_shape_mask(self, shape_obj):
wd, ht = self.get_size()
yi = np.mgrid[:ht].reshape(-1, 1)
xi = np.mgrid[:wd].reshape(1, -1)
pts = np.asarray((xi, yi)).T
contains = shape_obj.contains_pts(pts)
return contains | [
"def",
"get_shape_mask",
"(",
"self",
",",
"shape_obj",
")",
":",
"wd",
",",
"ht",
"=",
"self",
".",
"get_size",
"(",
")",
"yi",
"=",
"np",
".",
"mgrid",
"[",
":",
"ht",
"]",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"xi",
"=",
"np",
".",... | Return full mask where True marks pixels within the given shape. | [
"Return",
"full",
"mask",
"where",
"True",
"marks",
"pixels",
"within",
"the",
"given",
"shape",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/BaseImage.py#L348-L357 |
26,182 | ejeschke/ginga | ginga/BaseImage.py | BaseImage.get_shape_view | def get_shape_view(self, shape_obj, avoid_oob=True):
"""
Calculate a bounding box in the data enclosing `shape_obj` and
return a view that accesses it and a mask that is True only for
pixels enclosed in the region.
If `avoid_oob` is True (default) then the bounding box is clipped
to avoid coordinates outside of the actual data.
"""
x1, y1, x2, y2 = [int(np.round(n)) for n in shape_obj.get_llur()]
if avoid_oob:
# avoid out of bounds indexes
wd, ht = self.get_size()
x1, x2 = max(0, x1), min(x2, wd - 1)
y1, y2 = max(0, y1), min(y2, ht - 1)
# calculate pixel containment mask in bbox
yi = np.mgrid[y1:y2 + 1].reshape(-1, 1)
xi = np.mgrid[x1:x2 + 1].reshape(1, -1)
pts = np.asarray((xi, yi)).T
contains = shape_obj.contains_pts(pts)
view = np.s_[y1:y2 + 1, x1:x2 + 1]
return (view, contains) | python | def get_shape_view(self, shape_obj, avoid_oob=True):
x1, y1, x2, y2 = [int(np.round(n)) for n in shape_obj.get_llur()]
if avoid_oob:
# avoid out of bounds indexes
wd, ht = self.get_size()
x1, x2 = max(0, x1), min(x2, wd - 1)
y1, y2 = max(0, y1), min(y2, ht - 1)
# calculate pixel containment mask in bbox
yi = np.mgrid[y1:y2 + 1].reshape(-1, 1)
xi = np.mgrid[x1:x2 + 1].reshape(1, -1)
pts = np.asarray((xi, yi)).T
contains = shape_obj.contains_pts(pts)
view = np.s_[y1:y2 + 1, x1:x2 + 1]
return (view, contains) | [
"def",
"get_shape_view",
"(",
"self",
",",
"shape_obj",
",",
"avoid_oob",
"=",
"True",
")",
":",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
"=",
"[",
"int",
"(",
"np",
".",
"round",
"(",
"n",
")",
")",
"for",
"n",
"in",
"shape_obj",
".",
"get_llur",
... | Calculate a bounding box in the data enclosing `shape_obj` and
return a view that accesses it and a mask that is True only for
pixels enclosed in the region.
If `avoid_oob` is True (default) then the bounding box is clipped
to avoid coordinates outside of the actual data. | [
"Calculate",
"a",
"bounding",
"box",
"in",
"the",
"data",
"enclosing",
"shape_obj",
"and",
"return",
"a",
"view",
"that",
"accesses",
"it",
"and",
"a",
"mask",
"that",
"is",
"True",
"only",
"for",
"pixels",
"enclosed",
"in",
"the",
"region",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/BaseImage.py#L359-L383 |
26,183 | ejeschke/ginga | ginga/BaseImage.py | BaseImage.cutout_shape | def cutout_shape(self, shape_obj):
"""
Cut out and return a portion of the data corresponding to `shape_obj`.
A masked numpy array is returned, where the pixels not enclosed in
the shape are masked out.
"""
view, mask = self.get_shape_view(shape_obj)
# cutout our enclosing (possibly shortened) bbox
data = self._slice(view)
# mask non-containing members
mdata = np.ma.array(data, mask=np.logical_not(mask))
return mdata | python | def cutout_shape(self, shape_obj):
view, mask = self.get_shape_view(shape_obj)
# cutout our enclosing (possibly shortened) bbox
data = self._slice(view)
# mask non-containing members
mdata = np.ma.array(data, mask=np.logical_not(mask))
return mdata | [
"def",
"cutout_shape",
"(",
"self",
",",
"shape_obj",
")",
":",
"view",
",",
"mask",
"=",
"self",
".",
"get_shape_view",
"(",
"shape_obj",
")",
"# cutout our enclosing (possibly shortened) bbox",
"data",
"=",
"self",
".",
"_slice",
"(",
"view",
")",
"# mask non-... | Cut out and return a portion of the data corresponding to `shape_obj`.
A masked numpy array is returned, where the pixels not enclosed in
the shape are masked out. | [
"Cut",
"out",
"and",
"return",
"a",
"portion",
"of",
"the",
"data",
"corresponding",
"to",
"shape_obj",
".",
"A",
"masked",
"numpy",
"array",
"is",
"returned",
"where",
"the",
"pixels",
"not",
"enclosed",
"in",
"the",
"shape",
"are",
"masked",
"out",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/BaseImage.py#L385-L399 |
26,184 | ejeschke/ginga | ginga/misc/Callback.py | Callbacks.remove_callback | def remove_callback(self, name, fn, *args, **kwargs):
"""Remove a specific callback that was added.
"""
try:
tup = (fn, args, kwargs)
if tup in self.cb[name]:
self.cb[name].remove(tup)
except KeyError:
raise CallbackError("No callback category of '%s'" % (
name)) | python | def remove_callback(self, name, fn, *args, **kwargs):
try:
tup = (fn, args, kwargs)
if tup in self.cb[name]:
self.cb[name].remove(tup)
except KeyError:
raise CallbackError("No callback category of '%s'" % (
name)) | [
"def",
"remove_callback",
"(",
"self",
",",
"name",
",",
"fn",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"tup",
"=",
"(",
"fn",
",",
"args",
",",
"kwargs",
")",
"if",
"tup",
"in",
"self",
".",
"cb",
"[",
"name",
"]",
":... | Remove a specific callback that was added. | [
"Remove",
"a",
"specific",
"callback",
"that",
"was",
"added",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/misc/Callback.py#L83-L92 |
26,185 | ejeschke/ginga | ginga/qtw/QtHelp.py | cmap2pixmap | def cmap2pixmap(cmap, steps=50):
"""Convert a Ginga colormap into a QPixmap
"""
import numpy as np
inds = np.linspace(0, 1, steps)
n = len(cmap.clst) - 1
tups = [cmap.clst[int(x * n)] for x in inds]
rgbas = [QColor(int(r * 255), int(g * 255),
int(b * 255), 255).rgba() for r, g, b in tups]
im = QImage(steps, 1, QImage.Format_Indexed8)
im.setColorTable(rgbas)
for i in range(steps):
im.setPixel(i, 0, i)
im = im.scaled(128, 32)
pm = QPixmap.fromImage(im)
return pm | python | def cmap2pixmap(cmap, steps=50):
import numpy as np
inds = np.linspace(0, 1, steps)
n = len(cmap.clst) - 1
tups = [cmap.clst[int(x * n)] for x in inds]
rgbas = [QColor(int(r * 255), int(g * 255),
int(b * 255), 255).rgba() for r, g, b in tups]
im = QImage(steps, 1, QImage.Format_Indexed8)
im.setColorTable(rgbas)
for i in range(steps):
im.setPixel(i, 0, i)
im = im.scaled(128, 32)
pm = QPixmap.fromImage(im)
return pm | [
"def",
"cmap2pixmap",
"(",
"cmap",
",",
"steps",
"=",
"50",
")",
":",
"import",
"numpy",
"as",
"np",
"inds",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"1",
",",
"steps",
")",
"n",
"=",
"len",
"(",
"cmap",
".",
"clst",
")",
"-",
"1",
"tups",
... | Convert a Ginga colormap into a QPixmap | [
"Convert",
"a",
"Ginga",
"colormap",
"into",
"a",
"QPixmap"
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/qtw/QtHelp.py#L293-L309 |
26,186 | ejeschke/ginga | ginga/qtw/QtHelp.py | Timer.start | def start(self, duration=None):
"""Start the timer. If `duration` is not None, it should
specify the time to expiration in seconds.
"""
if duration is None:
duration = self.duration
self.set(duration) | python | def start(self, duration=None):
if duration is None:
duration = self.duration
self.set(duration) | [
"def",
"start",
"(",
"self",
",",
"duration",
"=",
"None",
")",
":",
"if",
"duration",
"is",
"None",
":",
"duration",
"=",
"self",
".",
"duration",
"self",
".",
"set",
"(",
"duration",
")"
] | Start the timer. If `duration` is not None, it should
specify the time to expiration in seconds. | [
"Start",
"the",
"timer",
".",
"If",
"duration",
"is",
"not",
"None",
"it",
"should",
"specify",
"the",
"time",
"to",
"expiration",
"in",
"seconds",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/qtw/QtHelp.py#L258-L265 |
26,187 | ejeschke/ginga | ginga/rv/plugins/ScreenShot.py | ScreenShot._snap_cb | def _snap_cb(self, w):
"""This function is called when the user clicks the 'Snap' button.
"""
# Clear the snap image viewer
self.scrnimage.clear()
self.scrnimage.redraw_now(whence=0)
self.fv.update_pending()
format = self.tosave_type
if self._screen_size:
# snap image using actual viewer
self.fv.error_wrap(self.fitsimage.save_rgb_image_as_file,
self.tmpname, format=format)
else:
# we will be using shot generator, not actual viewer.
# check that shot generator size matches UI params
self.check_and_adjust_dimensions()
# copy background color of viewer to shot generator
bg = self.fitsimage.get_bg()
self.shot_generator.set_bg(*bg)
# add the main canvas from channel viewer to shot generator
c1 = self.fitsimage.get_canvas()
c2 = self.shot_generator.get_canvas()
c2.delete_all_objects(redraw=False)
c2.add(c1, redraw=False)
# hack to fix a few problem graphics
self.shot_generator._imgobj = self.fitsimage._imgobj
# scale of the shot generator should be the scale of channel
# viewer multiplied by the ratio of window sizes
scale_x, scale_y = self.fitsimage.get_scale_xy()
c1_wd, c1_ht = self.fitsimage.get_window_size()
c2_wd, c2_ht = self.shot_generator.get_window_size()
scale_wd = float(c2_wd) / float(c1_wd)
scale_ht = float(c2_ht) / float(c1_ht)
scale = max(scale_wd, scale_ht)
scale_x *= scale
scale_y *= scale
self.shot_generator.scale_to(scale_x, scale_y)
self.fitsimage.copy_attributes(self.shot_generator,
self.transfer_attrs)
# snap image
self.fv.error_wrap(self.shot_generator.save_rgb_image_as_file,
self.tmpname, format=format)
c2.delete_all_objects(redraw=False)
self.shot_generator._imgobj = None
self.saved_type = format
img = RGBImage(logger=self.logger)
img.load_file(self.tmpname)
# load the snapped image into the screenshot viewer
self.scrnimage.set_image(img) | python | def _snap_cb(self, w):
# Clear the snap image viewer
self.scrnimage.clear()
self.scrnimage.redraw_now(whence=0)
self.fv.update_pending()
format = self.tosave_type
if self._screen_size:
# snap image using actual viewer
self.fv.error_wrap(self.fitsimage.save_rgb_image_as_file,
self.tmpname, format=format)
else:
# we will be using shot generator, not actual viewer.
# check that shot generator size matches UI params
self.check_and_adjust_dimensions()
# copy background color of viewer to shot generator
bg = self.fitsimage.get_bg()
self.shot_generator.set_bg(*bg)
# add the main canvas from channel viewer to shot generator
c1 = self.fitsimage.get_canvas()
c2 = self.shot_generator.get_canvas()
c2.delete_all_objects(redraw=False)
c2.add(c1, redraw=False)
# hack to fix a few problem graphics
self.shot_generator._imgobj = self.fitsimage._imgobj
# scale of the shot generator should be the scale of channel
# viewer multiplied by the ratio of window sizes
scale_x, scale_y = self.fitsimage.get_scale_xy()
c1_wd, c1_ht = self.fitsimage.get_window_size()
c2_wd, c2_ht = self.shot_generator.get_window_size()
scale_wd = float(c2_wd) / float(c1_wd)
scale_ht = float(c2_ht) / float(c1_ht)
scale = max(scale_wd, scale_ht)
scale_x *= scale
scale_y *= scale
self.shot_generator.scale_to(scale_x, scale_y)
self.fitsimage.copy_attributes(self.shot_generator,
self.transfer_attrs)
# snap image
self.fv.error_wrap(self.shot_generator.save_rgb_image_as_file,
self.tmpname, format=format)
c2.delete_all_objects(redraw=False)
self.shot_generator._imgobj = None
self.saved_type = format
img = RGBImage(logger=self.logger)
img.load_file(self.tmpname)
# load the snapped image into the screenshot viewer
self.scrnimage.set_image(img) | [
"def",
"_snap_cb",
"(",
"self",
",",
"w",
")",
":",
"# Clear the snap image viewer",
"self",
".",
"scrnimage",
".",
"clear",
"(",
")",
"self",
".",
"scrnimage",
".",
"redraw_now",
"(",
"whence",
"=",
"0",
")",
"self",
".",
"fv",
".",
"update_pending",
"(... | This function is called when the user clicks the 'Snap' button. | [
"This",
"function",
"is",
"called",
"when",
"the",
"user",
"clicks",
"the",
"Snap",
"button",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/ScreenShot.py#L272-L332 |
26,188 | ejeschke/ginga | ginga/rv/plugins/ScreenShot.py | ScreenShot._save_cb | def _save_cb(self, w):
"""This function is called when the user clicks the 'Save' button.
We save the last taken shot to the folder and name specified.
"""
format = self.saved_type
if format is None:
return self.fv.show_error("Please save an image first.")
# create filename
filename = self.w.name.get_text().strip()
if len(filename) == 0:
return self.fv.show_error("Please set a name for saving the file")
self.save_name = filename
if not filename.lower().endswith('.' + format):
filename = filename + '.' + format
# join to path
path = self.w.folder.get_text().strip()
if path == '':
path = filename
else:
self.save_path = path
path = os.path.join(path, filename)
# copy last saved file
self.fv.error_wrap(shutil.copyfile, self.tmpname, path) | python | def _save_cb(self, w):
format = self.saved_type
if format is None:
return self.fv.show_error("Please save an image first.")
# create filename
filename = self.w.name.get_text().strip()
if len(filename) == 0:
return self.fv.show_error("Please set a name for saving the file")
self.save_name = filename
if not filename.lower().endswith('.' + format):
filename = filename + '.' + format
# join to path
path = self.w.folder.get_text().strip()
if path == '':
path = filename
else:
self.save_path = path
path = os.path.join(path, filename)
# copy last saved file
self.fv.error_wrap(shutil.copyfile, self.tmpname, path) | [
"def",
"_save_cb",
"(",
"self",
",",
"w",
")",
":",
"format",
"=",
"self",
".",
"saved_type",
"if",
"format",
"is",
"None",
":",
"return",
"self",
".",
"fv",
".",
"show_error",
"(",
"\"Please save an image first.\"",
")",
"# create filename",
"filename",
"="... | This function is called when the user clicks the 'Save' button.
We save the last taken shot to the folder and name specified. | [
"This",
"function",
"is",
"called",
"when",
"the",
"user",
"clicks",
"the",
"Save",
"button",
".",
"We",
"save",
"the",
"last",
"taken",
"shot",
"to",
"the",
"folder",
"and",
"name",
"specified",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/ScreenShot.py#L354-L380 |
26,189 | ejeschke/ginga | ginga/rv/plugins/ScreenShot.py | ScreenShot._lock_aspect_cb | def _lock_aspect_cb(self, w, tf):
"""This function is called when the user clicks the 'Lock aspect'
checkbox. `tf` is True if checked, False otherwise.
"""
self._lock_aspect = tf
self.w.aspect.set_enabled(tf)
if self._lock_aspect:
self._set_aspect_cb()
else:
wd, ht = self.get_wdht()
_as = self.calc_aspect_str(wd, ht)
self.w.aspect.set_text(_as) | python | def _lock_aspect_cb(self, w, tf):
self._lock_aspect = tf
self.w.aspect.set_enabled(tf)
if self._lock_aspect:
self._set_aspect_cb()
else:
wd, ht = self.get_wdht()
_as = self.calc_aspect_str(wd, ht)
self.w.aspect.set_text(_as) | [
"def",
"_lock_aspect_cb",
"(",
"self",
",",
"w",
",",
"tf",
")",
":",
"self",
".",
"_lock_aspect",
"=",
"tf",
"self",
".",
"w",
".",
"aspect",
".",
"set_enabled",
"(",
"tf",
")",
"if",
"self",
".",
"_lock_aspect",
":",
"self",
".",
"_set_aspect_cb",
... | This function is called when the user clicks the 'Lock aspect'
checkbox. `tf` is True if checked, False otherwise. | [
"This",
"function",
"is",
"called",
"when",
"the",
"user",
"clicks",
"the",
"Lock",
"aspect",
"checkbox",
".",
"tf",
"is",
"True",
"if",
"checked",
"False",
"otherwise",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/ScreenShot.py#L494-L505 |
26,190 | ejeschke/ginga | ginga/rv/plugins/ScreenShot.py | ScreenShot._screen_size_cb | def _screen_size_cb(self, w, tf):
"""This function is called when the user clicks the 'Screen size'
checkbox. `tf` is True if checked, False otherwise.
"""
self._screen_size = tf
self.w.width.set_enabled(not tf)
self.w.height.set_enabled(not tf)
self.w.lock_aspect.set_enabled(not tf)
if self._screen_size:
wd, ht = self.fitsimage.get_window_size()
self._configure_cb(self.fitsimage, wd, ht) | python | def _screen_size_cb(self, w, tf):
self._screen_size = tf
self.w.width.set_enabled(not tf)
self.w.height.set_enabled(not tf)
self.w.lock_aspect.set_enabled(not tf)
if self._screen_size:
wd, ht = self.fitsimage.get_window_size()
self._configure_cb(self.fitsimage, wd, ht) | [
"def",
"_screen_size_cb",
"(",
"self",
",",
"w",
",",
"tf",
")",
":",
"self",
".",
"_screen_size",
"=",
"tf",
"self",
".",
"w",
".",
"width",
".",
"set_enabled",
"(",
"not",
"tf",
")",
"self",
".",
"w",
".",
"height",
".",
"set_enabled",
"(",
"not"... | This function is called when the user clicks the 'Screen size'
checkbox. `tf` is True if checked, False otherwise. | [
"This",
"function",
"is",
"called",
"when",
"the",
"user",
"clicks",
"the",
"Screen",
"size",
"checkbox",
".",
"tf",
"is",
"True",
"if",
"checked",
"False",
"otherwise",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/ScreenShot.py#L517-L527 |
26,191 | ejeschke/ginga | ginga/util/io_asdf.py | load_asdf | def load_asdf(asdf_obj, data_key='sci', wcs_key='wcs', header_key='meta'):
"""
Load from an ASDF object.
Parameters
----------
asdf_obj : obj
ASDF or ASDF-in-FITS object.
data_key, wcs_key, header_key : str
Key values to specify where to find data, WCS, and header
in ASDF.
Returns
-------
data : ndarray or `None`
Image data, if found.
wcs : obj or `None`
GWCS object or models, if found.
ahdr : dict
Header containing metadata.
"""
asdf_keys = asdf_obj.keys()
if wcs_key in asdf_keys:
wcs = asdf_obj[wcs_key]
else:
wcs = None
if header_key in asdf_keys:
ahdr = asdf_obj[header_key]
else:
ahdr = {}
# TODO: What about non-image ASDF data, such as table?
if data_key in asdf_keys:
data = np.asarray(asdf_obj[data_key])
else:
data = None
return data, wcs, ahdr | python | def load_asdf(asdf_obj, data_key='sci', wcs_key='wcs', header_key='meta'):
asdf_keys = asdf_obj.keys()
if wcs_key in asdf_keys:
wcs = asdf_obj[wcs_key]
else:
wcs = None
if header_key in asdf_keys:
ahdr = asdf_obj[header_key]
else:
ahdr = {}
# TODO: What about non-image ASDF data, such as table?
if data_key in asdf_keys:
data = np.asarray(asdf_obj[data_key])
else:
data = None
return data, wcs, ahdr | [
"def",
"load_asdf",
"(",
"asdf_obj",
",",
"data_key",
"=",
"'sci'",
",",
"wcs_key",
"=",
"'wcs'",
",",
"header_key",
"=",
"'meta'",
")",
":",
"asdf_keys",
"=",
"asdf_obj",
".",
"keys",
"(",
")",
"if",
"wcs_key",
"in",
"asdf_keys",
":",
"wcs",
"=",
"asd... | Load from an ASDF object.
Parameters
----------
asdf_obj : obj
ASDF or ASDF-in-FITS object.
data_key, wcs_key, header_key : str
Key values to specify where to find data, WCS, and header
in ASDF.
Returns
-------
data : ndarray or `None`
Image data, if found.
wcs : obj or `None`
GWCS object or models, if found.
ahdr : dict
Header containing metadata. | [
"Load",
"from",
"an",
"ASDF",
"object",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/io_asdf.py#L23-L66 |
26,192 | ejeschke/ginga | ginga/rv/plugins/Colorbar.py | Colorbar._match_cmap | def _match_cmap(self, fitsimage, colorbar):
"""
Help method to change the ColorBar to match the cut levels or
colormap used in a ginga ImageView.
"""
rgbmap = fitsimage.get_rgbmap()
loval, hival = fitsimage.get_cut_levels()
colorbar.set_range(loval, hival)
# If we are sharing a ColorBar for all channels, then store
# to change the ColorBar's rgbmap to match our
colorbar.set_rgbmap(rgbmap) | python | def _match_cmap(self, fitsimage, colorbar):
rgbmap = fitsimage.get_rgbmap()
loval, hival = fitsimage.get_cut_levels()
colorbar.set_range(loval, hival)
# If we are sharing a ColorBar for all channels, then store
# to change the ColorBar's rgbmap to match our
colorbar.set_rgbmap(rgbmap) | [
"def",
"_match_cmap",
"(",
"self",
",",
"fitsimage",
",",
"colorbar",
")",
":",
"rgbmap",
"=",
"fitsimage",
".",
"get_rgbmap",
"(",
")",
"loval",
",",
"hival",
"=",
"fitsimage",
".",
"get_cut_levels",
"(",
")",
"colorbar",
".",
"set_range",
"(",
"loval",
... | Help method to change the ColorBar to match the cut levels or
colormap used in a ginga ImageView. | [
"Help",
"method",
"to",
"change",
"the",
"ColorBar",
"to",
"match",
"the",
"cut",
"levels",
"or",
"colormap",
"used",
"in",
"a",
"ginga",
"ImageView",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Colorbar.py#L88-L98 |
26,193 | ejeschke/ginga | ginga/rv/plugins/Colorbar.py | Colorbar.rgbmap_cb | def rgbmap_cb(self, rgbmap, channel):
"""
This method is called when the RGBMap is changed. We update
the ColorBar to match.
"""
if not self.gui_up:
return
fitsimage = channel.fitsimage
if fitsimage != self.fv.getfocus_fitsimage():
return False
self.change_cbar(self.fv, channel) | python | def rgbmap_cb(self, rgbmap, channel):
if not self.gui_up:
return
fitsimage = channel.fitsimage
if fitsimage != self.fv.getfocus_fitsimage():
return False
self.change_cbar(self.fv, channel) | [
"def",
"rgbmap_cb",
"(",
"self",
",",
"rgbmap",
",",
"channel",
")",
":",
"if",
"not",
"self",
".",
"gui_up",
":",
"return",
"fitsimage",
"=",
"channel",
".",
"fitsimage",
"if",
"fitsimage",
"!=",
"self",
".",
"fv",
".",
"getfocus_fitsimage",
"(",
")",
... | This method is called when the RGBMap is changed. We update
the ColorBar to match. | [
"This",
"method",
"is",
"called",
"when",
"the",
"RGBMap",
"is",
"changed",
".",
"We",
"update",
"the",
"ColorBar",
"to",
"match",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/Colorbar.py#L130-L140 |
26,194 | ejeschke/ginga | ginga/util/addons.py | show_mode_indicator | def show_mode_indicator(viewer, tf, corner='ur'):
"""Show a keyboard mode indicator in one of the corners.
Parameters
----------
viewer : an ImageView subclass instance
If True, show the color bar; else remove it if present.
tf : bool
If True, show the mark; else remove it if present.
corner : str
One of 'll', 'lr', 'ul' or 'ur' selecting a corner.
The default is 'ur'.
"""
tag = '_$mode_indicator'
canvas = viewer.get_private_canvas()
try:
indic = canvas.get_object_by_tag(tag)
if not tf:
canvas.delete_object_by_tag(tag)
else:
indic.corner = corner
except KeyError:
if tf:
# force a redraw if the mode changes
bm = viewer.get_bindmap()
bm.add_callback('mode-set',
lambda *args: viewer.redraw(whence=3))
Indicator = canvas.get_draw_class('modeindicator')
canvas.add(Indicator(corner=corner),
tag=tag, redraw=False)
canvas.update_canvas(whence=3) | python | def show_mode_indicator(viewer, tf, corner='ur'):
tag = '_$mode_indicator'
canvas = viewer.get_private_canvas()
try:
indic = canvas.get_object_by_tag(tag)
if not tf:
canvas.delete_object_by_tag(tag)
else:
indic.corner = corner
except KeyError:
if tf:
# force a redraw if the mode changes
bm = viewer.get_bindmap()
bm.add_callback('mode-set',
lambda *args: viewer.redraw(whence=3))
Indicator = canvas.get_draw_class('modeindicator')
canvas.add(Indicator(corner=corner),
tag=tag, redraw=False)
canvas.update_canvas(whence=3) | [
"def",
"show_mode_indicator",
"(",
"viewer",
",",
"tf",
",",
"corner",
"=",
"'ur'",
")",
":",
"tag",
"=",
"'_$mode_indicator'",
"canvas",
"=",
"viewer",
".",
"get_private_canvas",
"(",
")",
"try",
":",
"indic",
"=",
"canvas",
".",
"get_object_by_tag",
"(",
... | Show a keyboard mode indicator in one of the corners.
Parameters
----------
viewer : an ImageView subclass instance
If True, show the color bar; else remove it if present.
tf : bool
If True, show the mark; else remove it if present.
corner : str
One of 'll', 'lr', 'ul' or 'ur' selecting a corner.
The default is 'ur'. | [
"Show",
"a",
"keyboard",
"mode",
"indicator",
"in",
"one",
"of",
"the",
"corners",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/addons.py#L44-L81 |
26,195 | ejeschke/ginga | ginga/util/addons.py | show_color_bar | def show_color_bar(viewer, tf, side='bottom'):
"""Show a color bar in the window.
Parameters
----------
viewer : an ImageView subclass instance
If True, show the color bar; else remove it if present.
tf : bool
If True, show the color bar; else remove it if present.
side : str
One of 'top' or 'bottom'. The default is 'bottom'.
"""
tag = '_$color_bar'
canvas = viewer.get_private_canvas()
try:
cbar = canvas.get_object_by_tag(tag)
if not tf:
canvas.delete_object_by_tag(tag)
else:
cbar.side = side
except KeyError:
if tf:
Cbar = canvas.get_draw_class('colorbar')
canvas.add(Cbar(side=side), tag=tag, redraw=False)
canvas.update_canvas(whence=3) | python | def show_color_bar(viewer, tf, side='bottom'):
tag = '_$color_bar'
canvas = viewer.get_private_canvas()
try:
cbar = canvas.get_object_by_tag(tag)
if not tf:
canvas.delete_object_by_tag(tag)
else:
cbar.side = side
except KeyError:
if tf:
Cbar = canvas.get_draw_class('colorbar')
canvas.add(Cbar(side=side), tag=tag, redraw=False)
canvas.update_canvas(whence=3) | [
"def",
"show_color_bar",
"(",
"viewer",
",",
"tf",
",",
"side",
"=",
"'bottom'",
")",
":",
"tag",
"=",
"'_$color_bar'",
"canvas",
"=",
"viewer",
".",
"get_private_canvas",
"(",
")",
"try",
":",
"cbar",
"=",
"canvas",
".",
"get_object_by_tag",
"(",
"tag",
... | Show a color bar in the window.
Parameters
----------
viewer : an ImageView subclass instance
If True, show the color bar; else remove it if present.
tf : bool
If True, show the color bar; else remove it if present.
side : str
One of 'top' or 'bottom'. The default is 'bottom'. | [
"Show",
"a",
"color",
"bar",
"in",
"the",
"window",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/addons.py#L84-L114 |
26,196 | ejeschke/ginga | ginga/util/addons.py | show_focus_indicator | def show_focus_indicator(viewer, tf, color='white'):
"""Show a focus indicator in the window.
Parameters
----------
viewer : an ImageView subclass instance
If True, show the color bar; else remove it if present.
tf : bool
If True, show the color bar; else remove it if present.
color : str
Color for the focus indicator.
"""
tag = '_$focus_indicator'
canvas = viewer.get_private_canvas()
try:
fcsi = canvas.get_object_by_tag(tag)
if not tf:
canvas.delete_object_by_tag(tag)
else:
fcsi.color = color
except KeyError:
if tf:
Fcsi = canvas.get_draw_class('focusindicator')
fcsi = Fcsi(color=color)
canvas.add(fcsi, tag=tag, redraw=False)
viewer.add_callback('focus', fcsi.focus_cb)
canvas.update_canvas(whence=3) | python | def show_focus_indicator(viewer, tf, color='white'):
tag = '_$focus_indicator'
canvas = viewer.get_private_canvas()
try:
fcsi = canvas.get_object_by_tag(tag)
if not tf:
canvas.delete_object_by_tag(tag)
else:
fcsi.color = color
except KeyError:
if tf:
Fcsi = canvas.get_draw_class('focusindicator')
fcsi = Fcsi(color=color)
canvas.add(fcsi, tag=tag, redraw=False)
viewer.add_callback('focus', fcsi.focus_cb)
canvas.update_canvas(whence=3) | [
"def",
"show_focus_indicator",
"(",
"viewer",
",",
"tf",
",",
"color",
"=",
"'white'",
")",
":",
"tag",
"=",
"'_$focus_indicator'",
"canvas",
"=",
"viewer",
".",
"get_private_canvas",
"(",
")",
"try",
":",
"fcsi",
"=",
"canvas",
".",
"get_object_by_tag",
"("... | Show a focus indicator in the window.
Parameters
----------
viewer : an ImageView subclass instance
If True, show the color bar; else remove it if present.
tf : bool
If True, show the color bar; else remove it if present.
color : str
Color for the focus indicator. | [
"Show",
"a",
"focus",
"indicator",
"in",
"the",
"window",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/addons.py#L117-L149 |
26,197 | ejeschke/ginga | ginga/util/addons.py | add_zoom_buttons | def add_zoom_buttons(viewer, canvas=None, color='black'):
"""Add zoom buttons to a canvas.
Parameters
----------
viewer : an ImageView subclass instance
If True, show the color bar; else remove it if present.
canvas : a DrawingCanvas instance
The canvas to which the buttons should be added. If not supplied
defaults to the private canvas of the viewer.
color : str
A color name, hex triplet. The default is 'black'.
"""
def zoom(box, canvas, event, pt, viewer, n):
zl = viewer.get_zoom()
zl += n
if zl == 0.0:
zl += n
viewer.zoom_to(zl + n)
def add_buttons(viewer, canvas, tag):
objs = []
wd, ht = viewer.get_window_size()
SquareBox = canvas.get_draw_class('squarebox')
Text = canvas.get_draw_class('text')
Compound = canvas.get_draw_class('compoundobject')
x1, y1 = wd - 20, ht // 2 + 20
zoomin = SquareBox(x1, y1, 15, color='yellow', fill=True,
fillcolor='gray', fillalpha=0.5, coord='window')
zoomin.editable = False
zoomin.pickable = True
zoomin.add_callback('pick-down', zoom, viewer, 1)
objs.append(zoomin)
x2, y2 = wd - 20, ht // 2 - 20
zoomout = SquareBox(x2, y2, 15, color='yellow', fill=True,
fillcolor='gray', fillalpha=0.5, coord='window')
zoomout.editable = False
zoomout.pickable = True
zoomout.add_callback('pick-down', zoom, viewer, -1)
objs.append(zoomout)
objs.append(Text(x1 - 4, y1 + 6, text='+', fontsize=18, color=color,
coord='window'))
objs.append(Text(x2 - 4, y2 + 6, text='--', fontsize=18, color=color,
coord='window'))
obj = Compound(*objs)
obj.opaque = False
canvas.add(obj, tag=tag)
def zoom_resize(viewer, width, height, canvas, tag):
try:
canvas.get_object_by_tag(tag)
except KeyError:
return False
canvas.delete_object_by_tag(tag)
add_buttons(viewer, canvas, tag)
tag = '_$zoom_buttons'
if canvas is None:
canvas = viewer.get_private_canvas()
canvas.ui_set_active(True)
canvas.register_for_cursor_drawing(viewer)
canvas.set_draw_mode('pick')
viewer.add_callback('configure', zoom_resize, canvas, tag)
add_buttons(viewer, canvas, tag) | python | def add_zoom_buttons(viewer, canvas=None, color='black'):
def zoom(box, canvas, event, pt, viewer, n):
zl = viewer.get_zoom()
zl += n
if zl == 0.0:
zl += n
viewer.zoom_to(zl + n)
def add_buttons(viewer, canvas, tag):
objs = []
wd, ht = viewer.get_window_size()
SquareBox = canvas.get_draw_class('squarebox')
Text = canvas.get_draw_class('text')
Compound = canvas.get_draw_class('compoundobject')
x1, y1 = wd - 20, ht // 2 + 20
zoomin = SquareBox(x1, y1, 15, color='yellow', fill=True,
fillcolor='gray', fillalpha=0.5, coord='window')
zoomin.editable = False
zoomin.pickable = True
zoomin.add_callback('pick-down', zoom, viewer, 1)
objs.append(zoomin)
x2, y2 = wd - 20, ht // 2 - 20
zoomout = SquareBox(x2, y2, 15, color='yellow', fill=True,
fillcolor='gray', fillalpha=0.5, coord='window')
zoomout.editable = False
zoomout.pickable = True
zoomout.add_callback('pick-down', zoom, viewer, -1)
objs.append(zoomout)
objs.append(Text(x1 - 4, y1 + 6, text='+', fontsize=18, color=color,
coord='window'))
objs.append(Text(x2 - 4, y2 + 6, text='--', fontsize=18, color=color,
coord='window'))
obj = Compound(*objs)
obj.opaque = False
canvas.add(obj, tag=tag)
def zoom_resize(viewer, width, height, canvas, tag):
try:
canvas.get_object_by_tag(tag)
except KeyError:
return False
canvas.delete_object_by_tag(tag)
add_buttons(viewer, canvas, tag)
tag = '_$zoom_buttons'
if canvas is None:
canvas = viewer.get_private_canvas()
canvas.ui_set_active(True)
canvas.register_for_cursor_drawing(viewer)
canvas.set_draw_mode('pick')
viewer.add_callback('configure', zoom_resize, canvas, tag)
add_buttons(viewer, canvas, tag) | [
"def",
"add_zoom_buttons",
"(",
"viewer",
",",
"canvas",
"=",
"None",
",",
"color",
"=",
"'black'",
")",
":",
"def",
"zoom",
"(",
"box",
",",
"canvas",
",",
"event",
",",
"pt",
",",
"viewer",
",",
"n",
")",
":",
"zl",
"=",
"viewer",
".",
"get_zoom"... | Add zoom buttons to a canvas.
Parameters
----------
viewer : an ImageView subclass instance
If True, show the color bar; else remove it if present.
canvas : a DrawingCanvas instance
The canvas to which the buttons should be added. If not supplied
defaults to the private canvas of the viewer.
color : str
A color name, hex triplet. The default is 'black'. | [
"Add",
"zoom",
"buttons",
"to",
"a",
"canvas",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/addons.py#L152-L220 |
26,198 | ejeschke/ginga | ginga/gtkw/ImageViewGtk.py | ImageViewGtk.expose_event | def expose_event(self, widget, event):
"""When an area of the window is exposed, we just copy out of the
server-side, off-screen surface to that area.
"""
x, y, width, height = event.area
self.logger.debug("surface is %s" % self.surface)
if self.surface is not None:
win = widget.get_window()
cr = win.cairo_create()
# set clip area for exposed region
cr.rectangle(x, y, width, height)
cr.clip()
# Paint from off-screen surface
cr.set_source_surface(self.surface, 0, 0)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
return False | python | def expose_event(self, widget, event):
x, y, width, height = event.area
self.logger.debug("surface is %s" % self.surface)
if self.surface is not None:
win = widget.get_window()
cr = win.cairo_create()
# set clip area for exposed region
cr.rectangle(x, y, width, height)
cr.clip()
# Paint from off-screen surface
cr.set_source_surface(self.surface, 0, 0)
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.paint()
return False | [
"def",
"expose_event",
"(",
"self",
",",
"widget",
",",
"event",
")",
":",
"x",
",",
"y",
",",
"width",
",",
"height",
"=",
"event",
".",
"area",
"self",
".",
"logger",
".",
"debug",
"(",
"\"surface is %s\"",
"%",
"self",
".",
"surface",
")",
"if",
... | When an area of the window is exposed, we just copy out of the
server-side, off-screen surface to that area. | [
"When",
"an",
"area",
"of",
"the",
"window",
"is",
"exposed",
"we",
"just",
"copy",
"out",
"of",
"the",
"server",
"-",
"side",
"off",
"-",
"screen",
"surface",
"to",
"that",
"area",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/gtkw/ImageViewGtk.py#L150-L169 |
26,199 | ejeschke/ginga | ginga/gtkw/ImageViewGtk.py | ImageViewGtk.size_request | def size_request(self, widget, requisition):
"""Callback function to request our desired size.
"""
requisition.width, requisition.height = self.get_desired_size()
return True | python | def size_request(self, widget, requisition):
requisition.width, requisition.height = self.get_desired_size()
return True | [
"def",
"size_request",
"(",
"self",
",",
"widget",
",",
"requisition",
")",
":",
"requisition",
".",
"width",
",",
"requisition",
".",
"height",
"=",
"self",
".",
"get_desired_size",
"(",
")",
"return",
"True"
] | Callback function to request our desired size. | [
"Callback",
"function",
"to",
"request",
"our",
"desired",
"size",
"."
] | a78c893ec6f37a837de851947e9bb4625c597915 | https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/gtkw/ImageViewGtk.py#L195-L199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.