repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
awacha/credolib
|
credolib/io.py
|
filter_headers
|
python
|
def filter_headers(criterion):
    """Drop every loaded header for which *criterion* returns False.

    The criterion callable receives a single header instance and must
    return True to keep it.  Headers are removed in place from both the
    'processed' and the 'raw' lists, and the set of sample names in the
    user namespace is refreshed from the surviving processed headers.
    """
    shell = get_ipython()
    headers = shell.user_ns['_headers']
    for kind in ('processed', 'raw'):
        # Rebuild the list in place so other references to it stay valid.
        headers[kind][:] = [h for h in headers[kind] if criterion(h)]
    shell.user_ns['allsamplenames'] = {h.title for h in headers['processed']}
|
Filter already loaded headers against some criterion.
The criterion function must accept a single argument, which is an instance
of sastool.classes2.header.Header, or one of its subclasses. The function
must return True if the header is to be kept or False if it needs to be
discarded. All manipulations on the header (including sample name changes,
etc.) carried out by this function are preserved.
|
train
|
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/io.py#L13-L27
| null |
__all__ = ['load_headers', 'getsascurve', 'getsasexposure', 'getheaders', 'getdists', 'filter_headers', 'load_exposure',
'load_mask']
from typing import List, Tuple, Union
import numpy as np
from IPython.core.getipython import get_ipython
from sastool.classes2.curve import Curve
from sastool.classes2.exposure import Exposure
from sastool.classes2.header import Header
from sastool.classes2.loader import Loader
def load_headers(fsns:List[int]):
    """Load header files.

    For each file sequence number in *fsns*, both the raw and the processed
    header is loaded via the registered loaders and stored in the user
    namespace under '_headers' (a dict with 'raw' and 'processed' keys).
    The titles of the processed headers are collected into 'allsamplenames'.
    """
    shell = get_ipython()
    shell.user_ns['_headers'] = {}
    for kind in ['raw', 'processed']:
        print("Loading %d headers (%s)" % (len(fsns), kind), flush=True)
        want_processed = (kind == 'processed')
        loaded = []
        for fsn in fsns:
            # Try each matching loader in turn; first hit wins.
            for loader in [l_ for l_ in shell.user_ns['_loaders'] if l_.processed == want_processed]:
                try:
                    loaded.append(loader.loadheader(fsn))
                except FileNotFoundError:
                    continue
                break
        samplenames = {h.title for h in loaded}
        if loaded:
            print("%d headers (%s) out of %d have been loaded successfully." % (len(loaded), kind, len(fsns)))
            print('Read FSN range:', min([h.fsn for h in loaded]), 'to', max([h.fsn for h in loaded]))
            print("Samples covered by these headers:")
            print("  " + "\n  ".join(sorted(samplenames)), flush=True)
        else:
            print('NO HEADERS READ FOR TYPE "%s"' % kind)
        if want_processed:
            shell.user_ns['allsamplenames'] = samplenames
        shell.user_ns['_headers'][kind] = loaded
def getsascurve(samplename:str, dist=None) -> Tuple[Curve, Union[float, str]]:
    """Return the 1D scattering curve of a sample and the distance key used.

    If *dist* is 'united' the united curve is returned.  If *dist* is None
    the united curve is preferred, falling back to the shortest distance.
    Otherwise the curve measured at the distance closest to *dist* is used.
    """
    shell = get_ipython()
    if dist == 'united':
        return shell.user_ns['_data1dunited'][samplename], dist
    if dist is None:
        try:
            return shell.user_ns['_data1dunited'][samplename], 'united'
        except KeyError:
            curves = shell.user_ns['_data1d'][samplename]
            chosen = sorted(curves.keys())[0]
            return curves[chosen], chosen
    curves = shell.user_ns['_data1d'][samplename]
    chosen = min(curves.keys(), key=lambda k: abs(float(dist) - k))
    return curves[chosen], chosen
def getsasexposure(samplename, dist=None) -> Tuple[Curve, float]:
    """Return the 2D exposure of a sample, together with the distance used.

    With *dist* None the shortest sample-to-detector distance is taken,
    otherwise the available distance closest to *dist*.
    """
    shell = get_ipython()
    exposures = shell.user_ns['_data2d'][samplename]
    if dist is None:
        chosen = sorted(exposures.keys())[0]
    else:
        chosen = min(exposures.keys(), key=lambda k: abs(float(dist) - k))
    return exposures[chosen], chosen
def getheaders(processed=True) -> List[Header]:
    """Return the list of loaded headers ('processed' by default, else 'raw')."""
    shell = get_ipython()
    kind = 'processed' if processed else 'raw'
    return shell.user_ns['_headers'][kind]
def getdists(samplename) -> List[float]:
    """Return the sorted sample-to-detector distances known for a sample."""
    shell = get_ipython()
    return sorted(shell.user_ns['_headers_sample'][samplename])
def get_different_distances(headers, tolerance=2) -> List[float]:
    """Collect the distinct sample-to-detector distances among *headers*.

    A distance closer than *tolerance* to an already accepted one is
    treated as the same distance and skipped.  Returns a sorted list.
    """
    accepted = []
    for dist in {float(h.distance) for h in headers}:
        if not any(abs(dist - seen) < tolerance for seen in accepted):
            accepted.append(dist)
    return sorted(accepted)
def load_exposure(fsn:int, raw=True, processed=True) -> Exposure:
    """Load a single exposure by file sequence number.

    Every registered loader is tried in order, restricted to raw and/or
    processed loaders according to the flags; the first successful load is
    returned.  Raises FileNotFoundError when no loader can supply it.
    """
    shell = get_ipython()
    for loader in shell.user_ns['_loaders']:
        assert isinstance(loader, Loader)
        wanted = processed if loader.processed else raw
        if not wanted:
            continue
        try:
            return loader.loadexposure(fsn)
        except (OSError, ValueError):
            pass
    raise FileNotFoundError('Cannot find exposure for fsn #{:d}'.format(fsn))
def load_mask(maskname: str) -> np.ndarray:
    """Load a mask matrix by name, trying every registered loader in order.

    Raises FileNotFoundError when none of the loaders can find it.
    """
    shell = get_ipython()
    for loader in shell.user_ns['_loaders']:
        assert isinstance(loader, Loader)
        try:
            return loader.loadmask(maskname)
        except OSError:
            pass
    raise FileNotFoundError('Cannot load mask file {}'.format(maskname))
|
awacha/credolib
|
credolib/io.py
|
load_headers
|
python
|
def load_headers(fsns:List[int]):
    """Load header files.

    For each file sequence number in *fsns*, both the raw and the processed
    header is loaded via the registered loaders and stored in the user
    namespace under '_headers' (a dict with 'raw' and 'processed' keys).
    The titles of the processed headers are collected into 'allsamplenames'.
    """
    shell = get_ipython()
    shell.user_ns['_headers'] = {}
    for kind in ['raw', 'processed']:
        print("Loading %d headers (%s)" % (len(fsns), kind), flush=True)
        want_processed = (kind == 'processed')
        loaded = []
        for fsn in fsns:
            # Try each matching loader in turn; first hit wins.
            for loader in [l_ for l_ in shell.user_ns['_loaders'] if l_.processed == want_processed]:
                try:
                    loaded.append(loader.loadheader(fsn))
                except FileNotFoundError:
                    continue
                break
        samplenames = {h.title for h in loaded}
        if loaded:
            print("%d headers (%s) out of %d have been loaded successfully." % (len(loaded), kind, len(fsns)))
            print('Read FSN range:', min([h.fsn for h in loaded]), 'to', max([h.fsn for h in loaded]))
            print("Samples covered by these headers:")
            print("  " + "\n  ".join(sorted(samplenames)), flush=True)
        else:
            print('NO HEADERS READ FOR TYPE "%s"' % kind)
        if want_processed:
            shell.user_ns['allsamplenames'] = samplenames
        shell.user_ns['_headers'][kind] = loaded
|
Load header files
|
train
|
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/io.py#L29-L55
| null |
__all__ = ['load_headers', 'getsascurve', 'getsasexposure', 'getheaders', 'getdists', 'filter_headers', 'load_exposure',
'load_mask']
from typing import List, Tuple, Union
import numpy as np
from IPython.core.getipython import get_ipython
from sastool.classes2.curve import Curve
from sastool.classes2.exposure import Exposure
from sastool.classes2.header import Header
from sastool.classes2.loader import Loader
def filter_headers(criterion):
    """Filter already loaded headers against some criterion.

    The criterion function must accept a single argument, an instance of
    sastool.classes2.header.Header (or a subclass), and return True if the
    header is to be kept, False if it is to be discarded.  Manipulations on
    the header objects carried out by the criterion are preserved.  The
    'allsamplenames' set in the user namespace is refreshed afterwards.
    """
    shell = get_ipython()
    headers = shell.user_ns['_headers']
    for kind in ('processed', 'raw'):
        # Rebuild the list in place so other references to it stay valid.
        headers[kind][:] = [h for h in headers[kind] if criterion(h)]
    shell.user_ns['allsamplenames'] = {h.title for h in headers['processed']}
def getsascurve(samplename:str, dist=None) -> Tuple[Curve, Union[float, str]]:
    """Return the 1D scattering curve of a sample and the distance key used.

    If *dist* is 'united' the united curve is returned.  If *dist* is None
    the united curve is preferred, falling back to the shortest distance.
    Otherwise the curve measured at the distance closest to *dist* is used.
    """
    shell = get_ipython()
    if dist == 'united':
        return shell.user_ns['_data1dunited'][samplename], dist
    if dist is None:
        try:
            return shell.user_ns['_data1dunited'][samplename], 'united'
        except KeyError:
            curves = shell.user_ns['_data1d'][samplename]
            chosen = sorted(curves.keys())[0]
            return curves[chosen], chosen
    curves = shell.user_ns['_data1d'][samplename]
    chosen = min(curves.keys(), key=lambda k: abs(float(dist) - k))
    return curves[chosen], chosen
def getsasexposure(samplename, dist=None) -> Tuple[Curve, float]:
    """Return the 2D exposure of a sample, together with the distance used.

    With *dist* None the shortest sample-to-detector distance is taken,
    otherwise the available distance closest to *dist*.
    """
    shell = get_ipython()
    exposures = shell.user_ns['_data2d'][samplename]
    if dist is None:
        chosen = sorted(exposures.keys())[0]
    else:
        chosen = min(exposures.keys(), key=lambda k: abs(float(dist) - k))
    return exposures[chosen], chosen
def getheaders(processed=True) -> List[Header]:
    """Return the list of loaded headers ('processed' by default, else 'raw')."""
    shell = get_ipython()
    kind = 'processed' if processed else 'raw'
    return shell.user_ns['_headers'][kind]
def getdists(samplename) -> List[float]:
    """Return the sorted sample-to-detector distances known for a sample."""
    shell = get_ipython()
    return sorted(shell.user_ns['_headers_sample'][samplename])
def get_different_distances(headers, tolerance=2) -> List[float]:
    """Collect the distinct sample-to-detector distances among *headers*.

    A distance closer than *tolerance* to an already accepted one is
    treated as the same distance and skipped.  Returns a sorted list.
    """
    accepted = []
    for dist in {float(h.distance) for h in headers}:
        if not any(abs(dist - seen) < tolerance for seen in accepted):
            accepted.append(dist)
    return sorted(accepted)
def load_exposure(fsn:int, raw=True, processed=True) -> Exposure:
    """Load a single exposure by file sequence number.

    Every registered loader is tried in order, restricted to raw and/or
    processed loaders according to the flags; the first successful load is
    returned.  Raises FileNotFoundError when no loader can supply it.
    """
    shell = get_ipython()
    for loader in shell.user_ns['_loaders']:
        assert isinstance(loader, Loader)
        wanted = processed if loader.processed else raw
        if not wanted:
            continue
        try:
            return loader.loadexposure(fsn)
        except (OSError, ValueError):
            pass
    raise FileNotFoundError('Cannot find exposure for fsn #{:d}'.format(fsn))
def load_mask(maskname: str) -> np.ndarray:
    """Load a mask matrix by name, trying every registered loader in order.

    Raises FileNotFoundError when none of the loaders can find it.
    """
    shell = get_ipython()
    for loader in shell.user_ns['_loaders']:
        assert isinstance(loader, Loader)
        try:
            return loader.loadmask(maskname)
        except OSError:
            pass
    raise FileNotFoundError('Cannot load mask file {}'.format(maskname))
|
awacha/credolib
|
credolib/interpretation.py
|
guinieranalysis
|
python
|
def guinieranalysis(samplenames, qranges=None, qmax_from_shanum=True, prfunctions_postfix='', dist=None,
                    plotguinier=True, graph_extension='.png', dmax=None, dmax_from_shanum=False):
    """Perform Guinier analysis on the samples.

    Inputs:
        samplenames: list of sample names.
        qranges: dictionary of q ranges for each sample. The keys are sample
            names. The special '__default__' key corresponds to all samples
            which do not have a key in the dict.
        qmax_from_shanum: use the qmax determined by the shanum program for
            the GNOM input.
        prfunctions_postfix: the figure showing the P(r) functions will be
            saved as prfunctions_<prfunctions_postfix><graph_extension>.
        dist: the sample-to-detector distance to use.
        plotguinier: if Guinier plots are needed.
        graph_extension: the extension of the saved graph image files.
        dmax: dict of Dmax parameters. If not found or None, Dmax is
            determined automatically using DATGNOM; otherwise GNOM is used.
            The special key '__default__' works as for `qranges`.
        dmax_from_shanum: if True, use the Dmax estimated by shanum as the
            Dmax for GNOM.

    Returns: dict mapping each sample name to a dict of fitted parameters
    (autorg, shanum, gnom and datporod results).
    """
    figpr = plt.figure()
    ip = get_ipython()
    axpr = figpr.add_subplot(1, 1, 1)
    if qranges is None:
        qranges = {'__default__': (0, 1000000)}
    if dmax is None:
        dmax = {'__default__': None}
    if '__default__' not in qranges:
        qranges['__default__'] = (0, 1000000)
    if '__default__' not in dmax:
        dmax['__default__'] = None
    table_autorg = [['Name', 'Rg (nm)', 'I$_0$ (cm$^{-1}$ sr$^{-1}$)',
                     'q$_{min}$ (nm$^{-1}$)', 'q$_{max}$ (nm$^{-1}$)',
                     'qmin*Rg', 'qmax*Rg', 'quality', 'aggregation',
                     'Dmax (nm)', 'q$_{shanum}$ (nm$^{-1}$)']]
    table_gnom = [['Name', 'Rg (nm)', 'I$_0$ (cm$^{-1}$ sr$^{-1}$)',
                   'qmin (nm$^{-1}$)', 'qmax (nm$^{-1}$)',
                   'Dmin (nm)', 'Dmax (nm)', 'Total estimate', 'Porod volume (nm$^3$)']]
    results = {}
    for sn in samplenames:
        if sn not in qranges:
            print('Q-range not given for sample {}: using default one'.format(sn))
            qrange = qranges['__default__']
        else:
            qrange = qranges[sn]
        if sn not in dmax:
            dmax_ = dmax['__default__']
        else:
            dmax_ = dmax[sn]
        print('Using q-range for sample {}: {} <= q <= {}'.format(sn, qrange[0], qrange[1]))
        curve = getsascurve(sn, dist)[0].trim(*qrange).sanitize()
        curve.save(sn + '.dat')
        try:
            Rg, I0, qmin, qmax, quality, aggregation = autorg(sn + '.dat')
        except ValueError:
            print('Error running autorg on %s' % sn)
            continue
        dmax_shanum, nsh, nopt, qmaxopt = shanum(sn + '.dat')
        if qmax_from_shanum:
            curve_trim = curve.trim(qmin, qmaxopt)
        else:
            curve_trim = curve.trim(qmin, qrange[1])
        if dmax_from_shanum:
            # BUGFIX: use the Dmax value estimated by shanum. Previously the
            # boolean flag itself (True) was assigned, which was then passed
            # to gnom() as Rmax.
            dmax_ = dmax_shanum
        curve_trim.save(sn + '_optrange.dat')
        if dmax_ is None:
            print('Calling DATGNOM for sample {} with Rg={}, q-range from {} to {}'.format(
                sn, Rg.val, curve_trim.q.min(), curve_trim.q.max()))
            gnompr, metadata = datgnom(sn + '_optrange.dat', Rg=Rg.val, noprint=True)
        else:
            print('Calling GNOM for sample {} with Rmax={}, q-range from {} to {}'.format(
                sn, dmax_, curve_trim.q.min(), curve_trim.q.max()))
            # BUGFIX: save the GNOM output file, otherwise datporod below
            # cannot find '<sn>_optrange.out' (only datgnom created it).
            gnompr, metadata = gnom(curve_trim, dmax_, outputfilename=sn + '_optrange.out')
        rg, i0, vporod = datporod(sn + '_optrange.out')
        axpr.errorbar(gnompr[:, 0], gnompr[:, 1], gnompr[:, 2], None, label=sn)
        if plotguinier:
            figsample = plt.figure()
            axgnomfit = figsample.add_subplot(1, 2, 1)
            curve.errorbar('b.', axes=axgnomfit, label='measured')
            axgnomfit.errorbar(metadata['qj'], metadata['jexp'], metadata['jerror'], None, 'g.', label='gnom input')
            axgnomfit.loglog(metadata['qj'], metadata['jreg'], 'r-', label='regularized by GNOM')
            figsample.suptitle(sn)
            axgnomfit.set_xlabel('q (nm$^{-1}$)')
            axgnomfit.set_ylabel('$d\Sigma/d\Omega$ (cm$^{-1}$ sr$^{-1}$)')
            axgnomfit.axvline(qmaxopt, 0, 1, linestyle='dashed', color='black', lw=2)
            axgnomfit.grid(True, which='both')
            axgnomfit.axis('tight')
            axgnomfit.legend(loc='best')
            axguinier = figsample.add_subplot(1, 2, 2)
            axguinier.errorbar(curve.q, curve.Intensity, curve.Error, curve.qError, '.', label='Measured')
            q = np.linspace(qmin, qmax, 100)
            # Guinier approximation: I(q) = I0 * exp(-q^2 Rg^2 / 3)
            axguinier.plot(q, I0.val * np.exp(-q ** 2 * Rg.val ** 2 / 3), label='AutoRg')
            axguinier.plot(q, metadata['I0_gnom'].val * np.exp(-q ** 2 * metadata['Rg_gnom'].val ** 2 / 3),
                           label='Gnom')
            axguinier.set_xscale('power', exponent=2)
            axguinier.set_yscale('log')
            axguinier.set_xlabel('q (nm$^{-1}$)')
            axguinier.set_ylabel('$d\Sigma/d\Omega$ (cm$^{-1}$ sr$^{-1}$)')
            axguinier.legend(loc='best')
        # Indices bracketing the Guinier range, padded by 5 points each side.
        idxmin = np.arange(len(curve))[curve.q <= qmin].max()
        idxmax = np.arange(len(curve))[curve.q >= qmax].min()
        idxmin = max(0, idxmin - 5)
        idxmax = min(len(curve) - 1, idxmax + 5)
        if plotguinier:
            curveguinier = curve.trim(curve.q[idxmin], curve.q[idxmax])
            axguinier.axis(xmax=curve.q[idxmax], xmin=curve.q[idxmin], ymin=curveguinier.Intensity.min(),
                           ymax=curveguinier.Intensity.max())
            axguinier.grid(True, which='both')
        table_gnom.append(
            [sn, metadata['Rg_gnom'].tostring(extra_digits=2), metadata['I0_gnom'].tostring(extra_digits=2),
             metadata['qmin'], metadata['qmax'],
             metadata['dmin'], metadata['dmax'], metadata['totalestimate_corrected'], vporod])
        table_autorg.append([sn, Rg.tostring(extra_digits=2), I0, '%.3f' % qmin, '%.3f' % qmax, qmin * Rg, qmax * Rg,
                             '%.1f %%' % (quality * 100), aggregation, '%.3f' % dmax_shanum, '%.3f' % qmaxopt])
        if plotguinier:
            figsample.tight_layout()
            figsample.savefig(os.path.join(ip.user_ns['auximages_dir'], 'guinier_%s%s' % (sn, graph_extension)),
                              dpi=600)
        results[sn] = {
            'Rg_autorg' : Rg, 'I0_autorg': I0,
            'qmin_autorg': qmin, 'qmax_autorg': qmax,
            'quality' : quality, 'aggregation': aggregation,
            'dmax_autorg': dmax_shanum, 'qmax_shanum': qmaxopt,
            'Rg_gnom' : metadata['Rg_gnom'],
            'I0_gnom' : metadata['I0_gnom'],
            'qmin_gnom' : metadata['qmin'],
            'qmax_gnom' : metadata['qmax'],
            'dmin_gnom' : metadata['dmin'],
            'dmax_gnom' : metadata['dmax'],
            'VPorod' : vporod,
        }
    axpr.set_xlabel('r (nm)')
    axpr.set_ylabel('P(r)')
    axpr.legend(loc='best')
    axpr.grid(True, which='both')
    writemarkdown('## Results from autorg and shanum')
    tab = ipy_table.IpyTable(table_autorg)
    tab.apply_theme('basic')
    display(tab)
    writemarkdown('## Results from gnom')
    tab = ipy_table.IpyTable(table_gnom)
    tab.apply_theme('basic')
    if prfunctions_postfix and prfunctions_postfix[0] != '_':
        prfunctions_postfix = '_' + prfunctions_postfix
    figpr.tight_layout()
    figpr.savefig(os.path.join(ip.user_ns['auximages_dir'], 'prfunctions%s%s' % (prfunctions_postfix, graph_extension)),
                  dpi=600)
    display(tab)
    return results
|
Perform Guinier analysis on the samples.
Inputs:
samplenames: list of sample names
qranges: dictionary of q ranges for each sample. The keys are sample names. The special '__default__' key
corresponds to all samples which do not have a key in the dict.
qmax_from_shanum: use the qmax determined by the shanum program for the GNOM input.
prfunctions_postfix: The figure showing the P(r) functions will be saved as
prfunctions_<prfunctions_postfix><graph_extension>
dist: the sample-to-detector distance to use.
plotguinier: if Guinier plots are needed.
graph_extension: the extension of the saved graph image files.
dmax: Dict of Dmax parameters. If not found or None, determine automatically using DATGNOM. If found,
GNOM is used. The special key '__default__' works in a similar fashion as for `qranges`.
|
train
|
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/interpretation.py#L16-L161
|
[
"def autorg(filename, mininterval=None, qminrg=None, qmaxrg=None, noprint=True):\n \"\"\"Execute autorg.\n\n Inputs:\n filename: either a name of an ascii file, or an instance of Curve.\n mininterval: the minimum number of points in the Guinier range\n qminrg: the maximum value of qmin*Rg. Default of autorg is 1.0\n qmaxrg: the maximum value of qmax*Rg. Default of autorg is 1.3\n noprint: if the output of autorg should be redirected to the null \n device.\n\n Outputs:\n Rg as an ErrorValue\n I0 as an ErrorValue\n qmin: the lower end of the chosen Guinier range\n qmax: the upper end of the chosen Guinier range\n quality: the quality parameter, between 0 and 1\n aggregation: float, the extent of aggregation\n \"\"\"\n if isinstance(filename, Curve):\n curve = filename\n with tempfile.NamedTemporaryFile('w+b',\n delete=False) as f:\n curve.save(f)\n filename = f.name\n cmdline = ['autorg', filename, '-f', 'ssv']\n if mininterval is not None:\n cmdline.extend(['--mininterval', str(mininterval)])\n if qminrg is not None:\n cmdline.extend(['--sminrg', str(qminrg)])\n if qmaxrg is not None:\n cmdline.extend(['--smaxrg', str(qmaxrg)])\n result = execute_command(cmdline, noprint=noprint)\n Rg, dRg, I0, dI0, idxfirst, idxlast, quality, aggregation, filename = result[0].split(None, 8)\n try:\n curve\n except NameError:\n curve = Curve.new_from_file(filename)\n else:\n os.unlink(filename)\n return ErrorValue(float(Rg), float(dRg)), ErrorValue(float(I0), float(dI0)), curve.q[int(idxfirst) - 1], curve.q[\n int(idxlast) - 1], float(quality), float(aggregation)\n",
"def shanum(filename, dmax=None, noprint=True):\n \"\"\"Execute the shanum program to determine the optimum qmax\n according to an estimation of the optimum number of Shannon\n channels.\n\n Inputs:\n filename: either a name of an ascii file, or an instance\n of Curve\n dmax: the cut-off of the P(r) function, if known. If None,\n this will be determined by the shanum program\n noprint: if the printout of the program is to be suppressed.\n\n Outputs: dmax, nsh, nopt, qmaxopt\n dmax: the cut-off of the P(r) function.\n nsh: the estimated number of Shannon channels\n nopt: the optimum number of Shannon channels\n qmaxopt: the optimum value of the high-q cutoff\n \"\"\"\n if isinstance(filename, Curve):\n curve = filename\n with tempfile.NamedTemporaryFile('w+b', delete=False) as f:\n curve.save(f)\n filename = f.name\n cmdline = ['shanum', filename]\n if dmax is not None:\n cmdline.append(str(float(dmax)))\n result = execute_command(cmdline, noprint=noprint)\n for l in result:\n l = l.strip()\n if l.startswith('Dmax='):\n dmax = float(l.split('=')[1])\n elif l.startswith('Smax='):\n qmax = float(l.split('=')[1])\n elif l.startswith('Nsh='):\n nsh = float(l.split('=')[1])\n elif l.startswith('Nopt='):\n nopt = float(l.split('=')[1])\n elif l.startswith('Sopt='):\n qmaxopt = float(l.split('=')[1])\n\n return dmax, nsh, nopt, qmaxopt\n",
"def datgnom(filename, Rg=None, noprint=True):\n if Rg is None:\n Rg, I0, idxfirst, idxlast, quality, aggregation = autorg(filename)\n execute_command(['datgnom', filename, '-r', '%f' % float(Rg)],\n noprint=noprint)\n gnomoutputfilename = filename.rsplit('.', 1)[0] + '.out'\n gnomdata, metadata = read_gnom_pr(gnomoutputfilename, get_metadata=True)\n return gnomdata, metadata\n",
"def datporod(gnomoutfile):\n \"\"\"Run datporod and return the estimated Porod volume.\n\n Returns:\n Radius of gyration found in the input file\n I0 found in the input file\n Vporod: the estimated Porod volume\n \"\"\"\n results = subprocess.check_output(['datporod', gnomoutfile]).decode('utf-8').strip().split()\n return float(results[0]), float(results[1]), float(results[2])\n",
"def gnom(curve, Rmax, outputfilename=None, Npoints_realspace=None, initial_alpha=None):\n \"\"\"Run GNOM on the dataset.\n\n Inputs:\n curve: an instance of sastool.classes2.Curve or anything which has a\n save() method, saving the scattering curve to a given .dat file,\n in q=4*pi*sin(theta)/lambda [1/nm] units\n Rmax: the estimated maximum extent of the scattering object, in nm.\n outputfilename: the preferred name of the output file. If not given,\n the .out file produced by gnom will be lost.\n Npoints_realspace: the expected number of points in the real space\n initial_alpha: the initial value of the regularization parameter.\n\n Outputs:\n the same as of read_gnom_pr()\n \"\"\"\n with tempfile.TemporaryDirectory(prefix='credolib_gnom') as td:\n curve.save(os.path.join(td, 'curve.dat'))\n print('Using curve for GNOM: qrange from {} to {}'.format(curve.q.min(), curve.q.max()))\n if Npoints_realspace is None:\n Npoints_realspace = \"\"\n else:\n Npoints_realspace = str(Npoints_realspace)\n if initial_alpha is None:\n initial_alpha = \"\"\n else:\n initial_alpha = str(initial_alpha)\n # GNOM questions and our answers:\n # Printer type [ postscr ] : <ENTER>\n # Input data, first file : <curve.dat in the temporary directory><ENTER>\n # Output file [ gnom.out ] : <gnom.out in the temporary directory><ENTER>\n # No of start points to skip [ 0 ] : 0<ENTER>\n # ... (just GNOM output)\n # ... (just GNOM output)\n # Input data, second file [ none ] : <ENTER>\n # No of end points to omit [ 0 ] : 0<ENTER>\n # ... (just GNOM output)\n # ... 
(just GNOM output)\n # Angular scale (1/2/3/4) [ 1 ] : 2<ENTER>\n # Plot input dataa (Y/N) [ Yes ] : N<ENTER>\n # File containing expert parameters [ none ] : <ENTER>\n # Kernel already calculated (Y/N) [ No ] : N<ENTER>\n # Type of system (0/1/2/3/4/5/6) [ 0 ] : 0<ENTER>\n # Zero condition at r=min (Y/N) [ Yes ] : Y<ENTER>\n # Zero condition at r=max (Y/N) [ Yes ] : Y<ENTER>\n # -- Arbitrary monodisperse system --\n # Rmin=0, Rmax is maximum particle diameter\n # Rmax for evaluating p(r) : <Rmax * 10><ENTER>\n # Number of points in real space [(always different)] : <Npoints_realspace><ENTER>\n # Kernel-storage file name [ kern.bin ] : <ENTER>\n # Experimental setup (0/1/2) [ 0 ] : 0<ENTER>\n # Initial ALPHA [ 0.0 ] : <initial_alpha><ENTER>\n # Plot alpha distribution (Y/N) [ Yes ] : N<ENTER>\n # Plot results (Y/N) [ Yes ] : N<ENTER>\n # ... solution ...\n # Your choice : <ENTER>\n # Evaluate errors (Y/N) [ Yes ] : Y<ENTER>\n # Plot p(r) with errors (Y/N) [ Yes ] : N<ENTER>\n # Next data set (Yes/No/Same) [ No ] : N<ENTER>\n gnominput = \"\\n%s\\n%s\\n0\\n\\n0\\n2\\nN\\n\\nN\\n0\\nY\\nY\\n%f\\n%s\\n\\n0\\n%s\\nN\\nN\\n\\nY\\nN\\nN\\n\" % (\n os.path.join(td, 'curve.dat'), os.path.join(td, 'gnom.out'), 10 * Rmax, Npoints_realspace, initial_alpha)\n result = subprocess.run(['gnom'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n input=gnominput.encode('utf-8'))\n pr, metadata = read_gnom_pr(os.path.join(td, 'gnom.out'), True)\n pr[:, 0] /= 10\n metadata['q'] *= 10\n metadata['qj'] *= 10\n metadata['qmin'] *= 10\n metadata['qmax'] *= 10\n metadata['dmax'] /= 10\n metadata['dmin'] /= 10\n metadata['Rg_guinier'] /= 10\n metadata['Rg_gnom'] /= 10\n if outputfilename is not None:\n shutil.copy(os.path.join(td, 'gnom.out'), outputfilename)\n return pr, metadata\n",
"def getsascurve(samplename:str, dist=None) -> Tuple[Curve, Union[float, str]]:\n ip = get_ipython()\n if dist == 'united':\n data1d = ip.user_ns['_data1dunited'][samplename]\n elif dist is None:\n try:\n data1d = ip.user_ns['_data1dunited'][samplename]\n dist = 'united'\n except KeyError:\n data1d = ip.user_ns['_data1d'][samplename]\n dist = sorted(data1d.keys())[0]\n data1d = data1d[dist]\n else:\n data1d = ip.user_ns['_data1d'][samplename]\n dist = sorted(list(data1d.keys()), key=lambda k:abs(float(dist) - k))[0]\n data1d = data1d[dist]\n return data1d, dist\n",
"def writemarkdown(*args):\n display(Markdown(' '.join(str(a) for a in args)))\n"
] |
__all__ = ['guinieranalysis']
import os
import ipy_table
import matplotlib.pyplot as plt
import numpy as np
from IPython.core.getipython import get_ipython
from IPython.display import display
from .atsas import autorg, gnom, datgnom, shanum, datporod
from .io import getsascurve
from .utils import writemarkdown
|
awacha/credolib
|
credolib/atsas.py
|
autorg
|
python
|
def autorg(filename, mininterval=None, qminrg=None, qmaxrg=None, noprint=True):
    """Execute the ATSAS `autorg` program.

    Inputs:
        filename: name of an ascii file, or an instance of Curve.
        mininterval: minimum number of points in the Guinier range.
        qminrg: maximum allowed value of qmin*Rg (autorg default: 1.0).
        qmaxrg: maximum allowed value of qmax*Rg (autorg default: 1.3).
        noprint: redirect the output of autorg to the null device.

    Outputs: Rg (ErrorValue), I0 (ErrorValue), qmin, qmax, quality,
    aggregation.
    """
    curve = None
    if isinstance(filename, Curve):
        # Dump the curve into a temporary file for the external program.
        curve = filename
        with tempfile.NamedTemporaryFile('w+b', delete=False) as tmpfile:
            curve.save(tmpfile)
            filename = tmpfile.name
    cmdline = ['autorg', filename, '-f', 'ssv']
    for option, value in [('--mininterval', mininterval),
                          ('--sminrg', qminrg),
                          ('--smaxrg', qmaxrg)]:
        if value is not None:
            cmdline.extend([option, str(value)])
    result = execute_command(cmdline, noprint=noprint)
    Rg, dRg, I0, dI0, idxfirst, idxlast, quality, aggregation, filename = result[0].split(None, 8)
    if curve is None:
        # The input was a file name: reload it to map indices to q values.
        curve = Curve.new_from_file(filename)
    else:
        # The input was a Curve: remove our temporary file.
        os.unlink(filename)
    return (ErrorValue(float(Rg), float(dRg)), ErrorValue(float(I0), float(dI0)),
            curve.q[int(idxfirst) - 1], curve.q[int(idxlast) - 1],
            float(quality), float(aggregation))
|
Execute autorg.
Inputs:
filename: either a name of an ascii file, or an instance of Curve.
mininterval: the minimum number of points in the Guinier range
qminrg: the maximum value of qmin*Rg. Default of autorg is 1.0
qmaxrg: the maximum value of qmax*Rg. Default of autorg is 1.3
noprint: if the output of autorg should be redirected to the null
device.
Outputs:
Rg as an ErrorValue
I0 as an ErrorValue
qmin: the lower end of the chosen Guinier range
qmax: the upper end of the chosen Guinier range
quality: the quality parameter, between 0 and 1
aggregation: float, the extent of aggregation
|
train
|
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/atsas.py#L156-L197
|
[
"def execute_command(cmd, input_to_command=None, eat_output=False, noprint=False):\n if isinstance(input_to_command, str):\n stdin = subprocess.PIPE\n else:\n stdin = input_to_command\n popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=stdin)\n if (isinstance(input_to_command, str)):\n input_to_command = input_to_command.encode('utf-8')\n if isinstance(input_to_command, bytes):\n popen.stdin.write(input_to_command)\n lines_iterator = itertools.chain(popen.stdout, popen.stderr)\n resultinglines = []\n for line in lines_iterator:\n if not noprint:\n if not eat_output:\n print(str(line[:-1], encoding='utf-8'), flush=True)\n else:\n print(\".\", end='', flush=True)\n resultinglines.append(str(line[:-1], encoding='utf-8'))\n return resultinglines\n"
] |
__all__ = ['read_gnom_pr', 'execute_command', 'autorg', 'shanum', 'datgnom', 'dammif', 'bodies', 'datcmp', 'datporod',
'gnom']
import itertools
import os
import re
import shutil
import subprocess
import tempfile
import ipy_table
import numpy as np
from IPython.display import display
from sastool.classes2.curve import Curve
from sastool.misc.errorvalue import ErrorValue
def read_gnom_pr(filename, get_metadata=False):
    """Parse the textual output (.out) file of the ATSAS GNOM program.

    Reads the 'Final results' section: the quality-parameter table, the
    fitted angular and real-space ranges, the regularized scattering curve
    and the P(r) distance distribution function.

    Inputs:
        filename: name of the GNOM output file.
        get_metadata: if True, return (pr_data, metadata_dict); otherwise
            a one-element tuple (pr_data,).

    pr_data is a numpy array with columns r, P(r) and its error.  Raises
    AssertionError when the file does not follow the expected layout.
    """
    metadata = {}
    with open(filename, 'rt', encoding='utf-8') as f:
        l = f.readline()
        # Skip everything up to the 'Final results' marker.
        while 'Final results' not in l:
            l = f.readline()
        assert (not f.readline().strip())  # skip empty line
        # NOTE(review): header spacing may have been mangled in this copy of
        # the source -- verify the exact column header against a real .out file.
        assert (f.readline().strip() == 'Parameter DISCRP OSCILL STABIL SYSDEV POSITV VALCEN')
        # Quality parameter table: one row per quantity (Weight, Sigma, ...),
        # one column per criterion.
        parameters = {'DISCRP': {}, 'OSCILL': {}, 'STABIL': {}, 'SYSDEV': {}, 'POSITV': {}, 'VALCEN': {}}
        for i in range(6):
            line = f.readline().strip().split()
            if i == 4:
                # this line contains only a dashed line: "- - - - - - etc."
                assert (all([l == '-' for l in line]))
                continue
            what = line[0]
            (parameters['DISCRP'][what], parameters['OSCILL'][what],
             parameters['STABIL'][what], parameters['SYSDEV'][what],
             parameters['POSITV'][what], parameters['VALCEN'][what]) = tuple([
                float(x) for x in line[1:]])
        # Weighted total estimate recomputed from the per-criterion values.
        te = tw = 0
        for p in parameters:
            par = parameters[p]
            par['Estimate_corrected'] = np.exp(-(par['Ideal'] - par['Current']) ** 2 / par['Sigma'] ** 2)
            te += par['Estimate_corrected'] * par['Weight']
            tw += par['Weight']
        metadata['totalestimate_corrected'] = te / tw
        metadata['parameters'] = parameters
        assert (not f.readline().strip())  # skip empty line
        # Angular (q) range used by GNOM.
        match = re.match(r'Angular\s+range\s+:\s+from\s+(?P<qmin>\d+\.\d+)\s+to\s+(?P<qmax>\d+\.\d+)',
                         f.readline().strip())
        assert (match is not None)
        metadata['qmin'] = float(match.groupdict()['qmin'])
        metadata['qmax'] = float(match.groupdict()['qmax'])
        # Real-space (r) range of the P(r) function.
        match = re.match(r'Real\s+space\s+range\s+:\s+from\s+(?P<dmin>\d+\.\d+)\s+to\s+(?P<dmax>\d+\.\d+)',
                         f.readline().strip())
        assert (match is not None)
        metadata['dmin'] = float(match.groupdict()['dmin'])
        metadata['dmax'] = float(match.groupdict()['dmax'])
        assert (not f.readline().strip())
        match = re.match(r'Highest ALPHA \(theor\) :\s+(?P<highestalpha>\d+\.\d+E[+-]?\d+)', f.readline().strip())
        assert (match is not None)
        metadata['highestalpha'] = float(match.groupdict()['highestalpha'])
        # Regularization parameter and the Guinier-like Rg/I0 reported by GNOM.
        match = re.match(
            r'Current ALPHA\s+:\s+(?P<currentalpha>\d+\.\d+E[+-]\d+)\s+Rg : (?P<Rg>\d+\.\d+E[+-]\d+)\s+I\(0\) :\s+(?P<I0>\d+\.\d+E[+-]\d+)',
            f.readline().strip())
        assert (match is not None)
        metadata['currentalpha'] = float(match.groupdict()['currentalpha'])
        metadata['Rg_guinier'] = float(match.groupdict()['Rg'])
        metadata['I0_guinier'] = float(match.groupdict()['I0'])
        assert (not f.readline().strip())  # skip empty line
        match = re.match(
            r'Total estimate : (?P<totalestimate>\d+\.\d+)\s+ which is \s+(?P<qualitystring>.*)\s+solution',
            f.readline().strip())
        assert (match is not None)
        metadata['totalestimate'] = float(match.groupdict()['totalestimate'])
        metadata['qualitystring'] = match.groupdict()['qualitystring']
        assert (not f.readline().strip())  # skip empty line
        assert (f.readline().strip().split() == ['S', 'J', 'EXP', 'ERROR', 'J', 'REG', 'I', 'REG'])
        assert (not f.readline().strip())  # skip empty line
        # Regularized curve table: rows with 2 columns are extrapolated points
        # (S and I REG only); rows with more carry the experimental data too.
        s = []
        sj = []
        jexp = []
        jerror = []
        jreg = []
        ireg = []
        l = f.readline()
        while l.strip():
            terms = [float(x) for x in l.strip().split()]
            s.append(terms[0])
            ireg.append(terms[-1])
            if len(terms) > 2:
                sj.append(terms[0])
                jexp.append(terms[1])
                jerror.append(terms[2])
                jreg.append(terms[3])
            l = f.readline()
        metadata['q'] = np.array(s)
        metadata['qj'] = np.array(sj)
        metadata['jexp'] = np.array(jexp)
        metadata['jerror'] = np.array(jerror)
        metadata['jreg'] = np.array(jreg)
        metadata['ireg'] = np.array(ireg)
        # P(r) section: R, P(R), ERROR columns until a blank line or the
        # 'Reciprocal space' summary line.
        assert ('Distance distribution function of particle' == f.readline().strip())
        assert (not f.readline().strip())  # skip empty line
        assert (not f.readline().strip())  # skip empty line
        assert (f.readline().strip().split() == ['R', 'P(R)', 'ERROR'])
        assert (not f.readline().strip())  # skip empty line
        data = []
        while True:
            l = f.readline()
            if not l.strip():
                break
            # NOTE(review): this second emptiness check is unreachable (the
            # break above already handled blank lines).
            if not l.strip():
                continue
            try:
                data.append([float(f_) for f_ in l.strip().split()])
            except ValueError:
                # Non-numeric line: stop at the summary, otherwise the line
                # is silently skipped.
                if 'Reciprocal space' in l:
                    break
            except:
                raise
        l = f.readline()
        # Final real-space Rg and I(0) with their uncertainties.
        match = re.match(
            r'Real space: Rg =\s+(?P<Rg>\d+\.\d+(E[+-]?\d+)?) \+- (?P<dRg>\d+\.\d+(E[+-]?\d+)?)\s+I\(0\) =\s+(?P<I0>\d+\.\d+(E[+-]?\d+)?) \+-\s+(?P<dI0>\d+\.\d+(E[+-]?\d+)?)',
            l.strip())
        assert (match is not None)
        metadata['Rg_gnom'] = ErrorValue(float(match.groupdict()['Rg']), float(match.groupdict()['dRg']))
        metadata['I0_gnom'] = ErrorValue(float(match.groupdict()['I0']), float(match.groupdict()['dI0']))
    if get_metadata:
        return (np.array(data), metadata)
    else:
        return (np.array(data),)
def execute_command(cmd, input_to_command=None, eat_output=False, noprint=False):
    """Run an external command and collect its output lines.

    Inputs:
        cmd: the command line as a list of strings (passed to subprocess.Popen).
        input_to_command: data fed to the child's standard input. A str is
            encoded as UTF-8 and written to a pipe; anything else (e.g. a
            file object or None) is handed to Popen as stdin directly.
        eat_output: if True, print a dot per output line instead of the line.
        noprint: if True, do not echo anything while the command runs.

    Returns: list of the command's stdout lines followed by its stderr
    lines, decoded as UTF-8, each with its final character removed.
    """
    if isinstance(input_to_command, str):
        stdin = subprocess.PIPE
    else:
        stdin = input_to_command
    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=stdin)
    if (isinstance(input_to_command, str)):
        input_to_command = input_to_command.encode('utf-8')
    if isinstance(input_to_command, bytes):
        # NOTE(review): stdin is written in one go and never closed, and
        # stdout is drained before stderr below; for large inputs/outputs
        # this can deadlock. Popen.communicate() would be safer -- confirm
        # before changing.
        popen.stdin.write(input_to_command)
    # stdout is consumed completely before stderr (no time-interleaving).
    lines_iterator = itertools.chain(popen.stdout, popen.stderr)
    resultinglines = []
    for line in lines_iterator:
        if not noprint:
            if not eat_output:
                print(str(line[:-1], encoding='utf-8'), flush=True)
            else:
                print(".", end='', flush=True)
        # line[:-1] strips the trailing newline byte.
        resultinglines.append(str(line[:-1], encoding='utf-8'))
    return resultinglines
def datgnom(filename, Rg=None, noprint=True):
    """Run the ATSAS `datgnom` utility on a scattering data file.

    If *Rg* is not given, it is determined first with autorg().  Returns
    the P(r) data and the metadata dict parsed from the generated .out
    file (which is named after *filename* with its extension replaced).
    """
    if Rg is None:
        Rg = autorg(filename)[0]
    execute_command(['datgnom', filename, '-r', '%f' % float(Rg)],
                    noprint=noprint)
    outputname = filename.rsplit('.', 1)[0] + '.out'
    return read_gnom_pr(outputname, get_metadata=True)
def dammif(gnomoutputfilename, prefix=None, mode='fast', symmetry='P1', N=None,
           noprint=True):
    """Run dammif (ATSAS ab-initio shape determination) on a GNOM .out file.

    If N is None a single run is performed and the name of the resulting
    PDB file is returned; otherwise N independent runs are performed and
    the list of the N resulting PDB file names is returned.
    """
    if prefix is None:
        prefix = 'dammif_' + gnomoutputfilename.rsplit('.', 1)[0]

    def _single_run(runprefix):
        # one dammif invocation; the model is written to <runprefix>-1.pdb
        execute_command(['dammif', '--prefix=%s' % runprefix, '--omit-solvent',
                         '--mode=%s' % mode, '--symmetry=%s' % symmetry,
                         '--unit=NANOMETER', gnomoutputfilename],
                        noprint=noprint)
        return runprefix + '-1.pdb'

    if N is None:
        return _single_run(prefix)
    return [_single_run('%s_%03d' % (prefix, i)) for i in range(N)]
def shanum(filename, dmax=None, noprint=True):
    """Execute the shanum program to determine the optimum qmax
    according to an estimation of the optimum number of Shannon
    channels.

    Inputs:
        filename: either a name of an ascii file, or an instance
            of Curve
        dmax: the cut-off of the P(r) function, if known. If None,
            this will be determined by the shanum program
        noprint: if the printout of the program is to be suppressed.

    Outputs: dmax, nsh, nopt, qmaxopt
        dmax: the cut-off of the P(r) function.
        nsh: the estimated number of Shannon channels
        nopt: the optimum number of Shannon channels
        qmaxopt: the optimum value of the high-q cutoff

    Values that cannot be parsed from the program output are returned
    as None (previously a missing line raised NameError).
    """
    if isinstance(filename, Curve):
        # save the curve into a temporary file for shanum to read
        curve = filename
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            curve.save(f)
            filename = f.name
    cmdline = ['shanum', filename]
    if dmax is not None:
        cmdline.append(str(float(dmax)))
    result = execute_command(cmdline, noprint=noprint)
    # pre-initialize so that missing lines do not raise NameError at return
    nsh = nopt = qmaxopt = None
    for l in result:
        l = l.strip()
        if l.startswith('Dmax='):
            dmax = float(l.split('=')[1])
        elif l.startswith('Nsh='):
            nsh = float(l.split('=')[1])
        elif l.startswith('Nopt='):
            nopt = float(l.split('=')[1])
        elif l.startswith('Sopt='):
            qmaxopt = float(l.split('=')[1])
    return dmax, nsh, nopt, qmaxopt
def bodies(filename, bodytypes=None, prefix=None, fit_timeout=10, Ndummyatoms=2000, noprint=True):
    """Fit simple geometrical bodies to a scattering curve with the ATSAS
    'bodies' program and build dummy-atom (DAM) models of the fitted shapes.

    Inputs:
        filename: name of an ascii data file, or an instance of Curve (in
            the latter case *prefix* is mandatory)
        bodytypes: list of body type names to try (default: all supported)
        prefix: output file name prefix (default: *filename* without extension)
        fit_timeout: timeout in seconds for each 'bodies' subprocess
        Ndummyatoms: number of dummy atoms in the generated DAM model
        noprint: if True, do not echo the 'bodies' program output

    Output: dict mapping body type -> dict of fitted/expected parameters,
        or None if a fit yielded no chi-square value. An ipy_table summary
        of all fits is also displayed.
    """
    BODIES = ['ellipsoid', 'rotation-ellipsoid', 'cylinder', 'elliptic-cylinder', 'hollow-cylinder', 'parallelepiped',
              'hollow-sphere', 'dumbbell']
    if bodytypes is None:
        bodytypes = BODIES
    unknownbodies = [b for b in bodytypes if b not in BODIES]
    if unknownbodies:
        raise ValueError('Unknown body type(s): ' + ', '.join(unknownbodies))
    if isinstance(filename, Curve):
        # save the curve into a temporary file for 'bodies' to read
        curve = filename
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            curve.save(f)
            filename = f.name
        assert (prefix is not None)
    else:
        if prefix is None:
            prefix = filename.rsplit('.', 1)[0]
    fittingresults = {}
    for b in bodytypes:
        print('Fitting geometrical body %s' % b, flush=True)
        # drive the interactive 'bodies' program through stdin:
        # 'f' = fit mode, file name, 1-based body index, defaults, prefix
        p = subprocess.Popen(['bodies'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            stdout, stderr = p.communicate(input=b'f\n%s\n%d\n\n\n\n\n\n\n%s\n' % (
                filename.encode('utf-8'), BODIES.index(b) + 1, prefix.encode('utf-8')), timeout=fit_timeout)
        except subprocess.TimeoutExpired:
            print('Fitting timed out.')
            continue
        stdout = stdout.decode('utf-8')
        stderr = stderr.decode('utf-8')
        if stderr:
            print('Error: ', stderr, flush=True)
        # parse the textual report of 'bodies' line by line; the flags below
        # track which section of the report we are currently in
        printing_on = False
        parameter_recording_on = False
        bodyparameters = []
        bodyparameternames = []
        fittingresults[b] = {}
        for s in stdout.split('\n'):
            if s.startswith(' Input file name'):
                printing_on = True
            if printing_on and not noprint:
                print(s, flush=True)
            if s.startswith(' Body type'):
                parameter_recording_on = True
            if s.startswith(' Parameter \'scale\''):
                # 'scale' marks the end of the geometrical parameters
                parameter_recording_on = False
            if parameter_recording_on and s.startswith(' Parameter \''):
                bodyparameters.append(float(s.split(':')[1].strip()))
                # the parameter name is the text between the first two quotes
                bodyparameternames.append(s[s.index("'") + 1:(s.index("'") + s[s.index("'") + 1:].index("'") + 1)])
            if s.startswith(' Expected Radius of Gyration'):
                fittingresults[b]['Rgexp'] = float(s.split(':')[1].strip())
            elif s.startswith(' Expected I0'):
                fittingresults[b]['I0exp'] = float(s.split(':')[1].strip())
            elif s.startswith(' Expected Volume'):
                fittingresults[b]['Volexp'] = float(s.split(':')[1].strip())
            elif s.startswith(' Fit Radius of Gyration'):
                fittingresults[b]['Rgfit'] = float(s.split(':')[1].strip())
            elif s.startswith(' Fit I0'):
                fittingresults[b]['I0fit'] = float(s.split(':')[1].strip())
            elif s.startswith(' Fit Volume'):
                fittingresults[b]['Volfit'] = float(s.split(':')[1].strip())
            elif s.startswith(' Goodness of Fit (chi-square)'):
                fittingresults[b]['Chi2'] = float(s.split(':')[1].strip())
        if 'Chi2' not in fittingresults[b]:
            # no chi-square in the report: 'bodies' could not fit the file
            print('Error: cannot open file {}'.format(filename))
            return
        fittingresults[b]['stdout_from_bodies'] = stdout
        fittingresults[b]['type'] = b
        fittingresults[b]['bodyparameters'] = bodyparameters
        fittingresults[b]['bodyparameternames'] = bodyparameternames
        print('Creating DAM model')
        damoutputfile = prefix + '-' + b + '.pdb'
        # second 'bodies' run: 'd' = DAM mode, body index, parameters
        # scaled to Angstrom (x10), then atom count and output file
        p = subprocess.Popen(['bodies'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            stdout, stderr = p.communicate(input=b'd\n%d\n' % (BODIES.index(b) + 1) + b'\n'.join(
                [b'%6f' % (10 * v) for v in bodyparameters]) + b'\n1\n%d\n%s\n' % (
                Ndummyatoms, damoutputfile.encode('utf-8')), timeout=fit_timeout)
        except subprocess.TimeoutExpired:
            print('Error creating DAM model.')
        if stderr:
            print(stderr)
    tab = [['Body', 'Goodness of Fit ($\chi^2$)', 'Rg mismatch', 'I0 mismatch', 'Volume mismatch']]
    for b in sorted(fittingresults):
        tab.append([
            fittingresults[b]['type'] + ' (' + ', '.join(
                ['%s=%.3f nm' % (var, val) for var, val in zip(fittingresults[b]['bodyparameternames'],
                                                               fittingresults[b]['bodyparameters'])]) + ')',
            fittingresults[b]['Chi2'],
            '%.2f nm' % (fittingresults[b]['Rgfit'] - fittingresults[b]['Rgexp']),
            '%5g cm$^{-1}$ sr$^{-1}$' % (fittingresults[b]['I0fit'] - fittingresults[b]['I0exp']),
            '%.2f nm^3' % (fittingresults[b]['Volfit'] - fittingresults[b]['Volexp']),
        ])
    tab = ipy_table.IpyTable(tab)
    tab.apply_theme('basic')
    display(tab)
    return fittingresults
def datcmp(*curves, alpha=None, adjust=None, test='CORMAP'):
    """Run datcmp on the scattering curves.

    Inputs:
        *curves: scattering curves as positional arguments
        alpha: confidence parameter
        adjust: adjustment type (string), see the help of datcmp for details
        test: test (string), see the help of datcmp for details

    Outputs:
        matC: the C matrix
        matp: the matrix of the p values comparing the i-th and j-th exposure
        matpadj: adjusted p-matrix of the exposures
        ok: list of the same length as the number of curves. If True, the
            given curve does not differ significantly from the others.

    Entries that cannot be parsed from the datcmp output remain NaN.
    """
    if len({len(c) for c in curves}) != 1:
        raise ValueError('All curves have to be of the same length.')
    datcmpargs = []
    if alpha is not None:
        datcmpargs.append('--alpha=%f' % alpha)
    if adjust is not None:
        datcmpargs.append('--adjust=%s' % adjust)
    if test is not None:
        datcmpargs.append('--test=%s' % test)
    with tempfile.TemporaryDirectory(prefix='credolib_datcmp') as td:
        # write each curve as a 3-column (q, I, dI) text file for datcmp
        for i, c in enumerate(curves):
            mat = np.zeros((len(c), 3))
            mat[:, 0] = c.q
            mat[:, 1] = c.Intensity
            mat[:, 2] = c.Error
            np.savetxt(os.path.join(td, 'curve_%d.dat' % i), mat)
        # NaN marks pairs for which no result could be parsed
        matC = np.zeros((len(curves), len(curves))) + np.nan
        matp = np.zeros((len(curves), len(curves))) + np.nan
        matpadj = np.zeros((len(curves), len(curves))) + np.nan
        ok = np.zeros(len(curves)) + np.nan
        try:
            results = subprocess.check_output(
                ['datcmp'] + datcmpargs + [os.path.join(td, 'curve_%d.dat' % i) for i in range(len(curves))]).decode(
                'utf-8')
        except subprocess.CalledProcessError:
            # datcmp failed: fall through and return the all-NaN matrices
            pass
        else:
            for l in results.split('\n'):
                # pairwise comparison lines: "<i> vs. <j>  <C>  <p>  <adjp>"
                m = re.match(
                    '^\s*(?P<i>\d+)\s*vs\.\s*(?P<j>\d+)\s*(?P<C>\d*\.\d*)\s*(?P<p>\d*\.\d*)\s*(?P<adjp>\d*\.\d*)[\s\*]{1}$',
                    l)
                if m is not None:
                    # datcmp indices are 1-based; the matrices are symmetric
                    i = int(m.group('i')) - 1
                    j = int(m.group('j')) - 1
                    matC[i, j] = matC[j, i] = float(m.group('C'))
                    matp[i, j] = matp[j, i] = float(m.group('p'))
                    matpadj[i, j] = matpadj[j, i] = float(m.group('adjp'))
                else:
                    # per-curve acceptance line: a trailing '*' means accepted
                    m = re.match('\s*(?P<i>\d+)(?P<ack>[\*\s]{1})\s*', l)
                    if m is not None:
                        ok[int(m.group('i')) - 1] = (m.group('ack') == '*')
    return matC, matp, matpadj, ok
def datporod(gnomoutfile):
    """Run the ATSAS datporod utility on a GNOM output file.

    Returns a 3-tuple:
        Rg: the radius of gyration found in the input file
        I0: the forward scattering intensity found in the input file
        Vporod: the estimated Porod volume
    """
    output = subprocess.check_output(['datporod', gnomoutfile])
    tokens = output.decode('utf-8').strip().split()
    return float(tokens[0]), float(tokens[1]), float(tokens[2])
def gnom(curve, Rmax, outputfilename=None, Npoints_realspace=None, initial_alpha=None):
    """Run GNOM on the dataset.

    Inputs:
        curve: an instance of sastool.classes2.Curve or anything which has a
            save() method, saving the scattering curve to a given .dat file,
            in q=4*pi*sin(theta)/lambda [1/nm] units
        Rmax: the estimated maximum extent of the scattering object, in nm.
        outputfilename: the preferred name of the output file. If not given,
            the .out file produced by gnom will be lost.
        Npoints_realspace: the expected number of points in the real space
        initial_alpha: the initial value of the regularization parameter.

    Outputs:
        the same as of read_gnom_pr()
    """
    with tempfile.TemporaryDirectory(prefix='credolib_gnom') as td:
        curve.save(os.path.join(td, 'curve.dat'))
        print('Using curve for GNOM: qrange from {} to {}'.format(curve.q.min(), curve.q.max()))
        # an empty string answer makes GNOM fall back to its own default
        if Npoints_realspace is None:
            Npoints_realspace = ""
        else:
            Npoints_realspace = str(Npoints_realspace)
        if initial_alpha is None:
            initial_alpha = ""
        else:
            initial_alpha = str(initial_alpha)
        # GNOM questions and our answers:
        # Printer type [ postscr ] : <ENTER>
        # Input data, first file : <curve.dat in the temporary directory><ENTER>
        # Output file [ gnom.out ] : <gnom.out in the temporary directory><ENTER>
        # No of start points to skip [ 0 ] : 0<ENTER>
        # ... (just GNOM output)
        # ... (just GNOM output)
        # Input data, second file [ none ] : <ENTER>
        # No of end points to omit [ 0 ] : 0<ENTER>
        # ... (just GNOM output)
        # ... (just GNOM output)
        # Angular scale (1/2/3/4) [ 1 ] : 2<ENTER>
        # Plot input dataa (Y/N) [ Yes ] : N<ENTER>
        # File containing expert parameters [ none ] : <ENTER>
        # Kernel already calculated (Y/N) [ No ] : N<ENTER>
        # Type of system (0/1/2/3/4/5/6) [ 0 ] : 0<ENTER>
        # Zero condition at r=min (Y/N) [ Yes ] : Y<ENTER>
        # Zero condition at r=max (Y/N) [ Yes ] : Y<ENTER>
        # -- Arbitrary monodisperse system --
        # Rmin=0, Rmax is maximum particle diameter
        # Rmax for evaluating p(r) : <Rmax * 10><ENTER>
        # Number of points in real space [(always different)] : <Npoints_realspace><ENTER>
        # Kernel-storage file name [ kern.bin ] : <ENTER>
        # Experimental setup (0/1/2) [ 0 ] : 0<ENTER>
        # Initial ALPHA [ 0.0 ] : <initial_alpha><ENTER>
        # Plot alpha distribution (Y/N) [ Yes ] : N<ENTER>
        # Plot results (Y/N) [ Yes ] : N<ENTER>
        # ... solution ...
        # Your choice : <ENTER>
        # Evaluate errors (Y/N) [ Yes ] : Y<ENTER>
        # Plot p(r) with errors (Y/N) [ Yes ] : N<ENTER>
        # Next data set (Yes/No/Same) [ No ] : N<ENTER>
        gnominput = "\n%s\n%s\n0\n\n0\n2\nN\n\nN\n0\nY\nY\n%f\n%s\n\n0\n%s\nN\nN\n\nY\nN\nN\n" % (
            os.path.join(td, 'curve.dat'), os.path.join(td, 'gnom.out'), 10 * Rmax, Npoints_realspace, initial_alpha)
        result = subprocess.run(['gnom'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                input=gnominput.encode('utf-8'))
        pr, metadata = read_gnom_pr(os.path.join(td, 'gnom.out'), True)
        # GNOM works in Angstrom units: convert lengths back to nm (divide
        # by 10) and momentum transfer back to 1/nm (multiply by 10)
        pr[:, 0] /= 10
        metadata['q'] *= 10
        metadata['qj'] *= 10
        metadata['qmin'] *= 10
        metadata['qmax'] *= 10
        metadata['dmax'] /= 10
        metadata['dmin'] /= 10
        metadata['Rg_guinier'] /= 10
        metadata['Rg_gnom'] /= 10
        if outputfilename is not None:
            # preserve the .out file before the temporary directory vanishes
            shutil.copy(os.path.join(td, 'gnom.out'), outputfilename)
    return pr, metadata
|
awacha/credolib
|
credolib/atsas.py
|
shanum
|
python
|
def shanum(filename, dmax=None, noprint=True):
if isinstance(filename, Curve):
curve = filename
with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
curve.save(f)
filename = f.name
cmdline = ['shanum', filename]
if dmax is not None:
cmdline.append(str(float(dmax)))
result = execute_command(cmdline, noprint=noprint)
for l in result:
l = l.strip()
if l.startswith('Dmax='):
dmax = float(l.split('=')[1])
elif l.startswith('Smax='):
qmax = float(l.split('=')[1])
elif l.startswith('Nsh='):
nsh = float(l.split('=')[1])
elif l.startswith('Nopt='):
nopt = float(l.split('=')[1])
elif l.startswith('Sopt='):
qmaxopt = float(l.split('=')[1])
return dmax, nsh, nopt, qmaxopt
|
Execute the shanum program to determine the optimum qmax
according to an estimation of the optimum number of Shannon
channels.
Inputs:
filename: either a name of an ascii file, or an instance
of Curve
dmax: the cut-off of the P(r) function, if known. If None,
this will be determined by the shanum program
noprint: if the printout of the program is to be suppressed.
Outputs: dmax, nsh, nopt, qmaxopt
dmax: the cut-off of the P(r) function.
nsh: the estimated number of Shannon channels
nopt: the optimum number of Shannon channels
qmaxopt: the optimum value of the high-q cutoff
|
train
|
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/atsas.py#L231-L271
|
[
"def execute_command(cmd, input_to_command=None, eat_output=False, noprint=False):\n if isinstance(input_to_command, str):\n stdin = subprocess.PIPE\n else:\n stdin = input_to_command\n popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=stdin)\n if (isinstance(input_to_command, str)):\n input_to_command = input_to_command.encode('utf-8')\n if isinstance(input_to_command, bytes):\n popen.stdin.write(input_to_command)\n lines_iterator = itertools.chain(popen.stdout, popen.stderr)\n resultinglines = []\n for line in lines_iterator:\n if not noprint:\n if not eat_output:\n print(str(line[:-1], encoding='utf-8'), flush=True)\n else:\n print(\".\", end='', flush=True)\n resultinglines.append(str(line[:-1], encoding='utf-8'))\n return resultinglines\n"
] |
__all__ = ['read_gnom_pr', 'execute_command', 'autorg', 'shanum', 'datgnom', 'dammif', 'bodies', 'datcmp', 'datporod',
'gnom']
import itertools
import os
import re
import shutil
import subprocess
import tempfile
import ipy_table
import numpy as np
from IPython.display import display
from sastool.classes2.curve import Curve
from sastool.misc.errorvalue import ErrorValue
def read_gnom_pr(filename, get_metadata=False):
    """Parse the P(r) function and fit diagnostics from a GNOM .out file.

    Inputs:
        filename: name of the GNOM output (.out) file
        get_metadata: if True, also return the parsed metadata dict

    Outputs:
        (data,) or (data, metadata) where data is an Nx3 numpy array of
        columns R, P(R), ERROR. The metadata dict collects the quality
        parameters, q-ranges, Rg/I0 estimates and the regularized fit.

    The parser walks the file strictly line by line; the assert statements
    verify that the file layout matches what this code expects.
    """
    metadata = {}
    with open(filename, 'rt', encoding='utf-8') as f:
        # skip ahead to the 'Final results' section
        l = f.readline()
        while 'Final results' not in l:
            l = f.readline()
        assert (not f.readline().strip())  # skip empty line
        assert (f.readline().strip() == 'Parameter DISCRP OSCILL STABIL SYSDEV POSITV VALCEN')
        parameters = {'DISCRP': {}, 'OSCILL': {}, 'STABIL': {}, 'SYSDEV': {}, 'POSITV': {}, 'VALCEN': {}}
        # six rows: Ideal/Current/Sigma/Weight values plus one separator row
        for i in range(6):
            line = f.readline().strip().split()
            if i == 4:
                # this line contains only a dashed line: "- - - - - - etc."
                assert (all([l == '-' for l in line]))
                continue
            what = line[0]
            (parameters['DISCRP'][what], parameters['OSCILL'][what],
             parameters['STABIL'][what], parameters['SYSDEV'][what],
             parameters['POSITV'][what], parameters['VALCEN'][what]) = tuple([
                float(x) for x in line[1:]])
        # weighted total estimate recomputed from the per-criterion values
        te = tw = 0
        for p in parameters:
            par = parameters[p]
            par['Estimate_corrected'] = np.exp(-(par['Ideal'] - par['Current']) ** 2 / par['Sigma'] ** 2)
            te += par['Estimate_corrected'] * par['Weight']
            tw += par['Weight']
        metadata['totalestimate_corrected'] = te / tw
        metadata['parameters'] = parameters
        assert (not f.readline().strip())  # skip empty line
        match = re.match(r'Angular\s+range\s+:\s+from\s+(?P<qmin>\d+\.\d+)\s+to\s+(?P<qmax>\d+\.\d+)',
                         f.readline().strip())
        assert (match is not None)
        metadata['qmin'] = float(match.groupdict()['qmin'])
        metadata['qmax'] = float(match.groupdict()['qmax'])
        match = re.match(r'Real\s+space\s+range\s+:\s+from\s+(?P<dmin>\d+\.\d+)\s+to\s+(?P<dmax>\d+\.\d+)',
                         f.readline().strip())
        assert (match is not None)
        metadata['dmin'] = float(match.groupdict()['dmin'])
        metadata['dmax'] = float(match.groupdict()['dmax'])
        assert (not f.readline().strip())
        match = re.match(r'Highest ALPHA \(theor\) :\s+(?P<highestalpha>\d+\.\d+E[+-]?\d+)', f.readline().strip())
        assert (match is not None)
        metadata['highestalpha'] = float(match.groupdict()['highestalpha'])
        match = re.match(
            r'Current ALPHA\s+:\s+(?P<currentalpha>\d+\.\d+E[+-]\d+)\s+Rg : (?P<Rg>\d+\.\d+E[+-]\d+)\s+I\(0\) :\s+(?P<I0>\d+\.\d+E[+-]\d+)',
            f.readline().strip())
        assert (match is not None)
        metadata['currentalpha'] = float(match.groupdict()['currentalpha'])
        metadata['Rg_guinier'] = float(match.groupdict()['Rg'])
        metadata['I0_guinier'] = float(match.groupdict()['I0'])
        assert (not f.readline().strip())  # skip empty line
        match = re.match(
            r'Total estimate : (?P<totalestimate>\d+\.\d+)\s+ which is \s+(?P<qualitystring>.*)\s+solution',
            f.readline().strip())
        assert (match is not None)
        metadata['totalestimate'] = float(match.groupdict()['totalestimate'])
        metadata['qualitystring'] = match.groupdict()['qualitystring']
        assert (not f.readline().strip())  # skip empty line
        assert (f.readline().strip().split() == ['S', 'J', 'EXP', 'ERROR', 'J', 'REG', 'I', 'REG'])
        assert (not f.readline().strip())  # skip empty line
        # reciprocal-space table: every row has q and I_reg; rows inside
        # the fitted range additionally carry J_exp, error and J_reg
        s = []
        sj = []
        jexp = []
        jerror = []
        jreg = []
        ireg = []
        l = f.readline()
        while l.strip():
            terms = [float(x) for x in l.strip().split()]
            s.append(terms[0])
            ireg.append(terms[-1])
            if len(terms) > 2:
                sj.append(terms[0])
                jexp.append(terms[1])
                jerror.append(terms[2])
                jreg.append(terms[3])
            l = f.readline()
        metadata['q'] = np.array(s)
        metadata['qj'] = np.array(sj)
        metadata['jexp'] = np.array(jexp)
        metadata['jerror'] = np.array(jerror)
        metadata['jreg'] = np.array(jreg)
        metadata['ireg'] = np.array(ireg)
        assert ('Distance distribution function of particle' == f.readline().strip())
        assert (not f.readline().strip())  # skip empty line
        assert (not f.readline().strip())  # skip empty line
        assert (f.readline().strip().split() == ['R', 'P(R)', 'ERROR'])
        assert (not f.readline().strip())  # skip empty line
        # read the P(r) table until an empty line or the next section header
        data = []
        while True:
            l = f.readline()
            if not l.strip():
                break
            if not l.strip():
                continue
            try:
                data.append([float(f_) for f_ in l.strip().split()])
            except ValueError:
                if 'Reciprocal space' in l:
                    break
            except:
                raise
        l = f.readline()
        match = re.match(
            r'Real space: Rg =\s+(?P<Rg>\d+\.\d+(E[+-]?\d+)?) \+- (?P<dRg>\d+\.\d+(E[+-]?\d+)?)\s+I\(0\) =\s+(?P<I0>\d+\.\d+(E[+-]?\d+)?) \+-\s+(?P<dI0>\d+\.\d+(E[+-]?\d+)?)',
            l.strip())
        assert (match is not None)
        metadata['Rg_gnom'] = ErrorValue(float(match.groupdict()['Rg']), float(match.groupdict()['dRg']))
        metadata['I0_gnom'] = ErrorValue(float(match.groupdict()['I0']), float(match.groupdict()['dI0']))
    if get_metadata:
        return (np.array(data), metadata)
    else:
        return (np.array(data),)
def execute_command(cmd, input_to_command=None, eat_output=False, noprint=False):
    """Run an external command and collect its output line by line.

    Inputs:
        cmd: the command and its arguments, as a list of strings
        input_to_command: data fed to the standard input of the command.
            May be a str or bytes (piped in by us), an open file object,
            or None (no input).
        eat_output: if True, print a dot per output line instead of the
            line itself (progress indication)
        noprint: if True, print nothing at all

    Output:
        list of lines (str, UTF-8 decoded, trailing newline stripped)
        read from the standard output and standard error of the command.
    """
    if isinstance(input_to_command, (str, bytes)):
        # we pipe the data in ourselves; the original code only checked for
        # str here, so a bytes input crashed on popen.stdin being None.
        stdin = subprocess.PIPE
    else:
        stdin = input_to_command
    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=stdin)
    if isinstance(input_to_command, str):
        input_to_command = input_to_command.encode('utf-8')
    if isinstance(input_to_command, bytes):
        popen.stdin.write(input_to_command)
        # close stdin so the child sees EOF instead of blocking forever
        popen.stdin.close()
    lines_iterator = itertools.chain(popen.stdout, popen.stderr)
    resultinglines = []
    for line in lines_iterator:
        decoded = str(line[:-1], encoding='utf-8')
        if not noprint:
            if not eat_output:
                print(decoded, flush=True)
            else:
                print(".", end='', flush=True)
        resultinglines.append(decoded)
    popen.wait()  # reap the child process to avoid zombies
    return resultinglines
def autorg(filename, mininterval=None, qminrg=None, qmaxrg=None, noprint=True):
    """Execute autorg.

    Inputs:
        filename: either a name of an ascii file, or an instance of Curve.
        mininterval: the minimum number of points in the Guinier range
        qminrg: the maximum value of qmin*Rg. Default of autorg is 1.0
        qmaxrg: the maximum value of qmax*Rg. Default of autorg is 1.3
        noprint: if the output of autorg should be redirected to the null
            device.

    Outputs:
        Rg as an ErrorValue
        I0 as an ErrorValue
        qmin: the lower end of the chosen Guinier range
        qmax: the upper end of the chosen Guinier range
        quality: the quality parameter, between 0 and 1
        aggregation: float, the extent of aggregation
    """
    if isinstance(filename, Curve):
        # save the curve into a temporary file for autorg to read
        curve = filename
        with tempfile.NamedTemporaryFile('w+b',
                                         delete=False) as f:
            curve.save(f)
            filename = f.name
    cmdline = ['autorg', filename, '-f', 'ssv']
    if mininterval is not None:
        cmdline.extend(['--mininterval', str(mininterval)])
    if qminrg is not None:
        cmdline.extend(['--sminrg', str(qminrg)])
    if qmaxrg is not None:
        cmdline.extend(['--smaxrg', str(qmaxrg)])
    result = execute_command(cmdline, noprint=noprint)
    # 'ssv' output: one space-separated line; the 9th field is the file name
    Rg, dRg, I0, dI0, idxfirst, idxlast, quality, aggregation, filename = result[0].split(None, 8)
    try:
        # probe whether 'curve' was bound above (i.e. a Curve was passed in)
        curve
    except NameError:
        # a file name was passed in: load the curve from it
        curve = Curve.new_from_file(filename)
    else:
        # we created a temporary file above: remove it
        os.unlink(filename)
    # idxfirst/idxlast are 1-based point indices; convert them to q values
    return ErrorValue(float(Rg), float(dRg)), ErrorValue(float(I0), float(dI0)), curve.q[int(idxfirst) - 1], curve.q[
        int(idxlast) - 1], float(quality), float(aggregation)
def datgnom(filename, Rg=None, noprint=True):
    """Run datgnom (ATSAS) on a scattering data file.

    Inputs:
        filename: name of an ascii scattering data file
        Rg: radius of gyration; if None, it is determined with autorg()
        noprint: if the printout of the program is to be suppressed

    Outputs: (gnomdata, metadata) as returned by read_gnom_pr() on the
        .out file produced by datgnom (same basename as *filename*, with
        the extension replaced by '.out').
    """
    if Rg is None:
        # only Rg is needed below; the other autorg results are discarded
        Rg, I0, idxfirst, idxlast, quality, aggregation = autorg(filename)
    execute_command(['datgnom', filename, '-r', '%f' % float(Rg)],
                    noprint=noprint)
    gnomoutputfilename = filename.rsplit('.', 1)[0] + '.out'
    gnomdata, metadata = read_gnom_pr(gnomoutputfilename, get_metadata=True)
    return gnomdata, metadata
def dammif(gnomoutputfilename, prefix=None, mode='fast', symmetry='P1', N=None,
           noprint=True):
    """Run dammif (ATSAS ab-initio shape determination) on a GNOM .out file.

    If N is None a single run is performed and the name of the resulting
    PDB file is returned; otherwise N independent runs are performed and
    the list of the N resulting PDB file names is returned.
    """
    if prefix is None:
        prefix = 'dammif_' + gnomoutputfilename.rsplit('.', 1)[0]

    def _single_run(runprefix):
        # one dammif invocation; the model is written to <runprefix>-1.pdb
        execute_command(['dammif', '--prefix=%s' % runprefix, '--omit-solvent',
                         '--mode=%s' % mode, '--symmetry=%s' % symmetry,
                         '--unit=NANOMETER', gnomoutputfilename],
                        noprint=noprint)
        return runprefix + '-1.pdb'

    if N is None:
        return _single_run(prefix)
    return [_single_run('%s_%03d' % (prefix, i)) for i in range(N)]
def shanum(filename, dmax=None, noprint=True):
    """Execute the shanum program to determine the optimum qmax
    according to an estimation of the optimum number of Shannon
    channels.

    Inputs:
        filename: either a name of an ascii file, or an instance
            of Curve
        dmax: the cut-off of the P(r) function, if known. If None,
            this will be determined by the shanum program
        noprint: if the printout of the program is to be suppressed.

    Outputs: dmax, nsh, nopt, qmaxopt
        dmax: the cut-off of the P(r) function.
        nsh: the estimated number of Shannon channels
        nopt: the optimum number of Shannon channels
        qmaxopt: the optimum value of the high-q cutoff

    Values that cannot be parsed from the program output are returned
    as None (previously a missing line raised NameError).
    """
    if isinstance(filename, Curve):
        # save the curve into a temporary file for shanum to read
        curve = filename
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            curve.save(f)
            filename = f.name
    cmdline = ['shanum', filename]
    if dmax is not None:
        cmdline.append(str(float(dmax)))
    result = execute_command(cmdline, noprint=noprint)
    # pre-initialize so that missing lines do not raise NameError at return
    nsh = nopt = qmaxopt = None
    for l in result:
        l = l.strip()
        if l.startswith('Dmax='):
            dmax = float(l.split('=')[1])
        elif l.startswith('Nsh='):
            nsh = float(l.split('=')[1])
        elif l.startswith('Nopt='):
            nopt = float(l.split('=')[1])
        elif l.startswith('Sopt='):
            qmaxopt = float(l.split('=')[1])
    return dmax, nsh, nopt, qmaxopt
def bodies(filename, bodytypes=None, prefix=None, fit_timeout=10, Ndummyatoms=2000, noprint=True):
    """Fit simple geometrical bodies to a scattering curve with the ATSAS
    'bodies' program and build dummy-atom (DAM) models of the fitted shapes.

    Inputs:
        filename: name of an ascii data file, or an instance of Curve (in
            the latter case *prefix* is mandatory)
        bodytypes: list of body type names to try (default: all supported)
        prefix: output file name prefix (default: *filename* without extension)
        fit_timeout: timeout in seconds for each 'bodies' subprocess
        Ndummyatoms: number of dummy atoms in the generated DAM model
        noprint: if True, do not echo the 'bodies' program output

    Output: dict mapping body type -> dict of fitted/expected parameters,
        or None if a fit yielded no chi-square value. An ipy_table summary
        of all fits is also displayed.
    """
    BODIES = ['ellipsoid', 'rotation-ellipsoid', 'cylinder', 'elliptic-cylinder', 'hollow-cylinder', 'parallelepiped',
              'hollow-sphere', 'dumbbell']
    if bodytypes is None:
        bodytypes = BODIES
    unknownbodies = [b for b in bodytypes if b not in BODIES]
    if unknownbodies:
        raise ValueError('Unknown body type(s): ' + ', '.join(unknownbodies))
    if isinstance(filename, Curve):
        # save the curve into a temporary file for 'bodies' to read
        curve = filename
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            curve.save(f)
            filename = f.name
        assert (prefix is not None)
    else:
        if prefix is None:
            prefix = filename.rsplit('.', 1)[0]
    fittingresults = {}
    for b in bodytypes:
        print('Fitting geometrical body %s' % b, flush=True)
        # drive the interactive 'bodies' program through stdin:
        # 'f' = fit mode, file name, 1-based body index, defaults, prefix
        p = subprocess.Popen(['bodies'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            stdout, stderr = p.communicate(input=b'f\n%s\n%d\n\n\n\n\n\n\n%s\n' % (
                filename.encode('utf-8'), BODIES.index(b) + 1, prefix.encode('utf-8')), timeout=fit_timeout)
        except subprocess.TimeoutExpired:
            print('Fitting timed out.')
            continue
        stdout = stdout.decode('utf-8')
        stderr = stderr.decode('utf-8')
        if stderr:
            print('Error: ', stderr, flush=True)
        # parse the textual report of 'bodies' line by line; the flags below
        # track which section of the report we are currently in
        printing_on = False
        parameter_recording_on = False
        bodyparameters = []
        bodyparameternames = []
        fittingresults[b] = {}
        for s in stdout.split('\n'):
            if s.startswith(' Input file name'):
                printing_on = True
            if printing_on and not noprint:
                print(s, flush=True)
            if s.startswith(' Body type'):
                parameter_recording_on = True
            if s.startswith(' Parameter \'scale\''):
                # 'scale' marks the end of the geometrical parameters
                parameter_recording_on = False
            if parameter_recording_on and s.startswith(' Parameter \''):
                bodyparameters.append(float(s.split(':')[1].strip()))
                # the parameter name is the text between the first two quotes
                bodyparameternames.append(s[s.index("'") + 1:(s.index("'") + s[s.index("'") + 1:].index("'") + 1)])
            if s.startswith(' Expected Radius of Gyration'):
                fittingresults[b]['Rgexp'] = float(s.split(':')[1].strip())
            elif s.startswith(' Expected I0'):
                fittingresults[b]['I0exp'] = float(s.split(':')[1].strip())
            elif s.startswith(' Expected Volume'):
                fittingresults[b]['Volexp'] = float(s.split(':')[1].strip())
            elif s.startswith(' Fit Radius of Gyration'):
                fittingresults[b]['Rgfit'] = float(s.split(':')[1].strip())
            elif s.startswith(' Fit I0'):
                fittingresults[b]['I0fit'] = float(s.split(':')[1].strip())
            elif s.startswith(' Fit Volume'):
                fittingresults[b]['Volfit'] = float(s.split(':')[1].strip())
            elif s.startswith(' Goodness of Fit (chi-square)'):
                fittingresults[b]['Chi2'] = float(s.split(':')[1].strip())
        if 'Chi2' not in fittingresults[b]:
            # no chi-square in the report: 'bodies' could not fit the file
            print('Error: cannot open file {}'.format(filename))
            return
        fittingresults[b]['stdout_from_bodies'] = stdout
        fittingresults[b]['type'] = b
        fittingresults[b]['bodyparameters'] = bodyparameters
        fittingresults[b]['bodyparameternames'] = bodyparameternames
        print('Creating DAM model')
        damoutputfile = prefix + '-' + b + '.pdb'
        # second 'bodies' run: 'd' = DAM mode, body index, parameters
        # scaled to Angstrom (x10), then atom count and output file
        p = subprocess.Popen(['bodies'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            stdout, stderr = p.communicate(input=b'd\n%d\n' % (BODIES.index(b) + 1) + b'\n'.join(
                [b'%6f' % (10 * v) for v in bodyparameters]) + b'\n1\n%d\n%s\n' % (
                Ndummyatoms, damoutputfile.encode('utf-8')), timeout=fit_timeout)
        except subprocess.TimeoutExpired:
            print('Error creating DAM model.')
        if stderr:
            print(stderr)
    tab = [['Body', 'Goodness of Fit ($\chi^2$)', 'Rg mismatch', 'I0 mismatch', 'Volume mismatch']]
    for b in sorted(fittingresults):
        tab.append([
            fittingresults[b]['type'] + ' (' + ', '.join(
                ['%s=%.3f nm' % (var, val) for var, val in zip(fittingresults[b]['bodyparameternames'],
                                                               fittingresults[b]['bodyparameters'])]) + ')',
            fittingresults[b]['Chi2'],
            '%.2f nm' % (fittingresults[b]['Rgfit'] - fittingresults[b]['Rgexp']),
            '%5g cm$^{-1}$ sr$^{-1}$' % (fittingresults[b]['I0fit'] - fittingresults[b]['I0exp']),
            '%.2f nm^3' % (fittingresults[b]['Volfit'] - fittingresults[b]['Volexp']),
        ])
    tab = ipy_table.IpyTable(tab)
    tab.apply_theme('basic')
    display(tab)
    return fittingresults
def datcmp(*curves, alpha=None, adjust=None, test='CORMAP'):
    """Run datcmp on the scattering curves.

    Inputs:
        *curves: scattering curves as positional arguments
        alpha: confidence parameter
        adjust: adjustment type (string), see the help of datcmp for details
        test: test (string), see the help of datcmp for details

    Outputs:
        matC: the C matrix
        matp: the matrix of the p values comparing the i-th and j-th exposure
        matpadj: adjusted p-matrix of the exposures
        ok: list of the same length as the number of curves. If True, the
            given curve does not differ significantly from the others.

    Entries that cannot be parsed from the datcmp output remain NaN.
    """
    if len({len(c) for c in curves}) != 1:
        raise ValueError('All curves have to be of the same length.')
    datcmpargs = []
    if alpha is not None:
        datcmpargs.append('--alpha=%f' % alpha)
    if adjust is not None:
        datcmpargs.append('--adjust=%s' % adjust)
    if test is not None:
        datcmpargs.append('--test=%s' % test)
    with tempfile.TemporaryDirectory(prefix='credolib_datcmp') as td:
        # write each curve as a 3-column (q, I, dI) text file for datcmp
        for i, c in enumerate(curves):
            mat = np.zeros((len(c), 3))
            mat[:, 0] = c.q
            mat[:, 1] = c.Intensity
            mat[:, 2] = c.Error
            np.savetxt(os.path.join(td, 'curve_%d.dat' % i), mat)
        # NaN marks pairs for which no result could be parsed
        matC = np.zeros((len(curves), len(curves))) + np.nan
        matp = np.zeros((len(curves), len(curves))) + np.nan
        matpadj = np.zeros((len(curves), len(curves))) + np.nan
        ok = np.zeros(len(curves)) + np.nan
        try:
            results = subprocess.check_output(
                ['datcmp'] + datcmpargs + [os.path.join(td, 'curve_%d.dat' % i) for i in range(len(curves))]).decode(
                'utf-8')
        except subprocess.CalledProcessError:
            # datcmp failed: fall through and return the all-NaN matrices
            pass
        else:
            for l in results.split('\n'):
                # pairwise comparison lines: "<i> vs. <j>  <C>  <p>  <adjp>"
                m = re.match(
                    '^\s*(?P<i>\d+)\s*vs\.\s*(?P<j>\d+)\s*(?P<C>\d*\.\d*)\s*(?P<p>\d*\.\d*)\s*(?P<adjp>\d*\.\d*)[\s\*]{1}$',
                    l)
                if m is not None:
                    # datcmp indices are 1-based; the matrices are symmetric
                    i = int(m.group('i')) - 1
                    j = int(m.group('j')) - 1
                    matC[i, j] = matC[j, i] = float(m.group('C'))
                    matp[i, j] = matp[j, i] = float(m.group('p'))
                    matpadj[i, j] = matpadj[j, i] = float(m.group('adjp'))
                else:
                    # per-curve acceptance line: a trailing '*' means accepted
                    m = re.match('\s*(?P<i>\d+)(?P<ack>[\*\s]{1})\s*', l)
                    if m is not None:
                        ok[int(m.group('i')) - 1] = (m.group('ack') == '*')
    return matC, matp, matpadj, ok
def datporod(gnomoutfile):
    """Run the ATSAS datporod utility on a GNOM output file.

    Returns a 3-tuple:
        Rg: the radius of gyration found in the input file
        I0: the forward scattering intensity found in the input file
        Vporod: the estimated Porod volume
    """
    output = subprocess.check_output(['datporod', gnomoutfile])
    tokens = output.decode('utf-8').strip().split()
    return float(tokens[0]), float(tokens[1]), float(tokens[2])
def gnom(curve, Rmax, outputfilename=None, Npoints_realspace=None, initial_alpha=None):
    """Run GNOM on the dataset.

    Inputs:
        curve: an instance of sastool.classes2.Curve or anything which has a
            save() method, saving the scattering curve to a given .dat file,
            in q=4*pi*sin(theta)/lambda [1/nm] units
        Rmax: the estimated maximum extent of the scattering object, in nm.
        outputfilename: the preferred name of the output file. If not given,
            the .out file produced by gnom will be lost.
        Npoints_realspace: the expected number of points in the real space
        initial_alpha: the initial value of the regularization parameter.

    Outputs:
        the same as of read_gnom_pr()
    """
    with tempfile.TemporaryDirectory(prefix='credolib_gnom') as td:
        curve.save(os.path.join(td, 'curve.dat'))
        print('Using curve for GNOM: qrange from {} to {}'.format(curve.q.min(), curve.q.max()))
        # an empty string answer makes GNOM fall back to its own default
        if Npoints_realspace is None:
            Npoints_realspace = ""
        else:
            Npoints_realspace = str(Npoints_realspace)
        if initial_alpha is None:
            initial_alpha = ""
        else:
            initial_alpha = str(initial_alpha)
        # GNOM questions and our answers:
        # Printer type [ postscr ] : <ENTER>
        # Input data, first file : <curve.dat in the temporary directory><ENTER>
        # Output file [ gnom.out ] : <gnom.out in the temporary directory><ENTER>
        # No of start points to skip [ 0 ] : 0<ENTER>
        # ... (just GNOM output)
        # ... (just GNOM output)
        # Input data, second file [ none ] : <ENTER>
        # No of end points to omit [ 0 ] : 0<ENTER>
        # ... (just GNOM output)
        # ... (just GNOM output)
        # Angular scale (1/2/3/4) [ 1 ] : 2<ENTER>
        # Plot input dataa (Y/N) [ Yes ] : N<ENTER>
        # File containing expert parameters [ none ] : <ENTER>
        # Kernel already calculated (Y/N) [ No ] : N<ENTER>
        # Type of system (0/1/2/3/4/5/6) [ 0 ] : 0<ENTER>
        # Zero condition at r=min (Y/N) [ Yes ] : Y<ENTER>
        # Zero condition at r=max (Y/N) [ Yes ] : Y<ENTER>
        # -- Arbitrary monodisperse system --
        # Rmin=0, Rmax is maximum particle diameter
        # Rmax for evaluating p(r) : <Rmax * 10><ENTER>
        # Number of points in real space [(always different)] : <Npoints_realspace><ENTER>
        # Kernel-storage file name [ kern.bin ] : <ENTER>
        # Experimental setup (0/1/2) [ 0 ] : 0<ENTER>
        # Initial ALPHA [ 0.0 ] : <initial_alpha><ENTER>
        # Plot alpha distribution (Y/N) [ Yes ] : N<ENTER>
        # Plot results (Y/N) [ Yes ] : N<ENTER>
        # ... solution ...
        # Your choice : <ENTER>
        # Evaluate errors (Y/N) [ Yes ] : Y<ENTER>
        # Plot p(r) with errors (Y/N) [ Yes ] : N<ENTER>
        # Next data set (Yes/No/Same) [ No ] : N<ENTER>
        gnominput = "\n%s\n%s\n0\n\n0\n2\nN\n\nN\n0\nY\nY\n%f\n%s\n\n0\n%s\nN\nN\n\nY\nN\nN\n" % (
            os.path.join(td, 'curve.dat'), os.path.join(td, 'gnom.out'), 10 * Rmax, Npoints_realspace, initial_alpha)
        result = subprocess.run(['gnom'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                input=gnominput.encode('utf-8'))
        pr, metadata = read_gnom_pr(os.path.join(td, 'gnom.out'), True)
        # GNOM works in Angstrom units: convert lengths back to nm (divide
        # by 10) and momentum transfer back to 1/nm (multiply by 10)
        pr[:, 0] /= 10
        metadata['q'] *= 10
        metadata['qj'] *= 10
        metadata['qmin'] *= 10
        metadata['qmax'] *= 10
        metadata['dmax'] /= 10
        metadata['dmin'] /= 10
        metadata['Rg_guinier'] /= 10
        metadata['Rg_gnom'] /= 10
        if outputfilename is not None:
            # preserve the .out file before the temporary directory vanishes
            shutil.copy(os.path.join(td, 'gnom.out'), outputfilename)
    return pr, metadata
|
awacha/credolib
|
credolib/atsas.py
|
datcmp
|
python
|
def datcmp(*curves, alpha=None, adjust=None, test='CORMAP'):
    """Run datcmp on the scattering curves.

    Inputs:
        *curves: scattering curves as positional arguments
        alpha: confidence parameter
        adjust: adjustment type (string), see the help of datcmp for details
        test: test (string), see the help of datcmp for details

    Outputs:
        matC: the C matrix
        matp: the matrix of the p values comparing the i-th and j-th exposure
        matpadj: adjusted p-matrix of the exposures
        ok: list of the same length as the number of curves. If True, the
            given curve does not differ significantly from the others.

    Raises:
        ValueError: if not all curves have the same number of points.
    """
    if len({len(c) for c in curves}) != 1:
        raise ValueError('All curves have to be of the same length.')
    datcmpargs = []
    if alpha is not None:
        datcmpargs.append('--alpha=%f' % alpha)
    if adjust is not None:
        datcmpargs.append('--adjust=%s' % adjust)
    if test is not None:
        datcmpargs.append('--test=%s' % test)
    with tempfile.TemporaryDirectory(prefix='credolib_datcmp') as td:
        # Write each curve to a three-column (q, Intensity, Error) text file,
        # the input format expected by datcmp.
        for i, c in enumerate(curves):
            mat = np.zeros((len(c), 3))
            mat[:, 0] = c.q
            mat[:, 1] = c.Intensity
            mat[:, 2] = c.Error
            np.savetxt(os.path.join(td, 'curve_%d.dat' % i), mat)
        # Pre-fill the result matrices with NaN: entries stay NaN wherever
        # datcmp does not report a value.
        matC = np.zeros((len(curves), len(curves))) + np.nan
        matp = np.zeros((len(curves), len(curves))) + np.nan
        matpadj = np.zeros((len(curves), len(curves))) + np.nan
        ok = np.zeros(len(curves)) + np.nan
        try:
            results = subprocess.check_output(
                ['datcmp'] + datcmpargs + [os.path.join(td, 'curve_%d.dat' % i) for i in range(len(curves))]).decode(
                'utf-8')
        except subprocess.CalledProcessError:
            # datcmp exited with a nonzero status: return the all-NaN matrices.
            pass
        else:
            for l in results.split('\n'):
                # Pairwise result lines look like: "<i> vs. <j>  <C>  <p>  <adjusted p>".
                # Raw strings avoid invalid-escape warnings for \s, \d, \*.
                m = re.match(
                    r'^\s*(?P<i>\d+)\s*vs\.\s*(?P<j>\d+)\s*(?P<C>\d*\.\d*)\s*(?P<p>\d*\.\d*)\s*(?P<adjp>\d*\.\d*)[\s\*]{1}$',
                    l)
                if m is not None:
                    i = int(m.group('i')) - 1
                    j = int(m.group('j')) - 1
                    matC[i, j] = matC[j, i] = float(m.group('C'))
                    matp[i, j] = matp[j, i] = float(m.group('p'))
                    matpadj[i, j] = matpadj[j, i] = float(m.group('adjp'))
                else:
                    # Per-curve acknowledgement lines: "<i>*" marks a curve that
                    # does not differ significantly from the others.
                    m = re.match(r'\s*(?P<i>\d+)(?P<ack>[\*\s]{1})\s*', l)
                    if m is not None:
                        ok[int(m.group('i')) - 1] = (m.group('ack') == '*')
    return matC, matp, matpadj, ok
|
Run datcmp on the scattering curves.
Inputs:
*curves: scattering curves as positional arguments
alpha: confidence parameter
adjust: adjustment type (string), see the help of datcmp for details
test: test (string), see the help of datcmp for details
Outputs:
matC: the C matrix
matp: the matrix of the p values comparing the i-th and j-th exposure
matpadj: adjusted p-matrix of the exposures
ok: list of the same length as the number of curves. If True, the
given curve does not differ significantly from the others.
|
train
|
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/atsas.py#L371-L428
| null |
__all__ = ['read_gnom_pr', 'execute_command', 'autorg', 'shanum', 'datgnom', 'dammif', 'bodies', 'datcmp', 'datporod',
'gnom']
import itertools
import os
import re
import shutil
import subprocess
import tempfile
import ipy_table
import numpy as np
from IPython.display import display
from sastool.classes2.curve import Curve
from sastool.misc.errorvalue import ErrorValue
def read_gnom_pr(filename, get_metadata=False):
    """Parse the 'Final results' section of a GNOM output (.out) file.

    Inputs:
        filename: name of the GNOM output file
        get_metadata: if True, a metadata dictionary is returned as well

    Outputs:
        a tuple. Its first element is an Nx3 numpy array holding the p(r)
        function (columns: r, p(r), error). If get_metadata is True, the
        second element is a dict with the quality parameters, the fitted
        and regularized curves and the Guinier/GNOM Rg and I(0) values
        read from the file.

    The parser follows the fixed layout of the GNOM output file and uses
    assertions to validate each expected line.
    """
    metadata = {}
    with open(filename, 'rt', encoding='utf-8') as f:
        # Skip everything up to the 'Final results' marker.
        l = f.readline()
        while 'Final results' not in l:
            l = f.readline()
        assert (not f.readline().strip())  # skip empty line
        assert (f.readline().strip() == 'Parameter    DISCRP    OSCILL    STABIL    SYSDEV    POSITV    VALCEN')
        # Quality-criterion table: one row per parameter, one column per criterion.
        parameters = {'DISCRP': {}, 'OSCILL': {}, 'STABIL': {}, 'SYSDEV': {}, 'POSITV': {}, 'VALCEN': {}}
        for i in range(6):
            line = f.readline().strip().split()
            if i == 4:
                # this line contains only a dashed line: "- - - - - - etc."
                assert (all([l == '-' for l in line]))
                continue
            what = line[0]
            (parameters['DISCRP'][what], parameters['OSCILL'][what],
             parameters['STABIL'][what], parameters['SYSDEV'][what],
             parameters['POSITV'][what], parameters['VALCEN'][what]) = tuple([
                float(x) for x in line[1:]])
        # Recompute a weighted total estimate from the per-criterion values.
        te = tw = 0
        for p in parameters:
            par = parameters[p]
            par['Estimate_corrected'] = np.exp(-(par['Ideal'] - par['Current']) ** 2 / par['Sigma'] ** 2)
            te += par['Estimate_corrected'] * par['Weight']
            tw += par['Weight']
        metadata['totalestimate_corrected'] = te / tw
        metadata['parameters'] = parameters
        assert (not f.readline().strip())  # skip empty line
        match = re.match(r'Angular\s+range\s+:\s+from\s+(?P<qmin>\d+\.\d+)\s+to\s+(?P<qmax>\d+\.\d+)',
                         f.readline().strip())
        assert (match is not None)
        metadata['qmin'] = float(match.groupdict()['qmin'])
        metadata['qmax'] = float(match.groupdict()['qmax'])
        match = re.match(r'Real\s+space\s+range\s+:\s+from\s+(?P<dmin>\d+\.\d+)\s+to\s+(?P<dmax>\d+\.\d+)',
                         f.readline().strip())
        assert (match is not None)
        metadata['dmin'] = float(match.groupdict()['dmin'])
        metadata['dmax'] = float(match.groupdict()['dmax'])
        assert (not f.readline().strip())
        match = re.match(r'Highest ALPHA \(theor\) :\s+(?P<highestalpha>\d+\.\d+E[+-]?\d+)', f.readline().strip())
        assert (match is not None)
        metadata['highestalpha'] = float(match.groupdict()['highestalpha'])
        match = re.match(
            r'Current ALPHA\s+:\s+(?P<currentalpha>\d+\.\d+E[+-]\d+)\s+Rg : (?P<Rg>\d+\.\d+E[+-]\d+)\s+I\(0\) :\s+(?P<I0>\d+\.\d+E[+-]\d+)',
            f.readline().strip())
        assert (match is not None)
        metadata['currentalpha'] = float(match.groupdict()['currentalpha'])
        metadata['Rg_guinier'] = float(match.groupdict()['Rg'])
        metadata['I0_guinier'] = float(match.groupdict()['I0'])
        assert (not f.readline().strip())  # skip empty line
        match = re.match(
            r'Total  estimate : (?P<totalestimate>\d+\.\d+)\s+ which is \s+(?P<qualitystring>.*)\s+solution',
            f.readline().strip())
        assert (match is not None)
        metadata['totalestimate'] = float(match.groupdict()['totalestimate'])
        metadata['qualitystring'] = match.groupdict()['qualitystring']
        assert (not f.readline().strip())  # skip empty line
        assert (f.readline().strip().split() == ['S', 'J', 'EXP', 'ERROR', 'J', 'REG', 'I', 'REG'])
        assert (not f.readline().strip())  # skip empty line
        # Fitted curve block: I REG is present for every q; J EXP / ERROR / J REG
        # only for points inside the experimental range (lines with >2 columns).
        s = []
        sj = []
        jexp = []
        jerror = []
        jreg = []
        ireg = []
        l = f.readline()
        while l.strip():
            terms = [float(x) for x in l.strip().split()]
            s.append(terms[0])
            ireg.append(terms[-1])
            if len(terms) > 2:
                sj.append(terms[0])
                jexp.append(terms[1])
                jerror.append(terms[2])
                jreg.append(terms[3])
            l = f.readline()
        metadata['q'] = np.array(s)
        metadata['qj'] = np.array(sj)
        metadata['jexp'] = np.array(jexp)
        metadata['jerror'] = np.array(jerror)
        metadata['jreg'] = np.array(jreg)
        metadata['ireg'] = np.array(ireg)
        assert ('Distance distribution  function of particle' == f.readline().strip())
        assert (not f.readline().strip())  # skip empty line
        assert (not f.readline().strip())  # skip empty line
        assert (f.readline().strip().split() == ['R', 'P(R)', 'ERROR'])
        assert (not f.readline().strip())  # skip empty line
        data = []
        while True:
            l = f.readline()
            if not l.strip():
                break
            if not l.strip():
                # NOTE(review): unreachable — the identical condition above
                # already breaks; kept for byte-compatibility.
                continue
            try:
                data.append([float(f_) for f_ in l.strip().split()])
            except ValueError:
                # Non-numeric line: the 'Reciprocal space' summary ends the table.
                # NOTE(review): other non-numeric lines are silently skipped here.
                if 'Reciprocal space' in l:
                    break
            except:
                raise
        l = f.readline()
        match = re.match(
            r'Real space: Rg =\s+(?P<Rg>\d+\.\d+(E[+-]?\d+)?) \+- (?P<dRg>\d+\.\d+(E[+-]?\d+)?)\s+I\(0\) =\s+(?P<I0>\d+\.\d+(E[+-]?\d+)?) \+-\s+(?P<dI0>\d+\.\d+(E[+-]?\d+)?)',
            l.strip())
        assert (match is not None)
        metadata['Rg_gnom'] = ErrorValue(float(match.groupdict()['Rg']), float(match.groupdict()['dRg']))
        metadata['I0_gnom'] = ErrorValue(float(match.groupdict()['I0']), float(match.groupdict()['dI0']))
    if get_metadata:
        return (np.array(data), metadata)
    else:
        return (np.array(data),)
def execute_command(cmd, input_to_command=None, eat_output=False, noprint=False):
    """Run an external command, streaming its stdout followed by its stderr.

    Inputs:
        cmd: the command and its arguments as a list, e.g. ['autorg', 'file.dat']
        input_to_command: data fed to the child's standard input. May be a
            str (encoded as UTF-8), bytes, a file object, or None.
        eat_output: if True, print a '.' per output line instead of the line
            itself (progress-indicator mode).
        noprint: if True, do not print anything.

    Outputs:
        the lines printed by the child (stdout first, then stderr), with the
        trailing newline stripped, as a list of strings.
    """
    # A str or bytes payload must be written through a pipe; anything else
    # (None or a file object) is handed to Popen directly.
    if isinstance(input_to_command, (str, bytes)):
        stdin = subprocess.PIPE
    else:
        stdin = input_to_command
    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=stdin)
    if isinstance(input_to_command, str):
        input_to_command = input_to_command.encode('utf-8')
    if isinstance(input_to_command, bytes):
        popen.stdin.write(input_to_command)
        # Close the pipe to signal EOF; otherwise the child may block
        # forever waiting for more input.
        popen.stdin.close()
    lines_iterator = itertools.chain(popen.stdout, popen.stderr)
    resultinglines = []
    for line in lines_iterator:
        text = str(line[:-1], encoding='utf-8')
        if not noprint:
            if not eat_output:
                print(text, flush=True)
            else:
                print(".", end='', flush=True)
        resultinglines.append(text)
    popen.wait()  # reap the child so no zombie process is left behind
    return resultinglines
def autorg(filename, mininterval=None, qminrg=None, qmaxrg=None, noprint=True):
    """Execute autorg.

    Inputs:
        filename: either a name of an ascii file, or an instance of Curve.
        mininterval: the minimum number of points in the Guinier range
        qminrg: the maximum value of qmin*Rg. Default of autorg is 1.0
        qmaxrg: the maximum value of qmax*Rg. Default of autorg is 1.3
        noprint: if the output of autorg should be redirected to the null
            device.

    Outputs:
        Rg as an ErrorValue
        I0 as an ErrorValue
        qmin: the lower end of the chosen Guinier range
        qmax: the upper end of the chosen Guinier range
        quality: the quality parameter, between 0 and 1
        aggregation: float, the extent of aggregation
    """
    # Track explicitly whether a Curve instance was passed, instead of
    # probing for the name with try/except NameError later.
    curve = None
    if isinstance(filename, Curve):
        curve = filename
        # Dump the in-memory curve to a temporary file for autorg to read.
        with tempfile.NamedTemporaryFile('w+b',
                                         delete=False) as f:
            curve.save(f)
            filename = f.name
    cmdline = ['autorg', filename, '-f', 'ssv']
    if mininterval is not None:
        cmdline.extend(['--mininterval', str(mininterval)])
    if qminrg is not None:
        cmdline.extend(['--sminrg', str(qminrg)])
    if qmaxrg is not None:
        cmdline.extend(['--smaxrg', str(qmaxrg)])
    result = execute_command(cmdline, noprint=noprint)
    # ssv output: Rg dRg I0 dI0 first-index last-index quality aggregation filename
    Rg, dRg, I0, dI0, idxfirst, idxlast, quality, aggregation, filename = result[0].split(None, 8)
    if curve is None:
        # A filename was given: load the curve to translate point indices to q.
        curve = Curve.new_from_file(filename)
    else:
        # A Curve was given: remove the temporary file created above.
        os.unlink(filename)
    # autorg reports 1-based point indices; convert to q values.
    return ErrorValue(float(Rg), float(dRg)), ErrorValue(float(I0), float(dI0)), curve.q[int(idxfirst) - 1], curve.q[
        int(idxlast) - 1], float(quality), float(aggregation)
def datgnom(filename, Rg=None, noprint=True):
    """Run datgnom on a scattering curve file and parse the resulting .out file.

    Inputs:
        filename: name of the ascii scattering curve file
        Rg: radius of gyration; determined with autorg() when not given
        noprint: suppress the printout of the external programs

    Outputs: the p(r) array and the metadata dict from read_gnom_pr().
    """
    if Rg is None:
        # Estimate the radius of gyration first via autorg.
        Rg = autorg(filename)[0]
    execute_command(['datgnom', filename, '-r', '%f' % float(Rg)],
                    noprint=noprint)
    # datgnom writes its result next to the input file, with a .out extension.
    outfile = filename.rsplit('.', 1)[0] + '.out'
    return read_gnom_pr(outfile, get_metadata=True)
def dammif(gnomoutputfilename, prefix=None, mode='fast', symmetry='P1', N=None,
           noprint=True):
    """Run DAMMIF ab initio bead modelling on a GNOM output file.

    Inputs:
        gnomoutputfilename: the GNOM .out file to model
        prefix: output file prefix (default: 'dammif_' + input basename)
        mode: DAMMIF mode ('fast', 'slow', ...)
        symmetry: point-group symmetry string
        N: if given, run DAMMIF N times with numbered prefixes
        noprint: suppress DAMMIF's printout

    Outputs: the produced .pdb filename (N is None) or the list of them.
    """
    if prefix is None:
        prefix = 'dammif_' + gnomoutputfilename.rsplit('.', 1)[0]

    def _run_dammif(pfx):
        # One DAMMIF invocation with the given output prefix.
        execute_command(['dammif', '--prefix=%s' % pfx, '--omit-solvent',
                         '--mode=%s' % mode, '--symmetry=%s' % symmetry,
                         '--unit=NANOMETER', gnomoutputfilename],
                        noprint=noprint)

    if N is None:
        _run_dammif(prefix)
        return prefix + '-1.pdb'
    pdbfiles = []
    for run_index in range(N):
        _run_dammif('%s_%03d' % (prefix, run_index))
        pdbfiles.append('%s_%03d-1.pdb' % (prefix, run_index))
    return pdbfiles
def shanum(filename, dmax=None, noprint=True):
    """Execute the shanum program to determine the optimum qmax
    according to an estimation of the optimum number of Shannon
    channels.

    Inputs:
        filename: either a name of an ascii file, or an instance
            of Curve
        dmax: the cut-off of the P(r) function, if known. If None,
            this will be determined by the shanum program
        noprint: if the printout of the program is to be suppressed.

    Outputs: dmax, nsh, nopt, qmaxopt
        dmax: the cut-off of the P(r) function.
        nsh: the estimated number of Shannon channels
        nopt: the optimum number of Shannon channels
        qmaxopt: the optimum value of the high-q cutoff
    """
    if isinstance(filename, Curve):
        # Write the in-memory curve to a temporary file for shanum to read.
        curve = filename
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            curve.save(f)
            filename = f.name
    cmdline = ['shanum', filename]
    if dmax is not None:
        cmdline.append(str(float(dmax)))
    result = execute_command(cmdline, noprint=noprint)
    # Parse the "key=value" lines printed by shanum.
    for l in result:
        l = l.strip()
        if l.startswith('Dmax='):
            dmax = float(l.split('=')[1])
        elif l.startswith('Smax='):
            # NOTE(review): qmax is parsed but never returned — confirm intent.
            qmax = float(l.split('=')[1])
        elif l.startswith('Nsh='):
            nsh = float(l.split('=')[1])
        elif l.startswith('Nopt='):
            nopt = float(l.split('=')[1])
        elif l.startswith('Sopt='):
            qmaxopt = float(l.split('=')[1])
    # NOTE(review): if shanum's output lacks any of these keys, the names
    # below are unbound and a NameError is raised — verify the output format.
    return dmax, nsh, nopt, qmaxopt
def bodies(filename, bodytypes=None, prefix=None, fit_timeout=10, Ndummyatoms=2000, noprint=True):
    """Fit simple geometrical bodies to a scattering curve with the ATSAS
    'bodies' program, build a dummy-atom model for each fitted body, and
    display a summary table of the fit qualities.

    Inputs:
        filename: either a name of an ascii file, or an instance of Curve
            (in which case prefix must be given)
        bodytypes: list of body-type names to try (default: all supported)
        prefix: output file prefix (default: input filename without extension)
        fit_timeout: per-invocation timeout in seconds for the bodies program
        Ndummyatoms: number of dummy atoms in the generated DAM model
        noprint: if True, do not echo the bodies program's output

    Outputs:
        a dict mapping body-type name to a dict of fit results (expected and
        fitted Rg, I0 and volume, chi^2, fitted body parameters, raw output),
        or None when the bodies program could not read the input file.
    """
    # Body types in the menu order expected by the 'bodies' program
    # (1-based index is sent on its stdin below).
    BODIES = ['ellipsoid', 'rotation-ellipsoid', 'cylinder', 'elliptic-cylinder', 'hollow-cylinder', 'parallelepiped',
              'hollow-sphere', 'dumbbell']
    if bodytypes is None:
        bodytypes = BODIES
    unknownbodies = [b for b in bodytypes if b not in BODIES]
    if unknownbodies:
        raise ValueError('Unknown body type(s): ' + ', '.join(unknownbodies))
    if isinstance(filename, Curve):
        # Write the in-memory curve to a temporary file for the program to read.
        curve = filename
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            curve.save(f)
            filename = f.name
        assert (prefix is not None)
    else:
        if prefix is None:
            prefix = filename.rsplit('.', 1)[0]
    fittingresults = {}
    for b in bodytypes:
        print('Fitting geometrical body %s' % b, flush=True)
        # Drive the interactive 'bodies' program through its stdin:
        # 'f' = fit mode, then filename, body-type index, defaults, prefix.
        p = subprocess.Popen(['bodies'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            stdout, stderr = p.communicate(input=b'f\n%s\n%d\n\n\n\n\n\n\n%s\n' % (
                filename.encode('utf-8'), BODIES.index(b) + 1, prefix.encode('utf-8')), timeout=fit_timeout)
        except subprocess.TimeoutExpired:
            print('Fitting timed out.')
            continue
        stdout = stdout.decode('utf-8')
        stderr = stderr.decode('utf-8')
        if stderr:
            print('Error: ', stderr, flush=True)
        # Scan the program's printout line by line for the fit results.
        printing_on = False
        parameter_recording_on = False
        bodyparameters = []
        bodyparameternames = []
        fittingresults[b] = {}
        for s in stdout.split('\n'):
            if s.startswith('    Input file name'):
                printing_on = True
            if printing_on and not noprint:
                print(s, flush=True)
            # Body parameters are listed between 'Body type' and the 'scale'
            # parameter line.
            if s.startswith('    Body type'):
                parameter_recording_on = True
            if s.startswith('    Parameter \'scale\''):
                parameter_recording_on = False
            if parameter_recording_on and s.startswith('    Parameter \''):
                bodyparameters.append(float(s.split(':')[1].strip()))
                # Extract the parameter name between the single quotes.
                bodyparameternames.append(s[s.index("'") + 1:(s.index("'") + s[s.index("'") + 1:].index("'") + 1)])
            if s.startswith('    Expected Radius of Gyration'):
                fittingresults[b]['Rgexp'] = float(s.split(':')[1].strip())
            elif s.startswith('    Expected I0'):
                fittingresults[b]['I0exp'] = float(s.split(':')[1].strip())
            elif s.startswith('    Expected Volume'):
                fittingresults[b]['Volexp'] = float(s.split(':')[1].strip())
            elif s.startswith('    Fit Radius of Gyration'):
                fittingresults[b]['Rgfit'] = float(s.split(':')[1].strip())
            elif s.startswith('    Fit I0'):
                fittingresults[b]['I0fit'] = float(s.split(':')[1].strip())
            elif s.startswith('    Fit Volume'):
                fittingresults[b]['Volfit'] = float(s.split(':')[1].strip())
            elif s.startswith('    Goodness of Fit (chi-square)'):
                fittingresults[b]['Chi2'] = float(s.split(':')[1].strip())
        if 'Chi2' not in fittingresults[b]:
            # No chi^2 in the printout: the program could not read the input.
            print('Error: cannot open file {}'.format(filename))
            return
        fittingresults[b]['stdout_from_bodies'] = stdout
        fittingresults[b]['type'] = b
        fittingresults[b]['bodyparameters'] = bodyparameters
        fittingresults[b]['bodyparameternames'] = bodyparameternames
        print('Creating DAM model')
        damoutputfile = prefix + '-' + b + '.pdb'
        # Second invocation in 'd' (dummy-atom) mode; parameters are fed
        # back multiplied by 10 — presumably an nm-to-Angstrom conversion,
        # TODO confirm against the bodies program's expected units.
        p = subprocess.Popen(['bodies'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            stdout, stderr = p.communicate(input=b'd\n%d\n' % (BODIES.index(b) + 1) + b'\n'.join(
                [b'%6f' % (10 * v) for v in bodyparameters]) + b'\n1\n%d\n%s\n' % (
                Ndummyatoms, damoutputfile.encode('utf-8')), timeout=fit_timeout)
        except subprocess.TimeoutExpired:
            print('Error creating DAM model.')
        if stderr:
            print(stderr)
    # Build and display an ipy_table summary of all the fits.
    tab = [['Body', 'Goodness of Fit ($\chi^2$)', 'Rg mismatch', 'I0 mismatch', 'Volume mismatch']]
    for b in sorted(fittingresults):
        tab.append([
            fittingresults[b]['type'] + ' (' + ', '.join(
                ['%s=%.3f nm' % (var, val) for var, val in zip(fittingresults[b]['bodyparameternames'],
                                                               fittingresults[b]['bodyparameters'])]) + ')',
            fittingresults[b]['Chi2'],
            '%.2f nm' % (fittingresults[b]['Rgfit'] - fittingresults[b]['Rgexp']),
            '%5g cm$^{-1}$ sr$^{-1}$' % (fittingresults[b]['I0fit'] - fittingresults[b]['I0exp']),
            '%.2f nm^3' % (fittingresults[b]['Volfit'] - fittingresults[b]['Volexp']),
        ])
    tab = ipy_table.IpyTable(tab)
    tab.apply_theme('basic')
    display(tab)
    return fittingresults
def datporod(gnomoutfile):
    """Run datporod and return the estimated Porod volume.

    Returns:
        Radius of gyration found in the input file
        I0 found in the input file
        Vporod: the estimated Porod volume
    """
    output = subprocess.check_output(['datporod', gnomoutfile]).decode('utf-8')
    # The first three whitespace-separated fields are Rg, I0 and Vporod.
    rg, i0, vporod = output.strip().split()[:3]
    return float(rg), float(i0), float(vporod)
def gnom(curve, Rmax, outputfilename=None, Npoints_realspace=None, initial_alpha=None):
    """Run GNOM on the dataset.

    Inputs:
        curve: an instance of sastool.classes2.Curve or anything which has a
            save() method, saving the scattering curve to a given .dat file,
            in q=4*pi*sin(theta)/lambda [1/nm] units
        Rmax: the estimated maximum extent of the scattering object, in nm.
        outputfilename: the preferred name of the output file. If not given,
            the .out file produced by gnom will be lost.
        Npoints_realspace: the expected number of points in the real space
        initial_alpha: the initial value of the regularization parameter.

    Outputs:
        the same as of read_gnom_pr()
    """
    with tempfile.TemporaryDirectory(prefix='credolib_gnom') as td:
        curve.save(os.path.join(td, 'curve.dat'))
        print('Using curve for GNOM: qrange from {} to {}'.format(curve.q.min(), curve.q.max()))
        # Empty answers make GNOM fall back to its own defaults for these
        # two interactive questions.
        if Npoints_realspace is None:
            Npoints_realspace = ""
        else:
            Npoints_realspace = str(Npoints_realspace)
        if initial_alpha is None:
            initial_alpha = ""
        else:
            initial_alpha = str(initial_alpha)
        # GNOM questions and our answers:
        # Printer type [ postscr ] : <ENTER>
        # Input data, first file : <curve.dat in the temporary directory><ENTER>
        # Output file [ gnom.out ] : <gnom.out in the temporary directory><ENTER>
        # No of start points to skip [ 0 ] : 0<ENTER>
        # ... (just GNOM output)
        # ... (just GNOM output)
        # Input data, second file [ none ] : <ENTER>
        # No of end points to omit [ 0 ] : 0<ENTER>
        # ... (just GNOM output)
        # ... (just GNOM output)
        # Angular scale (1/2/3/4) [ 1 ] : 2<ENTER>
        # Plot input data (Y/N) [ Yes ] : N<ENTER>
        # File containing expert parameters [ none ] : <ENTER>
        # Kernel already calculated (Y/N) [ No ] : N<ENTER>
        # Type of system (0/1/2/3/4/5/6) [ 0 ] : 0<ENTER>
        # Zero condition at r=min (Y/N) [ Yes ] : Y<ENTER>
        # Zero condition at r=max (Y/N) [ Yes ] : Y<ENTER>
        # -- Arbitrary monodisperse system --
        # Rmin=0, Rmax is maximum particle diameter
        # Rmax for evaluating p(r) : <Rmax * 10><ENTER>
        # Number of points in real space [(always different)] : <Npoints_realspace><ENTER>
        # Kernel-storage file name [ kern.bin ] : <ENTER>
        # Experimental setup (0/1/2) [ 0 ] : 0<ENTER>
        # Initial ALPHA [ 0.0 ] : <initial_alpha><ENTER>
        # Plot alpha distribution (Y/N) [ Yes ] : N<ENTER>
        # Plot results (Y/N) [ Yes ] : N<ENTER>
        # ... solution ...
        # Your choice : <ENTER>
        # Evaluate errors (Y/N) [ Yes ] : Y<ENTER>
        # Plot p(r) with errors (Y/N) [ Yes ] : N<ENTER>
        # Next data set (Yes/No/Same) [ No ] : N<ENTER>
        # Rmax is scaled by 10 (nm -> Angstrom) before being fed to GNOM.
        gnominput = "\n%s\n%s\n0\n\n0\n2\nN\n\nN\n0\nY\nY\n%f\n%s\n\n0\n%s\nN\nN\n\nY\nN\nN\n" % (
            os.path.join(td, 'curve.dat'), os.path.join(td, 'gnom.out'), 10 * Rmax, Npoints_realspace, initial_alpha)
        result = subprocess.run(['gnom'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                input=gnominput.encode('utf-8'))
        pr, metadata = read_gnom_pr(os.path.join(td, 'gnom.out'), True)
        # Convert the Angstrom-based GNOM results back to nm units:
        # distances are divided by 10, q values multiplied by 10.
        pr[:, 0] /= 10
        metadata['q'] *= 10
        metadata['qj'] *= 10
        metadata['qmin'] *= 10
        metadata['qmax'] *= 10
        metadata['dmax'] /= 10
        metadata['dmin'] /= 10
        metadata['Rg_guinier'] /= 10
        metadata['Rg_gnom'] /= 10
        if outputfilename is not None:
            # Preserve the .out file before the temporary directory is removed.
            shutil.copy(os.path.join(td, 'gnom.out'), outputfilename)
        return pr, metadata
|
awacha/credolib
|
credolib/atsas.py
|
datporod
|
python
|
def datporod(gnomoutfile):
results = subprocess.check_output(['datporod', gnomoutfile]).decode('utf-8').strip().split()
return float(results[0]), float(results[1]), float(results[2])
|
Run datporod and return the estimated Porod volume.
Returns:
Radius of gyration found in the input file
I0 found in the input file
Vporod: the estimated Porod volume
|
train
|
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/atsas.py#L431-L440
| null |
__all__ = ['read_gnom_pr', 'execute_command', 'autorg', 'shanum', 'datgnom', 'dammif', 'bodies', 'datcmp', 'datporod',
'gnom']
import itertools
import os
import re
import shutil
import subprocess
import tempfile
import ipy_table
import numpy as np
from IPython.display import display
from sastool.classes2.curve import Curve
from sastool.misc.errorvalue import ErrorValue
def read_gnom_pr(filename, get_metadata=False):
    """Parse the 'Final results' section of a GNOM output (.out) file.

    Inputs:
        filename: name of the GNOM output file
        get_metadata: if True, a metadata dictionary is returned as well

    Outputs:
        a tuple. Its first element is an Nx3 numpy array holding the p(r)
        function (columns: r, p(r), error). If get_metadata is True, the
        second element is a dict with the quality parameters, the fitted
        and regularized curves and the Guinier/GNOM Rg and I(0) values
        read from the file.

    The parser follows the fixed layout of the GNOM output file and uses
    assertions to validate each expected line.
    """
    metadata = {}
    with open(filename, 'rt', encoding='utf-8') as f:
        # Skip everything up to the 'Final results' marker.
        l = f.readline()
        while 'Final results' not in l:
            l = f.readline()
        assert (not f.readline().strip())  # skip empty line
        assert (f.readline().strip() == 'Parameter    DISCRP    OSCILL    STABIL    SYSDEV    POSITV    VALCEN')
        # Quality-criterion table: one row per parameter, one column per criterion.
        parameters = {'DISCRP': {}, 'OSCILL': {}, 'STABIL': {}, 'SYSDEV': {}, 'POSITV': {}, 'VALCEN': {}}
        for i in range(6):
            line = f.readline().strip().split()
            if i == 4:
                # this line contains only a dashed line: "- - - - - - etc."
                assert (all([l == '-' for l in line]))
                continue
            what = line[0]
            (parameters['DISCRP'][what], parameters['OSCILL'][what],
             parameters['STABIL'][what], parameters['SYSDEV'][what],
             parameters['POSITV'][what], parameters['VALCEN'][what]) = tuple([
                float(x) for x in line[1:]])
        # Recompute a weighted total estimate from the per-criterion values.
        te = tw = 0
        for p in parameters:
            par = parameters[p]
            par['Estimate_corrected'] = np.exp(-(par['Ideal'] - par['Current']) ** 2 / par['Sigma'] ** 2)
            te += par['Estimate_corrected'] * par['Weight']
            tw += par['Weight']
        metadata['totalestimate_corrected'] = te / tw
        metadata['parameters'] = parameters
        assert (not f.readline().strip())  # skip empty line
        match = re.match(r'Angular\s+range\s+:\s+from\s+(?P<qmin>\d+\.\d+)\s+to\s+(?P<qmax>\d+\.\d+)',
                         f.readline().strip())
        assert (match is not None)
        metadata['qmin'] = float(match.groupdict()['qmin'])
        metadata['qmax'] = float(match.groupdict()['qmax'])
        match = re.match(r'Real\s+space\s+range\s+:\s+from\s+(?P<dmin>\d+\.\d+)\s+to\s+(?P<dmax>\d+\.\d+)',
                         f.readline().strip())
        assert (match is not None)
        metadata['dmin'] = float(match.groupdict()['dmin'])
        metadata['dmax'] = float(match.groupdict()['dmax'])
        assert (not f.readline().strip())
        match = re.match(r'Highest ALPHA \(theor\) :\s+(?P<highestalpha>\d+\.\d+E[+-]?\d+)', f.readline().strip())
        assert (match is not None)
        metadata['highestalpha'] = float(match.groupdict()['highestalpha'])
        match = re.match(
            r'Current ALPHA\s+:\s+(?P<currentalpha>\d+\.\d+E[+-]\d+)\s+Rg : (?P<Rg>\d+\.\d+E[+-]\d+)\s+I\(0\) :\s+(?P<I0>\d+\.\d+E[+-]\d+)',
            f.readline().strip())
        assert (match is not None)
        metadata['currentalpha'] = float(match.groupdict()['currentalpha'])
        metadata['Rg_guinier'] = float(match.groupdict()['Rg'])
        metadata['I0_guinier'] = float(match.groupdict()['I0'])
        assert (not f.readline().strip())  # skip empty line
        match = re.match(
            r'Total  estimate : (?P<totalestimate>\d+\.\d+)\s+ which is \s+(?P<qualitystring>.*)\s+solution',
            f.readline().strip())
        assert (match is not None)
        metadata['totalestimate'] = float(match.groupdict()['totalestimate'])
        metadata['qualitystring'] = match.groupdict()['qualitystring']
        assert (not f.readline().strip())  # skip empty line
        assert (f.readline().strip().split() == ['S', 'J', 'EXP', 'ERROR', 'J', 'REG', 'I', 'REG'])
        assert (not f.readline().strip())  # skip empty line
        # Fitted curve block: I REG is present for every q; J EXP / ERROR / J REG
        # only for points inside the experimental range (lines with >2 columns).
        s = []
        sj = []
        jexp = []
        jerror = []
        jreg = []
        ireg = []
        l = f.readline()
        while l.strip():
            terms = [float(x) for x in l.strip().split()]
            s.append(terms[0])
            ireg.append(terms[-1])
            if len(terms) > 2:
                sj.append(terms[0])
                jexp.append(terms[1])
                jerror.append(terms[2])
                jreg.append(terms[3])
            l = f.readline()
        metadata['q'] = np.array(s)
        metadata['qj'] = np.array(sj)
        metadata['jexp'] = np.array(jexp)
        metadata['jerror'] = np.array(jerror)
        metadata['jreg'] = np.array(jreg)
        metadata['ireg'] = np.array(ireg)
        assert ('Distance distribution  function of particle' == f.readline().strip())
        assert (not f.readline().strip())  # skip empty line
        assert (not f.readline().strip())  # skip empty line
        assert (f.readline().strip().split() == ['R', 'P(R)', 'ERROR'])
        assert (not f.readline().strip())  # skip empty line
        data = []
        while True:
            l = f.readline()
            if not l.strip():
                break
            if not l.strip():
                # NOTE(review): unreachable — the identical condition above
                # already breaks; kept for byte-compatibility.
                continue
            try:
                data.append([float(f_) for f_ in l.strip().split()])
            except ValueError:
                # Non-numeric line: the 'Reciprocal space' summary ends the table.
                # NOTE(review): other non-numeric lines are silently skipped here.
                if 'Reciprocal space' in l:
                    break
            except:
                raise
        l = f.readline()
        match = re.match(
            r'Real space: Rg =\s+(?P<Rg>\d+\.\d+(E[+-]?\d+)?) \+- (?P<dRg>\d+\.\d+(E[+-]?\d+)?)\s+I\(0\) =\s+(?P<I0>\d+\.\d+(E[+-]?\d+)?) \+-\s+(?P<dI0>\d+\.\d+(E[+-]?\d+)?)',
            l.strip())
        assert (match is not None)
        metadata['Rg_gnom'] = ErrorValue(float(match.groupdict()['Rg']), float(match.groupdict()['dRg']))
        metadata['I0_gnom'] = ErrorValue(float(match.groupdict()['I0']), float(match.groupdict()['dI0']))
    if get_metadata:
        return (np.array(data), metadata)
    else:
        return (np.array(data),)
def execute_command(cmd, input_to_command=None, eat_output=False, noprint=False):
    """Run an external command, streaming its stdout followed by its stderr.

    Inputs:
        cmd: the command and its arguments as a list, e.g. ['autorg', 'file.dat']
        input_to_command: data fed to the child's standard input. May be a
            str (encoded as UTF-8), bytes, a file object, or None.
        eat_output: if True, print a '.' per output line instead of the line
            itself (progress-indicator mode).
        noprint: if True, do not print anything.

    Outputs:
        the lines printed by the child (stdout first, then stderr), with the
        trailing newline stripped, as a list of strings.
    """
    # A str or bytes payload must be written through a pipe; anything else
    # (None or a file object) is handed to Popen directly.
    if isinstance(input_to_command, (str, bytes)):
        stdin = subprocess.PIPE
    else:
        stdin = input_to_command
    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=stdin)
    if isinstance(input_to_command, str):
        input_to_command = input_to_command.encode('utf-8')
    if isinstance(input_to_command, bytes):
        popen.stdin.write(input_to_command)
        # Close the pipe to signal EOF; otherwise the child may block
        # forever waiting for more input.
        popen.stdin.close()
    lines_iterator = itertools.chain(popen.stdout, popen.stderr)
    resultinglines = []
    for line in lines_iterator:
        text = str(line[:-1], encoding='utf-8')
        if not noprint:
            if not eat_output:
                print(text, flush=True)
            else:
                print(".", end='', flush=True)
        resultinglines.append(text)
    popen.wait()  # reap the child so no zombie process is left behind
    return resultinglines
def autorg(filename, mininterval=None, qminrg=None, qmaxrg=None, noprint=True):
    """Execute autorg.

    Inputs:
        filename: either a name of an ascii file, or an instance of Curve.
        mininterval: the minimum number of points in the Guinier range
        qminrg: the maximum value of qmin*Rg. Default of autorg is 1.0
        qmaxrg: the maximum value of qmax*Rg. Default of autorg is 1.3
        noprint: if the output of autorg should be redirected to the null
            device.

    Outputs:
        Rg as an ErrorValue
        I0 as an ErrorValue
        qmin: the lower end of the chosen Guinier range
        qmax: the upper end of the chosen Guinier range
        quality: the quality parameter, between 0 and 1
        aggregation: float, the extent of aggregation
    """
    # Track explicitly whether a Curve instance was passed, instead of
    # probing for the name with try/except NameError later.
    curve = None
    if isinstance(filename, Curve):
        curve = filename
        # Dump the in-memory curve to a temporary file for autorg to read.
        with tempfile.NamedTemporaryFile('w+b',
                                         delete=False) as f:
            curve.save(f)
            filename = f.name
    cmdline = ['autorg', filename, '-f', 'ssv']
    if mininterval is not None:
        cmdline.extend(['--mininterval', str(mininterval)])
    if qminrg is not None:
        cmdline.extend(['--sminrg', str(qminrg)])
    if qmaxrg is not None:
        cmdline.extend(['--smaxrg', str(qmaxrg)])
    result = execute_command(cmdline, noprint=noprint)
    # ssv output: Rg dRg I0 dI0 first-index last-index quality aggregation filename
    Rg, dRg, I0, dI0, idxfirst, idxlast, quality, aggregation, filename = result[0].split(None, 8)
    if curve is None:
        # A filename was given: load the curve to translate point indices to q.
        curve = Curve.new_from_file(filename)
    else:
        # A Curve was given: remove the temporary file created above.
        os.unlink(filename)
    # autorg reports 1-based point indices; convert to q values.
    return ErrorValue(float(Rg), float(dRg)), ErrorValue(float(I0), float(dI0)), curve.q[int(idxfirst) - 1], curve.q[
        int(idxlast) - 1], float(quality), float(aggregation)
def datgnom(filename, Rg=None, noprint=True):
    """Run datgnom on a scattering curve file and parse the resulting .out file.

    Inputs:
        filename: name of the ascii scattering curve file
        Rg: radius of gyration; determined with autorg() when not given
        noprint: suppress the printout of the external programs

    Outputs: the p(r) array and the metadata dict from read_gnom_pr().
    """
    if Rg is None:
        # Estimate the radius of gyration first via autorg.
        Rg = autorg(filename)[0]
    execute_command(['datgnom', filename, '-r', '%f' % float(Rg)],
                    noprint=noprint)
    # datgnom writes its result next to the input file, with a .out extension.
    outfile = filename.rsplit('.', 1)[0] + '.out'
    return read_gnom_pr(outfile, get_metadata=True)
def dammif(gnomoutputfilename, prefix=None, mode='fast', symmetry='P1', N=None,
           noprint=True):
    """Run DAMMIF ab initio bead modelling on a GNOM output file.

    Inputs:
        gnomoutputfilename: the GNOM .out file to model
        prefix: output file prefix (default: 'dammif_' + input basename)
        mode: DAMMIF mode ('fast', 'slow', ...)
        symmetry: point-group symmetry string
        N: if given, run DAMMIF N times with numbered prefixes
        noprint: suppress DAMMIF's printout

    Outputs: the produced .pdb filename (N is None) or the list of them.
    """
    if prefix is None:
        prefix = 'dammif_' + gnomoutputfilename.rsplit('.', 1)[0]

    def _run_dammif(pfx):
        # One DAMMIF invocation with the given output prefix.
        execute_command(['dammif', '--prefix=%s' % pfx, '--omit-solvent',
                         '--mode=%s' % mode, '--symmetry=%s' % symmetry,
                         '--unit=NANOMETER', gnomoutputfilename],
                        noprint=noprint)

    if N is None:
        _run_dammif(prefix)
        return prefix + '-1.pdb'
    pdbfiles = []
    for run_index in range(N):
        _run_dammif('%s_%03d' % (prefix, run_index))
        pdbfiles.append('%s_%03d-1.pdb' % (prefix, run_index))
    return pdbfiles
def shanum(filename, dmax=None, noprint=True):
    """Execute the shanum program to determine the optimum qmax
    according to an estimation of the optimum number of Shannon
    channels.

    Inputs:
        filename: either a name of an ascii file, or an instance
            of Curve
        dmax: the cut-off of the P(r) function, if known. If None,
            this will be determined by the shanum program
        noprint: if the printout of the program is to be suppressed.

    Outputs: dmax, nsh, nopt, qmaxopt
        dmax: the cut-off of the P(r) function.
        nsh: the estimated number of Shannon channels
        nopt: the optimum number of Shannon channels
        qmaxopt: the optimum value of the high-q cutoff
    """
    if isinstance(filename, Curve):
        # Write the in-memory curve to a temporary file for shanum to read.
        curve = filename
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            curve.save(f)
            filename = f.name
    cmdline = ['shanum', filename]
    if dmax is not None:
        cmdline.append(str(float(dmax)))
    result = execute_command(cmdline, noprint=noprint)
    # Parse the "key=value" lines printed by shanum.
    for l in result:
        l = l.strip()
        if l.startswith('Dmax='):
            dmax = float(l.split('=')[1])
        elif l.startswith('Smax='):
            # NOTE(review): qmax is parsed but never returned — confirm intent.
            qmax = float(l.split('=')[1])
        elif l.startswith('Nsh='):
            nsh = float(l.split('=')[1])
        elif l.startswith('Nopt='):
            nopt = float(l.split('=')[1])
        elif l.startswith('Sopt='):
            qmaxopt = float(l.split('=')[1])
    # NOTE(review): if shanum's output lacks any of these keys, the names
    # below are unbound and a NameError is raised — verify the output format.
    return dmax, nsh, nopt, qmaxopt
def bodies(filename, bodytypes=None, prefix=None, fit_timeout=10, Ndummyatoms=2000, noprint=True):
    """Fit simple geometrical bodies to a scattering curve with the ATSAS
    'bodies' program, build a dummy-atom model for each fitted body, and
    display a summary table of the fit qualities.

    Inputs:
        filename: either a name of an ascii file, or an instance of Curve
            (in which case prefix must be given)
        bodytypes: list of body-type names to try (default: all supported)
        prefix: output file prefix (default: input filename without extension)
        fit_timeout: per-invocation timeout in seconds for the bodies program
        Ndummyatoms: number of dummy atoms in the generated DAM model
        noprint: if True, do not echo the bodies program's output

    Outputs:
        a dict mapping body-type name to a dict of fit results (expected and
        fitted Rg, I0 and volume, chi^2, fitted body parameters, raw output),
        or None when the bodies program could not read the input file.
    """
    # Body types in the menu order expected by the 'bodies' program
    # (1-based index is sent on its stdin below).
    BODIES = ['ellipsoid', 'rotation-ellipsoid', 'cylinder', 'elliptic-cylinder', 'hollow-cylinder', 'parallelepiped',
              'hollow-sphere', 'dumbbell']
    if bodytypes is None:
        bodytypes = BODIES
    unknownbodies = [b for b in bodytypes if b not in BODIES]
    if unknownbodies:
        raise ValueError('Unknown body type(s): ' + ', '.join(unknownbodies))
    if isinstance(filename, Curve):
        # Write the in-memory curve to a temporary file for the program to read.
        curve = filename
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            curve.save(f)
            filename = f.name
        assert (prefix is not None)
    else:
        if prefix is None:
            prefix = filename.rsplit('.', 1)[0]
    fittingresults = {}
    for b in bodytypes:
        print('Fitting geometrical body %s' % b, flush=True)
        # Drive the interactive 'bodies' program through its stdin:
        # 'f' = fit mode, then filename, body-type index, defaults, prefix.
        p = subprocess.Popen(['bodies'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            stdout, stderr = p.communicate(input=b'f\n%s\n%d\n\n\n\n\n\n\n%s\n' % (
                filename.encode('utf-8'), BODIES.index(b) + 1, prefix.encode('utf-8')), timeout=fit_timeout)
        except subprocess.TimeoutExpired:
            print('Fitting timed out.')
            continue
        stdout = stdout.decode('utf-8')
        stderr = stderr.decode('utf-8')
        if stderr:
            print('Error: ', stderr, flush=True)
        # Scan the program's printout line by line for the fit results.
        printing_on = False
        parameter_recording_on = False
        bodyparameters = []
        bodyparameternames = []
        fittingresults[b] = {}
        for s in stdout.split('\n'):
            if s.startswith('    Input file name'):
                printing_on = True
            if printing_on and not noprint:
                print(s, flush=True)
            # Body parameters are listed between 'Body type' and the 'scale'
            # parameter line.
            if s.startswith('    Body type'):
                parameter_recording_on = True
            if s.startswith('    Parameter \'scale\''):
                parameter_recording_on = False
            if parameter_recording_on and s.startswith('    Parameter \''):
                bodyparameters.append(float(s.split(':')[1].strip()))
                # Extract the parameter name between the single quotes.
                bodyparameternames.append(s[s.index("'") + 1:(s.index("'") + s[s.index("'") + 1:].index("'") + 1)])
            if s.startswith('    Expected Radius of Gyration'):
                fittingresults[b]['Rgexp'] = float(s.split(':')[1].strip())
            elif s.startswith('    Expected I0'):
                fittingresults[b]['I0exp'] = float(s.split(':')[1].strip())
            elif s.startswith('    Expected Volume'):
                fittingresults[b]['Volexp'] = float(s.split(':')[1].strip())
            elif s.startswith('    Fit Radius of Gyration'):
                fittingresults[b]['Rgfit'] = float(s.split(':')[1].strip())
            elif s.startswith('    Fit I0'):
                fittingresults[b]['I0fit'] = float(s.split(':')[1].strip())
            elif s.startswith('    Fit Volume'):
                fittingresults[b]['Volfit'] = float(s.split(':')[1].strip())
            elif s.startswith('    Goodness of Fit (chi-square)'):
                fittingresults[b]['Chi2'] = float(s.split(':')[1].strip())
        if 'Chi2' not in fittingresults[b]:
            # No chi^2 in the printout: the program could not read the input.
            print('Error: cannot open file {}'.format(filename))
            return
        fittingresults[b]['stdout_from_bodies'] = stdout
        fittingresults[b]['type'] = b
        fittingresults[b]['bodyparameters'] = bodyparameters
        fittingresults[b]['bodyparameternames'] = bodyparameternames
        print('Creating DAM model')
        damoutputfile = prefix + '-' + b + '.pdb'
        # Second invocation in 'd' (dummy-atom) mode; parameters are fed
        # back multiplied by 10 — presumably an nm-to-Angstrom conversion,
        # TODO confirm against the bodies program's expected units.
        p = subprocess.Popen(['bodies'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            stdout, stderr = p.communicate(input=b'd\n%d\n' % (BODIES.index(b) + 1) + b'\n'.join(
                [b'%6f' % (10 * v) for v in bodyparameters]) + b'\n1\n%d\n%s\n' % (
                Ndummyatoms, damoutputfile.encode('utf-8')), timeout=fit_timeout)
        except subprocess.TimeoutExpired:
            print('Error creating DAM model.')
        if stderr:
            print(stderr)
    # Build and display an ipy_table summary of all the fits.
    tab = [['Body', 'Goodness of Fit ($\chi^2$)', 'Rg mismatch', 'I0 mismatch', 'Volume mismatch']]
    for b in sorted(fittingresults):
        tab.append([
            fittingresults[b]['type'] + ' (' + ', '.join(
                ['%s=%.3f nm' % (var, val) for var, val in zip(fittingresults[b]['bodyparameternames'],
                                                               fittingresults[b]['bodyparameters'])]) + ')',
            fittingresults[b]['Chi2'],
            '%.2f nm' % (fittingresults[b]['Rgfit'] - fittingresults[b]['Rgexp']),
            '%5g cm$^{-1}$ sr$^{-1}$' % (fittingresults[b]['I0fit'] - fittingresults[b]['I0exp']),
            '%.2f nm^3' % (fittingresults[b]['Volfit'] - fittingresults[b]['Volexp']),
        ])
    tab = ipy_table.IpyTable(tab)
    tab.apply_theme('basic')
    display(tab)
    return fittingresults
def datcmp(*curves, alpha=None, adjust=None, test='CORMAP'):
    """Run datcmp on the scattering curves.

    Inputs:
        *curves: scattering curves as positional arguments
        alpha: confidence parameter
        adjust: adjustment type (string), see the help of datcmp for details
        test: test (string), see the help of datcmp for details

    Outputs:
        matC: the C matrix
        matp: the matrix of the p values comparing the i-th and j-th exposure
        matpadj: adjusted p-matrix of the exposures
        ok: list of the same length as the number of curves. If True, the
            given curve does not differ significantly from the others.

    Raises:
        ValueError if the curves are not all of the same length.
    """
    # datcmp can only compare curves defined on the same q-grid
    if len({len(c) for c in curves}) != 1:
        raise ValueError('All curves have to be of the same length.')
    datcmpargs = []
    if alpha is not None:
        datcmpargs.append('--alpha=%f' % alpha)
    if adjust is not None:
        datcmpargs.append('--adjust=%s' % adjust)
    if test is not None:
        datcmpargs.append('--test=%s' % test)
    with tempfile.TemporaryDirectory(prefix='credolib_datcmp') as td:
        # write each curve into a three-column (q, I, dI) ascii file
        for i, c in enumerate(curves):
            mat = np.zeros((len(c), 3))
            mat[:, 0] = c.q
            mat[:, 1] = c.Intensity
            mat[:, 2] = c.Error
            np.savetxt(os.path.join(td, 'curve_%d.dat' % i), mat)
        # result matrices stay NaN wherever datcmp produced no value
        matC = np.full((len(curves), len(curves)), np.nan)
        matp = np.full((len(curves), len(curves)), np.nan)
        matpadj = np.full((len(curves), len(curves)), np.nan)
        ok = np.full(len(curves), np.nan)
        try:
            results = subprocess.check_output(
                ['datcmp'] + datcmpargs + [os.path.join(td, 'curve_%d.dat' % i) for i in range(len(curves))]).decode(
                'utf-8')
        except subprocess.CalledProcessError:
            # datcmp failed: return the all-NaN matrices
            pass
        else:
            # parse the pairwise comparison lines of the datcmp output;
            # raw strings fix the invalid escape sequences of the original
            for l in results.split('\n'):
                m = re.match(
                    r'^\s*(?P<i>\d+)\s*vs\.\s*(?P<j>\d+)\s*(?P<C>\d*\.\d*)\s*(?P<p>\d*\.\d*)\s*(?P<adjp>\d*\.\d*)[\s\*]{1}$',
                    l)
                if m is not None:
                    # indices in the datcmp output are 1-based
                    i = int(m.group('i')) - 1
                    j = int(m.group('j')) - 1
                    matC[i, j] = matC[j, i] = float(m.group('C'))
                    matp[i, j] = matp[j, i] = float(m.group('p'))
                    matpadj[i, j] = matpadj[j, i] = float(m.group('adjp'))
                else:
                    m = re.match(r'\s*(?P<i>\d+)(?P<ack>[\*\s]{1})\s*', l)
                    if m is not None:
                        # a trailing '*' marks curves deemed equivalent
                        ok[int(m.group('i')) - 1] = (m.group('ack') == '*')
    return matC, matp, matpadj, ok
def gnom(curve, Rmax, outputfilename=None, Npoints_realspace=None, initial_alpha=None):
    """Run GNOM on the dataset.

    Inputs:
        curve: an instance of sastool.classes2.Curve or anything which has a
            save() method, saving the scattering curve to a given .dat file,
            in q=4*pi*sin(theta)/lambda [1/nm] units
        Rmax: the estimated maximum extent of the scattering object, in nm.
        outputfilename: the preferred name of the output file. If not given,
            the .out file produced by gnom will be lost.
        Npoints_realspace: the expected number of points in the real space
        initial_alpha: the initial value of the regularization parameter.

    Outputs:
        the same as of read_gnom_pr()
    """
    with tempfile.TemporaryDirectory(prefix='credolib_gnom') as td:
        curve.save(os.path.join(td, 'curve.dat'))
        print('Using curve for GNOM: qrange from {} to {}'.format(curve.q.min(), curve.q.max()))
        # Optional parameters are fed to gnom as text; an empty answer makes
        # gnom fall back to its interactive default.
        if Npoints_realspace is None:
            Npoints_realspace = ""
        else:
            Npoints_realspace = str(Npoints_realspace)
        if initial_alpha is None:
            initial_alpha = ""
        else:
            initial_alpha = str(initial_alpha)
        # GNOM questions and our answers:
        # Printer type [ postscr ] : <ENTER>
        # Input data, first file : <curve.dat in the temporary directory><ENTER>
        # Output file [ gnom.out ] : <gnom.out in the temporary directory><ENTER>
        # No of start points to skip [ 0 ] : 0<ENTER>
        # ... (just GNOM output)
        # ... (just GNOM output)
        # Input data, second file [ none ] : <ENTER>
        # No of end points to omit [ 0 ] : 0<ENTER>
        # ... (just GNOM output)
        # ... (just GNOM output)
        # Angular scale (1/2/3/4) [ 1 ] : 2<ENTER>
        # Plot input dataa (Y/N) [ Yes ] : N<ENTER>
        # File containing expert parameters [ none ] : <ENTER>
        # Kernel already calculated (Y/N) [ No ] : N<ENTER>
        # Type of system (0/1/2/3/4/5/6) [ 0 ] : 0<ENTER>
        # Zero condition at r=min (Y/N) [ Yes ] : Y<ENTER>
        # Zero condition at r=max (Y/N) [ Yes ] : Y<ENTER>
        # -- Arbitrary monodisperse system --
        # Rmin=0, Rmax is maximum particle diameter
        # Rmax for evaluating p(r) : <Rmax * 10><ENTER>
        # Number of points in real space [(always different)] : <Npoints_realspace><ENTER>
        # Kernel-storage file name [ kern.bin ] : <ENTER>
        # Experimental setup (0/1/2) [ 0 ] : 0<ENTER>
        # Initial ALPHA [ 0.0 ] : <initial_alpha><ENTER>
        # Plot alpha distribution (Y/N) [ Yes ] : N<ENTER>
        # Plot results (Y/N) [ Yes ] : N<ENTER>
        # ... solution ...
        # Your choice : <ENTER>
        # Evaluate errors (Y/N) [ Yes ] : Y<ENTER>
        # Plot p(r) with errors (Y/N) [ Yes ] : N<ENTER>
        # Next data set (Yes/No/Same) [ No ] : N<ENTER>
        # Rmax is multiplied by 10: the curve is in 1/nm, so gnom works with
        # real-space lengths in Angstroms.
        gnominput = "\n%s\n%s\n0\n\n0\n2\nN\n\nN\n0\nY\nY\n%f\n%s\n\n0\n%s\nN\nN\n\nY\nN\nN\n" % (
            os.path.join(td, 'curve.dat'), os.path.join(td, 'gnom.out'), 10 * Rmax, Npoints_realspace, initial_alpha)
        result = subprocess.run(['gnom'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                input=gnominput.encode('utf-8'))
        pr, metadata = read_gnom_pr(os.path.join(td, 'gnom.out'), True)
        # convert back from gnom's Angstrom-based units to the nm-based ones
        # used throughout credolib (q: 1/A -> 1/nm, lengths: A -> nm)
        pr[:, 0] /= 10
        metadata['q'] *= 10
        metadata['qj'] *= 10
        metadata['qmin'] *= 10
        metadata['qmax'] *= 10
        metadata['dmax'] /= 10
        metadata['dmin'] /= 10
        metadata['Rg_guinier'] /= 10
        metadata['Rg_gnom'] /= 10
        if outputfilename is not None:
            # preserve the .out file before the temporary directory vanishes
            shutil.copy(os.path.join(td, 'gnom.out'), outputfilename)
        return pr, metadata
|
awacha/credolib
|
credolib/atsas.py
|
gnom
|
python
|
def gnom(curve, Rmax, outputfilename=None, Npoints_realspace=None, initial_alpha=None):
with tempfile.TemporaryDirectory(prefix='credolib_gnom') as td:
curve.save(os.path.join(td, 'curve.dat'))
print('Using curve for GNOM: qrange from {} to {}'.format(curve.q.min(), curve.q.max()))
if Npoints_realspace is None:
Npoints_realspace = ""
else:
Npoints_realspace = str(Npoints_realspace)
if initial_alpha is None:
initial_alpha = ""
else:
initial_alpha = str(initial_alpha)
# GNOM questions and our answers:
# Printer type [ postscr ] : <ENTER>
# Input data, first file : <curve.dat in the temporary directory><ENTER>
# Output file [ gnom.out ] : <gnom.out in the temporary directory><ENTER>
# No of start points to skip [ 0 ] : 0<ENTER>
# ... (just GNOM output)
# ... (just GNOM output)
# Input data, second file [ none ] : <ENTER>
# No of end points to omit [ 0 ] : 0<ENTER>
# ... (just GNOM output)
# ... (just GNOM output)
# Angular scale (1/2/3/4) [ 1 ] : 2<ENTER>
# Plot input dataa (Y/N) [ Yes ] : N<ENTER>
# File containing expert parameters [ none ] : <ENTER>
# Kernel already calculated (Y/N) [ No ] : N<ENTER>
# Type of system (0/1/2/3/4/5/6) [ 0 ] : 0<ENTER>
# Zero condition at r=min (Y/N) [ Yes ] : Y<ENTER>
# Zero condition at r=max (Y/N) [ Yes ] : Y<ENTER>
# -- Arbitrary monodisperse system --
# Rmin=0, Rmax is maximum particle diameter
# Rmax for evaluating p(r) : <Rmax * 10><ENTER>
# Number of points in real space [(always different)] : <Npoints_realspace><ENTER>
# Kernel-storage file name [ kern.bin ] : <ENTER>
# Experimental setup (0/1/2) [ 0 ] : 0<ENTER>
# Initial ALPHA [ 0.0 ] : <initial_alpha><ENTER>
# Plot alpha distribution (Y/N) [ Yes ] : N<ENTER>
# Plot results (Y/N) [ Yes ] : N<ENTER>
# ... solution ...
# Your choice : <ENTER>
# Evaluate errors (Y/N) [ Yes ] : Y<ENTER>
# Plot p(r) with errors (Y/N) [ Yes ] : N<ENTER>
# Next data set (Yes/No/Same) [ No ] : N<ENTER>
gnominput = "\n%s\n%s\n0\n\n0\n2\nN\n\nN\n0\nY\nY\n%f\n%s\n\n0\n%s\nN\nN\n\nY\nN\nN\n" % (
os.path.join(td, 'curve.dat'), os.path.join(td, 'gnom.out'), 10 * Rmax, Npoints_realspace, initial_alpha)
result = subprocess.run(['gnom'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
input=gnominput.encode('utf-8'))
pr, metadata = read_gnom_pr(os.path.join(td, 'gnom.out'), True)
pr[:, 0] /= 10
metadata['q'] *= 10
metadata['qj'] *= 10
metadata['qmin'] *= 10
metadata['qmax'] *= 10
metadata['dmax'] /= 10
metadata['dmin'] /= 10
metadata['Rg_guinier'] /= 10
metadata['Rg_gnom'] /= 10
if outputfilename is not None:
shutil.copy(os.path.join(td, 'gnom.out'), outputfilename)
return pr, metadata
|
Run GNOM on the dataset.
Inputs:
curve: an instance of sastool.classes2.Curve or anything which has a
save() method, saving the scattering curve to a given .dat file,
in q=4*pi*sin(theta)/lambda [1/nm] units
Rmax: the estimated maximum extent of the scattering object, in nm.
outputfilename: the preferred name of the output file. If not given,
the .out file produced by gnom will be lost.
Npoints_realspace: the expected number of points in the real space
initial_alpha: the initial value of the regularization parameter.
Outputs:
the same as of read_gnom_pr()
|
train
|
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/atsas.py#L443-L518
|
[
"def read_gnom_pr(filename, get_metadata=False):\n metadata = {}\n with open(filename, 'rt', encoding='utf-8') as f:\n l = f.readline()\n while 'Final results' not in l:\n l = f.readline()\n assert (not f.readline().strip()) # skip empty line\n assert (f.readline().strip() == 'Parameter DISCRP OSCILL STABIL SYSDEV POSITV VALCEN')\n parameters = {'DISCRP': {}, 'OSCILL': {}, 'STABIL': {}, 'SYSDEV': {}, 'POSITV': {}, 'VALCEN': {}}\n for i in range(6):\n line = f.readline().strip().split()\n if i == 4:\n # this line contains only a dashed line: \"- - - - - - etc.\"\n assert (all([l == '-' for l in line]))\n continue\n what = line[0]\n (parameters['DISCRP'][what], parameters['OSCILL'][what],\n parameters['STABIL'][what], parameters['SYSDEV'][what],\n parameters['POSITV'][what], parameters['VALCEN'][what]) = tuple([\n float(x) for x in line[1:]])\n te = tw = 0\n for p in parameters:\n par = parameters[p]\n par['Estimate_corrected'] = np.exp(-(par['Ideal'] - par['Current']) ** 2 / par['Sigma'] ** 2)\n te += par['Estimate_corrected'] * par['Weight']\n tw += par['Weight']\n metadata['totalestimate_corrected'] = te / tw\n\n metadata['parameters'] = parameters\n assert (not f.readline().strip()) # skip empty line\n match = re.match(r'Angular\\s+range\\s+:\\s+from\\s+(?P<qmin>\\d+\\.\\d+)\\s+to\\s+(?P<qmax>\\d+\\.\\d+)',\n f.readline().strip())\n assert (match is not None)\n metadata['qmin'] = float(match.groupdict()['qmin'])\n metadata['qmax'] = float(match.groupdict()['qmax'])\n match = re.match(r'Real\\s+space\\s+range\\s+:\\s+from\\s+(?P<dmin>\\d+\\.\\d+)\\s+to\\s+(?P<dmax>\\d+\\.\\d+)',\n f.readline().strip())\n assert (match is not None)\n metadata['dmin'] = float(match.groupdict()['dmin'])\n metadata['dmax'] = float(match.groupdict()['dmax'])\n assert (not f.readline().strip())\n match = re.match(r'Highest ALPHA \\(theor\\) :\\s+(?P<highestalpha>\\d+\\.\\d+E[+-]?\\d+)', f.readline().strip())\n assert (match is not None)\n metadata['highestalpha'] = 
float(match.groupdict()['highestalpha'])\n match = re.match(\n r'Current ALPHA\\s+:\\s+(?P<currentalpha>\\d+\\.\\d+E[+-]\\d+)\\s+Rg : (?P<Rg>\\d+\\.\\d+E[+-]\\d+)\\s+I\\(0\\) :\\s+(?P<I0>\\d+\\.\\d+E[+-]\\d+)',\n f.readline().strip())\n assert (match is not None)\n metadata['currentalpha'] = float(match.groupdict()['currentalpha'])\n metadata['Rg_guinier'] = float(match.groupdict()['Rg'])\n metadata['I0_guinier'] = float(match.groupdict()['I0'])\n assert (not f.readline().strip()) # skip empty line\n match = re.match(\n r'Total estimate : (?P<totalestimate>\\d+\\.\\d+)\\s+ which is \\s+(?P<qualitystring>.*)\\s+solution',\n f.readline().strip())\n assert (match is not None)\n metadata['totalestimate'] = float(match.groupdict()['totalestimate'])\n metadata['qualitystring'] = match.groupdict()['qualitystring']\n assert (not f.readline().strip()) # skip empty line\n assert (f.readline().strip().split() == ['S', 'J', 'EXP', 'ERROR', 'J', 'REG', 'I', 'REG'])\n assert (not f.readline().strip()) # skip empty line\n s = []\n sj = []\n jexp = []\n jerror = []\n jreg = []\n ireg = []\n l = f.readline()\n while l.strip():\n terms = [float(x) for x in l.strip().split()]\n s.append(terms[0])\n ireg.append(terms[-1])\n if len(terms) > 2:\n sj.append(terms[0])\n jexp.append(terms[1])\n jerror.append(terms[2])\n jreg.append(terms[3])\n l = f.readline()\n metadata['q'] = np.array(s)\n metadata['qj'] = np.array(sj)\n metadata['jexp'] = np.array(jexp)\n metadata['jerror'] = np.array(jerror)\n metadata['jreg'] = np.array(jreg)\n metadata['ireg'] = np.array(ireg)\n assert ('Distance distribution function of particle' == f.readline().strip())\n assert (not f.readline().strip()) # skip empty line\n assert (not f.readline().strip()) # skip empty line\n assert (f.readline().strip().split() == ['R', 'P(R)', 'ERROR'])\n assert (not f.readline().strip()) # skip empty line\n\n data = []\n while True:\n l = f.readline()\n if not l.strip():\n break\n if not l.strip():\n continue\n try:\n 
data.append([float(f_) for f_ in l.strip().split()])\n except ValueError:\n if 'Reciprocal space' in l:\n break\n except:\n raise\n l = f.readline()\n match = re.match(\n r'Real space: Rg =\\s+(?P<Rg>\\d+\\.\\d+(E[+-]?\\d+)?) \\+- (?P<dRg>\\d+\\.\\d+(E[+-]?\\d+)?)\\s+I\\(0\\) =\\s+(?P<I0>\\d+\\.\\d+(E[+-]?\\d+)?) \\+-\\s+(?P<dI0>\\d+\\.\\d+(E[+-]?\\d+)?)',\n l.strip())\n assert (match is not None)\n metadata['Rg_gnom'] = ErrorValue(float(match.groupdict()['Rg']), float(match.groupdict()['dRg']))\n metadata['I0_gnom'] = ErrorValue(float(match.groupdict()['I0']), float(match.groupdict()['dI0']))\n if get_metadata:\n return (np.array(data), metadata)\n else:\n return (np.array(data),)\n"
] |
__all__ = ['read_gnom_pr', 'execute_command', 'autorg', 'shanum', 'datgnom', 'dammif', 'bodies', 'datcmp', 'datporod',
'gnom']
import itertools
import os
import re
import shutil
import subprocess
import tempfile
import ipy_table
import numpy as np
from IPython.display import display
from sastool.classes2.curve import Curve
from sastool.misc.errorvalue import ErrorValue
def read_gnom_pr(filename, get_metadata=False):
    """Parse the ``Final results`` section of a GNOM .out file.

    Inputs:
        filename: name of the GNOM output (.out) file
        get_metadata: if True, also return a dict with the fit metadata

    Outputs:
        a 1-tuple with the p(r) table as an Nx3 numpy array (columns:
        R, P(R), ERROR), or, if get_metadata is True, a 2-tuple of the
        p(r) array and the metadata dict (quality descriptors, fitted
        curves, Rg, I0, etc.).

    Raises:
        AssertionError if the file does not follow the expected layout.
    """
    metadata = {}
    with open(filename, 'rt', encoding='utf-8') as f:
        # skip everything up to the 'Final results' section
        l = f.readline()
        while 'Final results' not in l:
            l = f.readline()
        assert (not f.readline().strip())  # skip empty line
        assert (f.readline().strip() == 'Parameter DISCRP OSCILL STABIL SYSDEV POSITV VALCEN')
        parameters = {'DISCRP': {}, 'OSCILL': {}, 'STABIL': {}, 'SYSDEV': {}, 'POSITV': {}, 'VALCEN': {}}
        # read the 6-row table of quality descriptors
        for i in range(6):
            line = f.readline().strip().split()
            if i == 4:
                # this line contains only a dashed line: "- - - - - - etc."
                assert (all([l == '-' for l in line]))
                continue
            what = line[0]
            (parameters['DISCRP'][what], parameters['OSCILL'][what],
             parameters['STABIL'][what], parameters['SYSDEV'][what],
             parameters['POSITV'][what], parameters['VALCEN'][what]) = tuple([
                float(x) for x in line[1:]])
        # recompute a weighted total estimate from the per-descriptor values
        te = tw = 0
        for p in parameters:
            par = parameters[p]
            par['Estimate_corrected'] = np.exp(-(par['Ideal'] - par['Current']) ** 2 / par['Sigma'] ** 2)
            te += par['Estimate_corrected'] * par['Weight']
            tw += par['Weight']
        metadata['totalestimate_corrected'] = te / tw
        metadata['parameters'] = parameters
        assert (not f.readline().strip())  # skip empty line
        match = re.match(r'Angular\s+range\s+:\s+from\s+(?P<qmin>\d+\.\d+)\s+to\s+(?P<qmax>\d+\.\d+)',
                         f.readline().strip())
        assert (match is not None)
        metadata['qmin'] = float(match.groupdict()['qmin'])
        metadata['qmax'] = float(match.groupdict()['qmax'])
        match = re.match(r'Real\s+space\s+range\s+:\s+from\s+(?P<dmin>\d+\.\d+)\s+to\s+(?P<dmax>\d+\.\d+)',
                         f.readline().strip())
        assert (match is not None)
        metadata['dmin'] = float(match.groupdict()['dmin'])
        metadata['dmax'] = float(match.groupdict()['dmax'])
        assert (not f.readline().strip())
        match = re.match(r'Highest ALPHA \(theor\) :\s+(?P<highestalpha>\d+\.\d+E[+-]?\d+)', f.readline().strip())
        assert (match is not None)
        metadata['highestalpha'] = float(match.groupdict()['highestalpha'])
        match = re.match(
            r'Current ALPHA\s+:\s+(?P<currentalpha>\d+\.\d+E[+-]\d+)\s+Rg : (?P<Rg>\d+\.\d+E[+-]\d+)\s+I\(0\) :\s+(?P<I0>\d+\.\d+E[+-]\d+)',
            f.readline().strip())
        assert (match is not None)
        metadata['currentalpha'] = float(match.groupdict()['currentalpha'])
        metadata['Rg_guinier'] = float(match.groupdict()['Rg'])
        metadata['I0_guinier'] = float(match.groupdict()['I0'])
        assert (not f.readline().strip())  # skip empty line
        match = re.match(
            r'Total estimate : (?P<totalestimate>\d+\.\d+)\s+ which is \s+(?P<qualitystring>.*)\s+solution',
            f.readline().strip())
        assert (match is not None)
        metadata['totalestimate'] = float(match.groupdict()['totalestimate'])
        metadata['qualitystring'] = match.groupdict()['qualitystring']
        assert (not f.readline().strip())  # skip empty line
        assert (f.readline().strip().split() == ['S', 'J', 'EXP', 'ERROR', 'J', 'REG', 'I', 'REG'])
        assert (not f.readline().strip())  # skip empty line
        # read the fitted-curve table: rows with >2 columns also carry the
        # experimental data, short rows only the regularized intensity
        s = []
        sj = []
        jexp = []
        jerror = []
        jreg = []
        ireg = []
        l = f.readline()
        while l.strip():
            terms = [float(x) for x in l.strip().split()]
            s.append(terms[0])
            ireg.append(terms[-1])
            if len(terms) > 2:
                sj.append(terms[0])
                jexp.append(terms[1])
                jerror.append(terms[2])
                jreg.append(terms[3])
            l = f.readline()
        metadata['q'] = np.array(s)
        metadata['qj'] = np.array(sj)
        metadata['jexp'] = np.array(jexp)
        metadata['jerror'] = np.array(jerror)
        metadata['jreg'] = np.array(jreg)
        metadata['ireg'] = np.array(ireg)
        assert ('Distance distribution function of particle' == f.readline().strip())
        assert (not f.readline().strip())  # skip empty line
        assert (not f.readline().strip())  # skip empty line
        assert (f.readline().strip().split() == ['R', 'P(R)', 'ERROR'])
        assert (not f.readline().strip())  # skip empty line
        # read the p(r) table until a blank line or a non-numeric
        # 'Reciprocal space' summary line terminates it
        data = []
        while True:
            l = f.readline()
            if not l.strip():
                break
            if not l.strip():
                # NOTE(review): unreachable -- the previous branch already
                # breaks on the same condition.
                continue
            try:
                data.append([float(f_) for f_ in l.strip().split()])
            except ValueError:
                if 'Reciprocal space' in l:
                    break
            except:
                raise
        l = f.readline()
        match = re.match(
            r'Real space: Rg =\s+(?P<Rg>\d+\.\d+(E[+-]?\d+)?) \+- (?P<dRg>\d+\.\d+(E[+-]?\d+)?)\s+I\(0\) =\s+(?P<I0>\d+\.\d+(E[+-]?\d+)?) \+-\s+(?P<dI0>\d+\.\d+(E[+-]?\d+)?)',
            l.strip())
        assert (match is not None)
        metadata['Rg_gnom'] = ErrorValue(float(match.groupdict()['Rg']), float(match.groupdict()['dRg']))
        metadata['I0_gnom'] = ErrorValue(float(match.groupdict()['I0']), float(match.groupdict()['dI0']))
    if get_metadata:
        return (np.array(data), metadata)
    else:
        return (np.array(data),)
def execute_command(cmd, input_to_command=None, eat_output=False, noprint=False):
    """Run an external command and collect its output lines.

    Inputs:
        cmd: the command and its arguments as a list (no shell is used).
        input_to_command: data for the standard input of the subprocess:
            a str (encoded as UTF-8), a bytes object, or an open file
            object / descriptor (handed to Popen directly).
        eat_output: if True, print only a dot per line instead of echoing
            the output (a simple progress indicator).
        noprint: if True, print nothing at all.

    Output:
        list of the lines written by the subprocess (stdout first, then
        stderr), decoded as UTF-8, without trailing newlines.
    """
    if isinstance(input_to_command, (str, bytes)):
        # bug fix: bytes input used to be passed to Popen as the stdin
        # argument itself, which is invalid; pipe it just like str input.
        stdin = subprocess.PIPE
    else:
        stdin = input_to_command
    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=stdin)
    if isinstance(input_to_command, str):
        input_to_command = input_to_command.encode('utf-8')
    if isinstance(input_to_command, bytes):
        popen.stdin.write(input_to_command)
        # bug fix: close stdin so the child sees EOF; otherwise programs
        # that read their whole input block forever.
        popen.stdin.close()
    resultinglines = []
    # stderr is only drained after stdout is exhausted; fine for the short
    # outputs of the ATSAS tools (NOTE: could deadlock if the child fills
    # the stderr pipe buffer while stdout is still open).
    for line in itertools.chain(popen.stdout, popen.stderr):
        if not noprint:
            if not eat_output:
                print(str(line[:-1], encoding='utf-8'), flush=True)
            else:
                print(".", end='', flush=True)
        resultinglines.append(str(line[:-1], encoding='utf-8'))
    popen.wait()  # reap the child to avoid leaving a zombie process
    return resultinglines
def autorg(filename, mininterval=None, qminrg=None, qmaxrg=None, noprint=True):
    """Execute autorg.
    Inputs:
        filename: either a name of an ascii file, or an instance of Curve.
        mininterval: the minimum number of points in the Guinier range
        qminrg: the maximum value of qmin*Rg. Default of autorg is 1.0
        qmaxrg: the maximum value of qmax*Rg. Default of autorg is 1.3
        noprint: if the output of autorg should be redirected to the null
            device.
    Outputs:
        Rg as an ErrorValue
        I0 as an ErrorValue
        qmin: the lower end of the chosen Guinier range
        qmax: the upper end of the chosen Guinier range
        quality: the quality parameter, between 0 and 1
        aggregation: float, the extent of aggregation
    """
    if isinstance(filename, Curve):
        # dump the curve to a temporary ascii file for autorg to read;
        # delete=False because autorg opens it by name after we close it
        curve = filename
        with tempfile.NamedTemporaryFile('w+b',
                                         delete=False) as f:
            curve.save(f)
            filename = f.name
    cmdline = ['autorg', filename, '-f', 'ssv']
    if mininterval is not None:
        cmdline.extend(['--mininterval', str(mininterval)])
    if qminrg is not None:
        cmdline.extend(['--sminrg', str(qminrg)])
    if qmaxrg is not None:
        cmdline.extend(['--smaxrg', str(qmaxrg)])
    result = execute_command(cmdline, noprint=noprint)
    # ssv output: Rg dRg I0 dI0 first_index last_index quality aggregation file
    Rg, dRg, I0, dI0, idxfirst, idxlast, quality, aggregation, filename = result[0].split(None, 8)
    # 'curve' is only bound when a temporary file was written above: in that
    # case remove the temp file, otherwise load the curve from disk so the
    # 1-based point indices can be mapped to q values.
    try:
        curve
    except NameError:
        curve = Curve.new_from_file(filename)
    else:
        os.unlink(filename)
    return ErrorValue(float(Rg), float(dRg)), ErrorValue(float(I0), float(dI0)), curve.q[int(idxfirst) - 1], curve.q[
        int(idxlast) - 1], float(quality), float(aggregation)
def datgnom(filename, Rg=None, noprint=True):
    """Run datgnom on a scattering data file and parse the produced .out file.

    Inputs:
        filename: name of the ascii scattering data file
        Rg: radius of gyration; if None, it is estimated with autorg()
        noprint: if the datgnom output should be suppressed

    Outputs: the p(r) array and the metadata dict, as read_gnom_pr() gives.
    """
    if Rg is None:
        # estimate the radius of gyration first; autorg() returns Rg as
        # its first element
        Rg = autorg(filename)[0]
    execute_command(['datgnom', filename, '-r', '%f' % float(Rg)], noprint=noprint)
    # datgnom writes its result next to the input, with an .out extension
    outfilename = filename.rsplit('.', 1)[0] + '.out'
    return read_gnom_pr(outfilename, get_metadata=True)
def dammif(gnomoutputfilename, prefix=None, mode='fast', symmetry='P1', N=None,
           noprint=True):
    """Run dammif ab initio shape determination on a GNOM .out file.

    Inputs:
        gnomoutputfilename: name of the GNOM output file
        prefix: prefix for the dammif output files (derived from the input
            file name if not given)
        mode: dammif mode ('fast' or 'slow')
        symmetry: symmetry group (e.g. 'P1')
        N: if given, run dammif N times with numbered prefixes
        noprint: if the dammif output should be suppressed

    Output:
        the name of the produced PDB file, or a list of N names if N was
        given.
    """
    if prefix is None:
        prefix = 'dammif_' + gnomoutputfilename.rsplit('.', 1)[0]
    # arguments shared by every invocation, after the --prefix option
    tailargs = ['--omit-solvent', '--mode=%s' % mode, '--symmetry=%s' % symmetry,
                '--unit=NANOMETER', gnomoutputfilename]
    if N is None:
        execute_command(['dammif', '--prefix=%s' % prefix] + tailargs, noprint=noprint)
        return prefix + '-1.pdb'
    pdbnames = []
    for runindex in range(N):
        execute_command(['dammif', '--prefix=%s_%03d' % (prefix, runindex)] + tailargs,
                        noprint=noprint)
        pdbnames.append('%s_%03d-1.pdb' % (prefix, runindex))
    return pdbnames
def shanum(filename, dmax=None, noprint=True):
    """Execute the shanum program to determine the optimum qmax
    according to an estimation of the optimum number of Shannon
    channels.

    Inputs:
        filename: either a name of an ascii file, or an instance
            of Curve
        dmax: the cut-off of the P(r) function, if known. If None,
            this will be determined by the shanum program
        noprint: if the printout of the program is to be suppressed.

    Outputs: dmax, nsh, nopt, qmaxopt
        dmax: the cut-off of the P(r) function.
        nsh: the estimated number of Shannon channels
        nopt: the optimum number of Shannon channels
        qmaxopt: the optimum value of the high-q cutoff
    """
    tmpfilename = None
    if isinstance(filename, Curve):
        # dump the curve to a temporary ascii file which shanum can read
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            filename.save(f)
        filename = tmpfilename = f.name
    try:
        cmdline = ['shanum', filename]
        if dmax is not None:
            cmdline.append(str(float(dmax)))
        result = execute_command(cmdline, noprint=noprint)
    finally:
        if tmpfilename is not None:
            # bug fix: the delete=False temporary file was never removed
            os.unlink(tmpfilename)
    for l in result:
        l = l.strip()
        if l.startswith('Dmax='):
            dmax = float(l.split('=')[1])
        elif l.startswith('Smax='):
            qmax = float(l.split('=')[1])
        elif l.startswith('Nsh='):
            nsh = float(l.split('=')[1])
        elif l.startswith('Nopt='):
            nopt = float(l.split('=')[1])
        elif l.startswith('Sopt='):
            qmaxopt = float(l.split('=')[1])
    # NOTE(review): nsh/nopt/qmaxopt are only bound if shanum printed the
    # corresponding lines; otherwise a NameError is raised here.
    return dmax, nsh, nopt, qmaxopt
def bodies(filename, bodytypes=None, prefix=None, fit_timeout=10, Ndummyatoms=2000, noprint=True):
    """Fit simple geometrical bodies to a scattering curve with the ATSAS
    `bodies` program, then build a dummy-atom model (DAM) for each fit.

    Inputs:
        filename: name of an ascii scattering data file or a Curve instance
        bodytypes: list of body type names to try (defaults to all known)
        prefix: output file prefix; mandatory when filename is a Curve
        fit_timeout: seconds to wait for each `bodies` subprocess
        Ndummyatoms: number of dummy atoms for the DAM model
        noprint: if True, do not echo the `bodies` output

    Output:
        dict mapping body type name to a dict of fit results (expected and
        fitted Rg/I0/volume, chi^2, fitted body parameters, raw stdout), or
        None when the `bodies` output could not be parsed. A summary
        ipy_table is displayed as a side effect.
    """
    # body types in the order the `bodies` menu lists them (1-based index)
    BODIES = ['ellipsoid', 'rotation-ellipsoid', 'cylinder', 'elliptic-cylinder', 'hollow-cylinder', 'parallelepiped',
              'hollow-sphere', 'dumbbell']
    if bodytypes is None:
        bodytypes = BODIES
    unknownbodies = [b for b in bodytypes if b not in BODIES]
    if unknownbodies:
        raise ValueError('Unknown body type(s): ' + ', '.join(unknownbodies))
    if isinstance(filename, Curve):
        # dump the curve to a temporary ascii file for `bodies` to read
        curve = filename
        with tempfile.NamedTemporaryFile('w+b', delete=False) as f:
            curve.save(f)
            filename = f.name
        assert (prefix is not None)
    else:
        if prefix is None:
            prefix = filename.rsplit('.', 1)[0]
    fittingresults = {}
    for b in bodytypes:
        print('Fitting geometrical body %s' % b, flush=True)
        # drive the interactive `bodies` menu: 'f' = fit, then body index,
        # blank lines accept the defaults, finally the output prefix
        p = subprocess.Popen(['bodies'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            stdout, stderr = p.communicate(input=b'f\n%s\n%d\n\n\n\n\n\n\n%s\n' % (
                filename.encode('utf-8'), BODIES.index(b) + 1, prefix.encode('utf-8')), timeout=fit_timeout)
        except subprocess.TimeoutExpired:
            print('Fitting timed out.')
            continue
        stdout = stdout.decode('utf-8')
        stderr = stderr.decode('utf-8')
        if stderr:
            print('Error: ', stderr, flush=True)
        # scan the `bodies` transcript for the fitted parameters and the
        # goodness-of-fit figures
        printing_on = False
        parameter_recording_on = False
        bodyparameters = []
        bodyparameternames = []
        fittingresults[b] = {}
        for s in stdout.split('\n'):
            if s.startswith(' Input file name'):
                printing_on = True
            if printing_on and not noprint:
                print(s, flush=True)
            if s.startswith(' Body type'):
                parameter_recording_on = True
            if s.startswith(' Parameter \'scale\''):
                # the 'scale' parameter ends the list of body parameters
                parameter_recording_on = False
            if parameter_recording_on and s.startswith(' Parameter \''):
                bodyparameters.append(float(s.split(':')[1].strip()))
                # extract the quoted parameter name from the line
                bodyparameternames.append(s[s.index("'") + 1:(s.index("'") + s[s.index("'") + 1:].index("'") + 1)])
            if s.startswith(' Expected Radius of Gyration'):
                fittingresults[b]['Rgexp'] = float(s.split(':')[1].strip())
            elif s.startswith(' Expected I0'):
                fittingresults[b]['I0exp'] = float(s.split(':')[1].strip())
            elif s.startswith(' Expected Volume'):
                fittingresults[b]['Volexp'] = float(s.split(':')[1].strip())
            elif s.startswith(' Fit Radius of Gyration'):
                fittingresults[b]['Rgfit'] = float(s.split(':')[1].strip())
            elif s.startswith(' Fit I0'):
                fittingresults[b]['I0fit'] = float(s.split(':')[1].strip())
            elif s.startswith(' Fit Volume'):
                fittingresults[b]['Volfit'] = float(s.split(':')[1].strip())
            elif s.startswith(' Goodness of Fit (chi-square)'):
                fittingresults[b]['Chi2'] = float(s.split(':')[1].strip())
        if 'Chi2' not in fittingresults[b]:
            # NOTE(review): bails out with None mid-loop, discarding any
            # results collected for earlier body types
            print('Error: cannot open file {}'.format(filename))
            return
        fittingresults[b]['stdout_from_bodies'] = stdout
        fittingresults[b]['type'] = b
        fittingresults[b]['bodyparameters'] = bodyparameters
        fittingresults[b]['bodyparameternames'] = bodyparameternames
        print('Creating DAM model')
        damoutputfile = prefix + '-' + b + '.pdb'
        # second `bodies` run: 'd' = DAM; parameters are fed back in
        # Angstroms (hence the factor of 10)
        p = subprocess.Popen(['bodies'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        try:
            stdout, stderr = p.communicate(input=b'd\n%d\n' % (BODIES.index(b) + 1) + b'\n'.join(
                [b'%6f' % (10 * v) for v in bodyparameters]) + b'\n1\n%d\n%s\n' % (
                Ndummyatoms, damoutputfile.encode('utf-8')), timeout=fit_timeout)
        except subprocess.TimeoutExpired:
            print('Error creating DAM model.')
        if stderr:
            print(stderr)
    # build and display a summary table of all fitted bodies
    tab = [['Body', 'Goodness of Fit ($\chi^2$)', 'Rg mismatch', 'I0 mismatch', 'Volume mismatch']]
    for b in sorted(fittingresults):
        tab.append([
            fittingresults[b]['type'] + ' (' + ', '.join(
                ['%s=%.3f nm' % (var, val) for var, val in zip(fittingresults[b]['bodyparameternames'],
                                                               fittingresults[b]['bodyparameters'])]) + ')',
            fittingresults[b]['Chi2'],
            '%.2f nm' % (fittingresults[b]['Rgfit'] - fittingresults[b]['Rgexp']),
            '%5g cm$^{-1}$ sr$^{-1}$' % (fittingresults[b]['I0fit'] - fittingresults[b]['I0exp']),
            '%.2f nm^3' % (fittingresults[b]['Volfit'] - fittingresults[b]['Volexp']),
        ])
    tab = ipy_table.IpyTable(tab)
    tab.apply_theme('basic')
    display(tab)
    return fittingresults
def datcmp(*curves, alpha=None, adjust=None, test='CORMAP'):
    """Run datcmp on the scattering curves.
    Inputs:
        *curves: scattering curves as positional arguments
        alpha: confidence parameter
        adjust: adjustment type (string), see the help of datcmp for details
        test: test (string), see the help of datcmp for details
    Outputs:
        matC: the C matrix
        matp: the matrix of the p values comparing the i-th and j-th exposure
        matpadj: adjusted p-matrix of the exposures
        ok: list of the same length as the number of curves. If True, the
            given curve does not differ significantly from the others.
    """
    # datcmp can only compare curves defined on the same q-grid
    if len({len(c) for c in curves}) != 1:
        raise ValueError('All curves have to be of the same length.')
    datcmpargs = []
    if alpha is not None:
        datcmpargs.append('--alpha=%f' % alpha)
    if adjust is not None:
        datcmpargs.append('--adjust=%s' % adjust)
    if test is not None:
        datcmpargs.append('--test=%s' % test)
    with tempfile.TemporaryDirectory(prefix='credolib_datcmp') as td:
        # write each curve into a three-column (q, I, dI) ascii file
        for i, c in enumerate(curves):
            mat = np.zeros((len(c), 3))
            mat[:, 0] = c.q
            mat[:, 1] = c.Intensity
            mat[:, 2] = c.Error
            np.savetxt(os.path.join(td, 'curve_%d.dat' % i), mat)
        # result matrices stay NaN wherever datcmp produced no value
        matC = np.zeros((len(curves), len(curves))) + np.nan
        matp = np.zeros((len(curves), len(curves))) + np.nan
        matpadj = np.zeros((len(curves), len(curves))) + np.nan
        ok = np.zeros(len(curves)) + np.nan
        try:
            results = subprocess.check_output(
                ['datcmp'] + datcmpargs + [os.path.join(td, 'curve_%d.dat' % i) for i in range(len(curves))]).decode(
                'utf-8')
        except subprocess.CalledProcessError:
            # datcmp failed: the all-NaN matrices are returned
            pass
        else:
            # parse the pairwise comparison lines of the datcmp output
            for l in results.split('\n'):
                m = re.match(
                    '^\s*(?P<i>\d+)\s*vs\.\s*(?P<j>\d+)\s*(?P<C>\d*\.\d*)\s*(?P<p>\d*\.\d*)\s*(?P<adjp>\d*\.\d*)[\s\*]{1}$',
                    l)
                if m is not None:
                    # indices in the datcmp output are 1-based
                    i = int(m.group('i')) - 1
                    j = int(m.group('j')) - 1
                    matC[i, j] = matC[j, i] = float(m.group('C'))
                    matp[i, j] = matp[j, i] = float(m.group('p'))
                    matpadj[i, j] = matpadj[j, i] = float(m.group('adjp'))
                else:
                    m = re.match('\s*(?P<i>\d+)(?P<ack>[\*\s]{1})\s*', l)
                    if m is not None:
                        # a trailing '*' marks curves deemed equivalent
                        ok[int(m.group('i')) - 1] = (m.group('ack') == '*')
    return matC, matp, matpadj, ok
def datporod(gnomoutfile):
    """Run datporod and return the estimated Porod volume.

    Returns:
        Radius of gyration found in the input file
        I0 found in the input file
        Vporod: the estimated Porod volume
    """
    rawoutput = subprocess.check_output(['datporod', gnomoutfile])
    rg, i0, vporod = rawoutput.decode('utf-8').strip().split()[:3]
    return float(rg), float(i0), float(vporod)
|
awacha/credolib
|
credolib/plotting.py
|
guinierplot
|
python
|
def guinierplot(*args, **kwargs):
ret=plotsascurve(*args, **kwargs)
plt.xscale('power',exponent=2)
plt.yscale('log')
return ret
|
Make a Guinier plot. This is simply a wrapper around plotsascurve().
|
train
|
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/plotting.py#L40-L45
|
[
"def plotsascurve(samplename, *args, **kwargs):\n if 'dist' not in kwargs:\n kwargs['dist'] = None\n data1d, dist = getsascurve(samplename, kwargs['dist'])\n del kwargs['dist']\n if 'factor' in kwargs:\n factor=kwargs['factor']\n del kwargs['factor']\n else:\n factor=1\n if 'label' not in kwargs:\n if isinstance(dist, str):\n kwargs['label'] = samplename + ' ' + dist\n else:\n kwargs['label'] = samplename + ' %g mm' % dist\n if 'errorbar' in kwargs:\n errorbars = bool(kwargs['errorbar'])\n del kwargs['errorbar']\n else:\n errorbars = False\n if errorbars:\n ret = (data1d*factor).errorbar(*args, **kwargs)\n plt.xscale('log')\n plt.yscale('log')\n else:\n ret = (data1d*factor).loglog(*args, **kwargs)\n plt.xlabel('q (' + qunit() + ')')\n plt.ylabel('$d\\\\Sigma/d\\\\Omega$ (cm$^{-1}$ sr$^{-1}$)')\n plt.legend(loc='best')\n plt.grid(True, which='both')\n plt.axis('tight')\n return ret\n"
] |
__all__=['plotsascurve','guinierplot','kratkyplot']
from .io import getsascurve
import matplotlib.pyplot as plt
from sastool.libconfig import qunit, dunit
def plotsascurve(samplename, *args, **kwargs):
    """Plot the scattering curve of a sample on a double-logarithmic scale.

    Inputs:
        samplename: name of the sample (looked up with getsascurve())
        *args, **kwargs: passed on to the plotting routine. The special
            keyword arguments 'dist' (sample-to-detector distance, default
            None), 'factor' (scaling factor for the curve, default 1) and
            'errorbar' (plot with error bars, default False) are consumed
            here and not forwarded.

    Output: the return value of the underlying plot call.
    """
    # idiom: kwargs.pop() replaces the original 'in'/'del' bookkeeping
    data1d, dist = getsascurve(samplename, kwargs.pop('dist', None))
    factor = kwargs.pop('factor', 1)
    if 'label' not in kwargs:
        if isinstance(dist, str):
            kwargs['label'] = samplename + ' ' + dist
        else:
            kwargs['label'] = samplename + ' %g mm' % dist
    errorbars = bool(kwargs.pop('errorbar', False))
    if errorbars:
        ret = (data1d * factor).errorbar(*args, **kwargs)
        plt.xscale('log')
        plt.yscale('log')
    else:
        ret = (data1d * factor).loglog(*args, **kwargs)
    plt.xlabel('q (' + qunit() + ')')
    plt.ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
    plt.legend(loc='best')
    plt.grid(True, which='both')
    plt.axis('tight')
    return ret
def kratkyplot(samplename, *args, **kwargs):
    """Make a Kratky plot (q^2 * I vs. q) of the scattering curve of a sample.

    Inputs:
        samplename: name of the sample (looked up with getsascurve())
        *args, **kwargs: passed on to the plotting routine. The special
            keyword arguments 'dist', 'factor' and 'errorbar' are consumed
            here, as in plotsascurve().

    Output: the return value of the underlying plot call.
    """
    # idiom: kwargs.pop() replaces the original 'in'/'del' bookkeeping
    data1d, dist = getsascurve(samplename, kwargs.pop('dist', None))
    factor = kwargs.pop('factor', 1)
    if 'label' not in kwargs:
        if isinstance(dist, str):
            kwargs['label'] = samplename + ' ' + dist
        else:
            kwargs['label'] = samplename + ' %g mm' % dist
    errorbars = bool(kwargs.pop('errorbar', False))
    data1dscaled = data1d * factor
    if errorbars:
        # NOTE(review): the guard tests for a 'dx' attribute but then reads
        # qError -- presumably curves carrying q errors expose both; confirm.
        if hasattr(data1dscaled, 'dx'):
            dx = data1dscaled.qError
            # Gaussian error propagation for y = q^2 * I:
            # dy^2 = q^4*dI^2 + (2*q*I)^2 * dq^2
            dy = (data1dscaled.Error ** 2 * data1dscaled.q ** 4 +
                  data1dscaled.Intensity ** 2 * data1dscaled.qError ** 2
                  * data1dscaled.q ** 2 * 4) ** 0.5
        else:
            dx = None
            dy = data1dscaled.Error
        ret = plt.errorbar(data1dscaled.q,
                           data1dscaled.q ** 2 * data1dscaled.Intensity,
                           dy, dx, *args, **kwargs)
    else:
        ret = plt.plot(data1dscaled.q,
                       data1dscaled.Intensity * data1dscaled.q ** 2,
                       *args, **kwargs)
    # NOTE(review): the abscissa is q but the label uses dunit();
    # plotsascurve() uses qunit() -- confirm which is intended.
    plt.xlabel('q (' + dunit() + ')')
    plt.ylabel('$q^2 d\\Sigma/d\\Omega$ (' +
               dunit() +
               '$^{-2}$ cm$^{-1}$ sr$^{-1}$)')
    plt.legend(loc='best')
    plt.grid(True, which='both')
    plt.axis('tight')
    return ret
def porodplot(samplename, *args, **kwargs):
    """Porod plot (q^4 * I vs. q) of the scattering curve of `samplename`.

    Keyword arguments consumed here (anything else is forwarded to the
    plotting call):
        dist: the sample-to-detector distance to select (None: let
            getsascurve() choose the default)
        factor: multiplicative scale applied to the curve (default: 1)
        errorbar: if truthy, draw error bars (propagated to q^4*I)
        label: legend label (default: sample name plus distance)

    Returns the artist(s) created by the underlying plotting call.
    """
    if 'dist' not in kwargs:
        kwargs['dist'] = None
    data1d, dist = getsascurve(samplename, kwargs['dist'])
    del kwargs['dist']
    if 'factor' in kwargs:
        factor = kwargs['factor']
        del kwargs['factor']
    else:
        factor = 1
    if 'label' not in kwargs:
        if isinstance(dist, str):
            kwargs['label'] = samplename + ' ' + dist
        else:
            kwargs['label'] = samplename + ' %g mm' % dist
    if 'errorbar' in kwargs:
        errorbars = bool(kwargs['errorbar'])
        del kwargs['errorbar']
    else:
        errorbars = False
    data1dscaled = data1d * factor
    if errorbars:
        # NOTE(review): 'dx' presumably flags the presence of abscissa errors
        # (qError) — confirm the attribute name against the Curve class.
        if hasattr(data1dscaled, 'dx'):
            dx = data1dscaled.qError
            # Gaussian propagation for y = q^4*I:
            # dy^2 = (q^4 dI)^2 + (4 q^3 I dq)^2.
            # BUGFIX: the derivative term is 16*q^6*I^2*dq^2 (the constant was
            # erroneously 14; 4^2 == 16).
            dy = (data1dscaled.Error ** 2 * data1dscaled.q ** 8 +
                  data1dscaled.Intensity ** 2 * data1dscaled.qError ** 2
                  * data1dscaled.q ** 6 * 16) ** 0.5
        else:
            dx = None
            dy = data1dscaled.Error
        ret = plt.errorbar(data1dscaled.q,
                           data1dscaled.q ** 4 * data1dscaled.Intensity,
                           dy, dx, *args, **kwargs)
    else:
        # BUGFIX: the ordinate must be q^4*I (it was q^2*I, inconsistent with
        # both the errorbar branch above and the y-axis label below).
        ret = plt.plot(data1dscaled.q,
                       data1dscaled.Intensity * data1dscaled.q ** 4,
                       *args, **kwargs)
    # NOTE(review): the q axis is labelled with dunit() here while
    # plotsascurve() uses qunit() — verify which is intended.
    plt.xlabel('q (' + dunit() + ')')
    plt.ylabel('$q^4 d\\Sigma/d\\Omega$ (' +
               dunit() +
               '$^{-4}$ cm$^{-1}$ sr$^{-1}$)')
    plt.legend(loc='best')
    plt.xscale('power', exponent=4)
    plt.yscale('linear')
    plt.grid(True, which='both')
    plt.axis('tight')
    return ret
|
awacha/credolib
|
credolib/procedures.py
|
summarize
|
python
|
def summarize(reintegrate=True, dist_tolerance=3, qranges=None,
samples=None, raw=False, late_radavg=True, graph_ncols=3,
std_multiplier=3, graph_extension='png',
graph_dpi=80, correlmatrix_colormap='coolwarm',
image_colormap='viridis', correlmatrix_logarithmic=True, cormaptest=True):
if qranges is None:
qranges = {}
ip = get_ipython()
data2d = {}
data1d = {}
headers_tosave = {}
rowavg = {}
if raw:
writemarkdown('# Summarizing RAW images.')
headers = ip.user_ns['_headers']['raw']
rawpart = '_raw' # this will be added in the filenames saved
else:
writemarkdown('# Summarizing CORRECTED images.')
headers = ip.user_ns['_headers']['processed']
rawpart = '' # nothing will be added in the filenames saved
if samples is None:
samples = sorted(ip.user_ns['allsamplenames'])
for samplename in samples:
writemarkdown('## ' + samplename)
headers_sample = [h for h in headers if h.title == samplename]
data2d[samplename] = {}
rowavg[samplename] = {}
data1d[samplename] = {}
headers_tosave[samplename] = {}
dists = get_different_distances([h for h in headers if h.title == samplename], dist_tolerance)
if not dists:
writemarkdown('No measurements from sample, skipping.')
continue
fig_2d = plt.figure()
fig_curves = plt.figure()
fig_correlmatrices = plt.figure()
distaxes = {}
correlmatrixaxes = {}
ncols = min(len(dists), graph_ncols)
nrows = int(np.ceil(len(dists) / ncols))
onedimaxes = fig_curves.add_axes((0.1, 0.3, 0.8, 0.5))
onedimstdaxes = fig_curves.add_axes((0.1, 0.1, 0.8, 0.2))
for distidx, dist in enumerate(dists):
writemarkdown("### Distance " + str(dist) + " mm")
headers_narrowed = [h for h in headers_sample if abs(float(h.distance) - dist) < dist_tolerance]
distaxes[dist] = fig_2d.add_subplot(
nrows, ncols, distidx + 1)
correlmatrixaxes[dist] = fig_correlmatrices.add_subplot(
nrows, ncols, distidx + 1)
# determine the q-range to be used from the qranges argument.
try:
distkey_min = min([np.abs(k - dist)
for k in qranges if np.abs(k - dist) < dist_tolerance])
except ValueError:
# no matching key in qranges dict
qrange = None # request auto-determination of q-range
else:
distkey = [
k for k in qranges if np.abs(k - dist) == distkey_min][0]
qrange = qranges[distkey]
(data1d[samplename][dist], data2d[samplename][dist], headers_tosave[samplename][dist]) = \
_collect_data_for_summarization(headers_narrowed, raw, reintegrate, qrange)
badfsns, badfsns_datcmp, tab, rowavg[samplename][dist] = _stabilityassessment(
headers_tosave[samplename][dist],
data1d[samplename][dist], dist,
fig_correlmatrices,
correlmatrixaxes[dist], std_multiplier, correlmatrix_colormap,
os.path.join(ip.user_ns['saveto_dir'], 'correlmatrix_%s_%s' % (
samplename,
('%.2f' % dist).replace('.', '_')) +
rawpart + '.npz'),
logarithmic_correlmatrix=correlmatrix_logarithmic,
cormaptest=cormaptest)
if 'badfsns' not in ip.user_ns:
ip.user_ns['badfsns'] = {}
elif 'badfsns_datcmp' not in ip.user_ns:
ip.user_ns['badfsns_datcmp'] = {}
ip.user_ns['badfsns'] = set(ip.user_ns['badfsns']).union(badfsns)
ip.user_ns['badfsns_datcmp'] = set(ip.user_ns['badfsns_datcmp']).union(badfsns_datcmp)
display(tab)
# Plot the image
try:
data2d[samplename][dist].imshow(axes=distaxes[dist], show_crosshair=False,
norm=matplotlib.colors.LogNorm(),
cmap=matplotlib.cm.get_cmap(image_colormap))
except ValueError:
print('Error plotting 2D image for sample %s, distance %.2f' % (samplename, dist))
distaxes[dist].set_xlabel('q (' + qunit() + ')')
distaxes[dist].set_ylabel('q (' + qunit() + ')')
distaxes[dist].set_title(
'%.2f mm (%d curve%s)' % (dist, len(headers_tosave[samplename][dist]),
['', 's'][len(headers_tosave[samplename][dist]) > 1]))
# Plot the curves
Istd = np.stack([c.Intensity for c in data1d[samplename][dist]], axis=1)
for c, h in zip(data1d[samplename][dist], headers_tosave[samplename][dist]):
color = 'green'
if h.fsn in badfsns_datcmp:
color = 'magenta'
if h.fsn in badfsns:
color = 'red'
c.loglog(axes=onedimaxes, color=color)
if Istd.shape[1] > 1:
onedimstdaxes.loglog(data1d[samplename][dist][0].q, Istd.std(axis=1) / Istd.mean(axis=1) * 100, 'b-')
if not late_radavg:
data1d[samplename][dist] = Curve.average(
*data1d[samplename][dist])
else:
data1d[samplename][dist] = (
data2d[samplename][dist].radial_average(
qrange,
errorpropagation=3,
abscissa_errorpropagation=3, raw_result=False))
data1d[samplename][dist].loglog(
label='Average', lw=2, color='k', axes=onedimaxes)
##Saving image, headers, mask and curve
# data2d[samplename][dist].write(
# os.path.join(ip.user_ns['saveto_dir'],
# samplename + '_'+(
# '%.2f' % dist).replace('.', '_') +
# rawpart + '.npz'), plugin='CREDO Reduced')
# data2d[samplename][dist].header.write(
# os.path.join(ip.user_ns['saveto_dir'],
### samplename + '_'+(
# '%.2f' % dist).replace('.', '_') +
# rawpart +'.log'), plugin='CREDO Reduced')
# data2d[samplename][dist].mask.write_to_mat(
# os.path.join(ip.user_ns['saveto_dir'],
# data2d[samplename][dist].mask.maskid+'.mat'))
data1d[samplename][dist].save(os.path.join(ip.user_ns['saveto_dir'],
samplename + '_' + ('%.2f' % dist).replace('.',
'_') + rawpart + '.txt'))
# Report on qrange and flux
q_ = data1d[samplename][dist].q
qmin = q_[q_ > 0].min()
writemarkdown('#### Q-range & flux')
writemarkdown(
'- $q_{min}$: ' + print_abscissavalue(qmin, headers_tosave[samplename][dist][0].wavelength, dist))
writemarkdown('- $q_{max}$: ' + print_abscissavalue(data1d[samplename][dist].q.max(),
headers_tosave[samplename][dist][0].wavelength, dist))
writemarkdown('- Number of $q$ points: ' + str(len(data1d[samplename][dist])))
meastime = sum([h.exposuretime for h in headers_tosave[samplename][dist]])
writemarkdown("- from %d exposures, total exposure time %.0f sec <=> %.2f hr" % (
len(headers_tosave[samplename][dist]),
meastime, meastime / 3600.))
try:
flux = [h.flux for h in headers_tosave[samplename][dist]]
flux = ErrorValue(np.mean(flux), np.std(flux))
writemarkdown("- beam flux (photon/sec): %s" % flux)
except KeyError:
writemarkdown("- *No information on beam flux: dealing with raw data.*")
onedimaxes.set_xlabel('')
onedimaxes.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
# plt.legend(loc='best')
onedimaxes.grid(True, which='both')
onedimaxes.axis('tight')
onedimaxes.set_title(samplename)
onedimstdaxes.set_xlabel('q (' + qunit() + ')')
onedimstdaxes.set_ylabel('Rel.std.dev. of intensity (%)')
onedimstdaxes.grid(True, which='both')
onedimstdaxes.set_xlim(*onedimaxes.get_xlim())
onedimstdaxes.set_xscale(onedimaxes.get_xscale())
putlogo(fig_curves)
putlogo(fig_2d)
fig_2d.tight_layout()
fig_correlmatrices.suptitle(samplename)
fig_correlmatrices.tight_layout()
fig_2d.savefig(
os.path.join(ip.user_ns['auximages_dir'],
'averaging2D_' +
samplename + rawpart + '.' + graph_extension),
dpi=graph_dpi)
fig_curves.savefig(
os.path.join(ip.user_ns['auximages_dir'],
'averaging1D_' +
samplename + rawpart + '.' + graph_extension),
dpi=graph_dpi)
putlogo(fig_correlmatrices)
fig_correlmatrices.savefig(
os.path.join(ip.user_ns['auximages_dir'],
'correlation_' +
samplename + rawpart + '.' + graph_extension),
dpi=graph_dpi)
writemarkdown("### Collected images from all distances")
plt.show()
writemarkdown("Updated badfsns list:")
writemarkdown('[' + ', '.join(str(f) for f in ip.user_ns['badfsns']) + ']')
writemarkdown("Updated badfsns list using datcmp:")
writemarkdown('[' + ', '.join(str(f) for f in ip.user_ns['badfsns_datcmp']) + ']')
ip.user_ns['_data1d'] = data1d
ip.user_ns['_data2d'] = data2d
ip.user_ns['_headers_sample'] = headers_tosave
ip.user_ns['_rowavg'] = rowavg
|
Summarize scattering patterns and curves for all samples defined
by the global `allsamplenames`.
Inputs:
reintegrate (bool, default=True): if the curves are to be obained
by reintegrating the patterns. Otherwise 1D curves are loaded.
dist_tolerance (float, default=3): sample-to-detector distances
nearer than this are considered the same
qranges (dict): a dictionary mapping approximate sample-to-detector
distances (within dist_tolerance) to one-dimensional np.ndarrays
of the desired q-range of the reintegration.
samples (list or None): the names of the samples to summarize. If
None, all samples defined by ``allsamplenames`` are used.
raw (bool, default=False): if raw images are to be treated instead
the evaluated ones (default).
late_radavg (bool, default=True): if the scattering curves are to
be calculated from the summarized scattering pattern. If False,
scattering curves are calculated from each pattern and will be
averaged.
graph_ncols: the number of columns in graphs (2D patterns,
correlation matrices)
std_multiplier: if the absolute value of the relative discrepancy
is larger than this limit, the exposure is deemed an outlier.
graph_extension: the extension of the produced hardcopy files.
graph_dpi: resolution of the graphs
correlmatrix_colormap: name of the colormap to be used for the
correlation matrices (resolved by matplotlib.cm.get_cmap())
image_colormap: name of the colormap to be used for the scattering
patterns (resolved by matplotlib.cm.get_cmap())
correlmatrix_logarithmic: if the correlation matrix has to be
calculated from the logarithm of the intensity.
|
train
|
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/procedures.py#L136-L367
|
[
"def writemarkdown(*args):\n display(Markdown(' '.join(str(a) for a in args)))\n",
"def get_different_distances(headers, tolerance=2) -> List[float]:\n alldists = {float(h.distance) for h in headers}\n dists = []\n for d in alldists:\n if [d_ for d_ in dists if abs(d - d_) < tolerance]:\n continue\n dists.append(d)\n return sorted(dists)\n",
"def print_abscissavalue(q, wavelength=None, distance=None, digits=10):\n qunit = sastool.libconfig.qunit()\n dunit = sastool.libconfig.dunit()\n formatstring='%%.%df'%digits\n retval = str(q) + ' ' + qunit\n retval = retval + \"(\"\n retval = retval + \" <=> \" + formatstring %(2 * np.pi / q) + \" \" + dunit + \"(d)\"\n retval = retval + \" <=> \" + formatstring %(1 / q) + \" \" + dunit + \"(Rg)\"\n if wavelength is not None:\n tth_rad = 2 * np.arcsin((q * wavelength) / 4 / np.pi)\n tth_deg = tth_rad * 180.0 / np.pi\n retval = retval + \" <=> \" + formatstring %(tth_deg) + \"\\xb0\"\n if distance is not None:\n radius = np.tan(tth_rad) * distance\n retval = retval + \" <=> \" + formatstring % (radius) + \" mm(r)\"\n retval = retval + \")\"\n return retval\n",
"def putlogo(figure=None):\n \"\"\"Puts the CREDO logo at the bottom right of the current figure (or\n the figure given by the ``figure`` argument if supplied).\n \"\"\"\n ip = get_ipython()\n if figure is None:\n figure=plt.gcf()\n curraxis= figure.gca()\n logoaxis = figure.add_axes([0.89, 0.01, 0.1, 0.1], anchor='NW')\n logoaxis.set_axis_off()\n logoaxis.xaxis.set_visible(False)\n logoaxis.yaxis.set_visible(False)\n logoaxis.imshow(credo_logo)\n figure.subplots_adjust(right=0.98)\n figure.sca(curraxis)\n",
"def _collect_data_for_summarization(headers, raw, reintegrate, qrange):\n ip = get_ipython()\n data1d = []\n data2d = 0\n headersout = []\n if not headers:\n return\n for head in headers:\n try:\n mo = ip.user_ns['mask_override'](head)\n except KeyError:\n mo = None\n ex = None\n last_exception = None\n try:\n ex = load_exposure(head.fsn, raw=raw, processed=not raw)\n assert isinstance(ex, Exposure)\n if mo is not None:\n try:\n ex.mask = ex.loader.loadmask(mo)\n except FileNotFoundError:\n print('Could not load mask: %s' % mo)\n raise FileNotFoundError('Could not load mask: %s' % mo)\n except FileNotFoundError as exc:\n last_exception = sys.exc_info()\n if ex is None:\n print('Could not load {} 2D file for FSN {:d}. Exception: {}'.format(\n ['processed', 'raw'][raw], head.fsn, '\\n'.join(traceback.format_exception(*last_exception))))\n ip.user_ns['badfsns'] = set(ip.user_ns['badfsns'])\n ip.user_ns['badfsns'].add(head.fsn)\n continue\n ex.header = head\n curve = None\n if not reintegrate:\n for l in [l_ for l_ in ip.user_ns['_loaders'] if l_.processed != raw]:\n try:\n curve = l.loadcurve(head.fsn)\n break\n except FileNotFoundError:\n continue\n if curve is None:\n print('Cannot load curve for FSN %d: reintegrating.' % head.fsn)\n if curve is None:\n # this happens if reintegrate==True or if reintegrate==False but the curve could not be loaded.\n curve = ex.radial_average(qrange, errorpropagation=3,\n abscissa_errorpropagation=3, raw_result=False)\n curve = curve.sanitize()\n data1d.append(curve)\n\n data1d[-1].save(os.path.join(ip.user_ns['saveto_dir'], 'curve_%05d.txt' % head.fsn))\n mat = np.zeros((len(data1d[-1]), 3))\n mat[:, 0] = data1d[-1].q\n mat[:, 1] = data1d[-1].Intensity\n mat[:, 2] = data1d[-1].Error\n np.savetxt(os.path.join(ip.user_ns['saveto_dir'], 'curve_%s_%05d.dat' % (head.title, head.fsn)), mat)\n del mat\n data2d = data2d + ex\n headersout.append(ex.header)\n data2d /= len(data1d)\n return data1d, data2d, headersout\n",
"def _stabilityassessment(headers, data1d, dist, fig_correlmatrices, correlmatrixaxes, std_multiplier,\n correlmatrix_colormap,\n correlmatrix_filename, logarithmic_correlmatrix=True, cormaptest=True):\n # calculate and plot correlation matrix\n cmatrix, badidx, rowavg = correlmatrix(data1d, std_multiplier, logarithmic_correlmatrix)\n rowavgmean = rowavg.mean()\n rowavgstd = rowavg.std()\n writemarkdown('#### Assessing sample stability')\n writemarkdown(\"- Mean of row averages: \" + str(rowavgmean))\n writemarkdown(\"- Std of row averages: \" + str(rowavgstd) + ' (%.2f %%)' % (rowavgstd / rowavgmean * 100))\n\n img = correlmatrixaxes.imshow(cmatrix, interpolation='nearest', cmap=matplotlib.cm.get_cmap(correlmatrix_colormap))\n cax = make_axes_locatable(correlmatrixaxes).append_axes('right', size=\"5%\", pad=0.1)\n fig_correlmatrices.colorbar(img, cax=cax)\n fsns = [h.fsn for h in headers]\n\n correlmatrixaxes.set_title('%.2f mm' % dist)\n correlmatrixaxes.set_xticks(list(range(len(data1d))))\n correlmatrixaxes.set_xticklabels([str(f) for f in fsns], rotation='vertical')\n correlmatrixaxes.set_yticks(list(range(len(data1d))))\n correlmatrixaxes.set_yticklabels([str(f) for f in fsns])\n np.savez_compressed(correlmatrix_filename,\n correlmatrix=cmatrix, fsns=np.array(fsns))\n\n # Report table on sample stability\n tab = [['FSN', 'Date', 'Discrepancy', 'Relative discrepancy ((x-mean(x))/std(x))', 'Quality', 'Quality (cormap)']]\n badfsns = []\n badfsns_datcmp = []\n if cormaptest:\n matC, matp, matpadj, datcmp_ok = datcmp(*data1d)\n else:\n datcmp_ok = [not x for x in badidx]\n for h, bad, discr, dcmp_ok in zip(headers, badidx, rowavg, datcmp_ok):\n tab.append([h.fsn, h.date.isoformat(), discr, (discr - rowavgmean) / rowavgstd,\n [\"\\u2713\", \"\\u2718\\u2718\\u2718\\u2718\\u2718\"][bad],\n [\"\\u2713\", \"\\u2718\\u2718\\u2718\\u2718\\u2718\"][dcmp_ok != 1]])\n if bad:\n badfsns.append(h.fsn)\n if (not dcmp_ok and not np.isnan(dcmp_ok)):\n 
badfsns_datcmp.append(h.fsn)\n tab = ipy_table.IpyTable(tab)\n tab.apply_theme('basic')\n return badfsns, badfsns_datcmp, tab, rowavg\n"
] |
__all__ = ['summarize', 'unite', 'subtract_bg']
import numbers
import os
import sys
import traceback
import ipy_table
import matplotlib
import matplotlib.cm
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
from IPython.core.getipython import get_ipython
from IPython.display import display
from mpl_toolkits.axes_grid import make_axes_locatable
from sastool.classes2 import Curve, Exposure
from sastool.libconfig import qunit
from sastool.misc.easylsq import FixedParameter, nonlinear_odr
from sastool.misc.errorvalue import ErrorValue
from .atsas import datcmp
from .calculation import correlmatrix
from .io import get_different_distances, load_exposure
from .plotting import plotsascurve
from .utils import print_abscissavalue, putlogo, writemarkdown
def _collect_data_for_summarization(headers, raw, reintegrate, qrange):
    """Load the exposures for `headers` and produce their 1D curves plus the
    averaged 2D pattern.

    Inputs:
        headers: list of header objects, all assumed to belong to one sample
            and one sample-to-detector distance.
        raw (bool): load raw exposures (True) or processed ones (False).
        reintegrate (bool): if True, derive every 1D curve by radially
            averaging the 2D pattern; if False, try to load a previously
            saved curve first and only reintegrate as a fallback.
        qrange: q-range passed to radial_average(); None requests
            auto-determination.

    Returns:
        (data1d, data2d, headersout): the list of sanitized 1D curves, the
        mean of the successfully loaded 2D exposures, and the headers of
        those exposures.  Returns None when `headers` is empty.

    Side effects: saves each curve twice under saveto_dir (generic
    'curve_NNNNN.txt' and per-sample '.dat'), and adds the FSN of every
    unloadable exposure to the global `badfsns` set in the IPython namespace.
    """
    ip = get_ipython()
    data1d = []
    # Running sum of the loaded exposures (starts from the int 0; divided by
    # the curve count at the end to obtain the mean pattern).
    data2d = 0
    headersout = []
    if not headers:
        return
    for head in headers:
        try:
            # 'mask_override' is an optional user-supplied callable mapping a
            # header to an alternative mask name.
            mo = ip.user_ns['mask_override'](head)
        except KeyError:
            mo = None
        ex = None
        last_exception = None
        try:
            ex = load_exposure(head.fsn, raw=raw, processed=not raw)
            assert isinstance(ex, Exposure)
            if mo is not None:
                try:
                    ex.mask = ex.loader.loadmask(mo)
                except FileNotFoundError:
                    # Re-raise so the outer handler records this FSN as bad.
                    print('Could not load mask: %s' % mo)
                    raise FileNotFoundError('Could not load mask: %s' % mo)
        except FileNotFoundError as exc:
            # Remember the full traceback for the error report below.
            last_exception = sys.exc_info()
        if ex is None:
            print('Could not load {} 2D file for FSN {:d}. Exception: {}'.format(
                ['processed', 'raw'][raw], head.fsn, '\n'.join(traceback.format_exception(*last_exception))))
            # Coerce badfsns to a set (it may have started life as another
            # container) and record the failing FSN.
            ip.user_ns['badfsns'] = set(ip.user_ns['badfsns'])
            ip.user_ns['badfsns'].add(head.fsn)
            continue
        ex.header = head
        curve = None
        if not reintegrate:
            # Try every loader of the matching kind until one has the curve.
            for l in [l_ for l_ in ip.user_ns['_loaders'] if l_.processed != raw]:
                try:
                    curve = l.loadcurve(head.fsn)
                    break
                except FileNotFoundError:
                    continue
            if curve is None:
                print('Cannot load curve for FSN %d: reintegrating.' % head.fsn)
        if curve is None:
            # this happens if reintegrate==True or if reintegrate==False but the curve could not be loaded.
            curve = ex.radial_average(qrange, errorpropagation=3,
                                      abscissa_errorpropagation=3, raw_result=False)
        curve = curve.sanitize()
        data1d.append(curve)
        data1d[-1].save(os.path.join(ip.user_ns['saveto_dir'], 'curve_%05d.txt' % head.fsn))
        # Also dump a plain 3-column (q, I, dI) ASCII file with the sample
        # title in the name.
        mat = np.zeros((len(data1d[-1]), 3))
        mat[:, 0] = data1d[-1].q
        mat[:, 1] = data1d[-1].Intensity
        mat[:, 2] = data1d[-1].Error
        np.savetxt(os.path.join(ip.user_ns['saveto_dir'], 'curve_%s_%05d.dat' % (head.title, head.fsn)), mat)
        del mat
        data2d = data2d + ex
        headersout.append(ex.header)
    # Average the accumulated 2D patterns.
    data2d /= len(data1d)
    return data1d, data2d, headersout
def _stabilityassessment(headers, data1d, dist, fig_correlmatrices, correlmatrixaxes, std_multiplier,
                         correlmatrix_colormap,
                         correlmatrix_filename, logarithmic_correlmatrix=True, cormaptest=True):
    """Assess the stability of a sequence of exposures of the same sample.

    Computes the correlation matrix of the 1D curves, plots it onto
    `correlmatrixaxes`, saves it to `correlmatrix_filename` (.npz), and
    builds an ipy_table report marking outliers.

    Inputs:
        headers: header objects of the exposures (parallel to `data1d`).
        data1d: the 1D scattering curves.
        dist: sample-to-detector distance (only used in the plot title).
        fig_correlmatrices, correlmatrixaxes: figure/axes for the matrix plot.
        std_multiplier: outlier threshold passed to correlmatrix().
        correlmatrix_colormap: colormap name for the matrix image.
        correlmatrix_filename: target .npz file for the matrix and FSN list.
        logarithmic_correlmatrix: correlate log-intensities if True.
        cormaptest: if True, additionally run the datcmp (CorMap) test.

    Returns:
        (badfsns, badfsns_datcmp, tab, rowavg): FSNs flagged by the
        correlation-matrix criterion, FSNs flagged by datcmp, the report
        table, and the row averages of the correlation matrix.
    """
    # calculate and plot correlation matrix
    cmatrix, badidx, rowavg = correlmatrix(data1d, std_multiplier, logarithmic_correlmatrix)
    rowavgmean = rowavg.mean()
    rowavgstd = rowavg.std()
    writemarkdown('#### Assessing sample stability')
    writemarkdown("- Mean of row averages: " + str(rowavgmean))
    writemarkdown("- Std of row averages: " + str(rowavgstd) + ' (%.2f %%)' % (rowavgstd / rowavgmean * 100))
    img = correlmatrixaxes.imshow(cmatrix, interpolation='nearest', cmap=matplotlib.cm.get_cmap(correlmatrix_colormap))
    cax = make_axes_locatable(correlmatrixaxes).append_axes('right', size="5%", pad=0.1)
    fig_correlmatrices.colorbar(img, cax=cax)
    fsns = [h.fsn for h in headers]
    correlmatrixaxes.set_title('%.2f mm' % dist)
    correlmatrixaxes.set_xticks(list(range(len(data1d))))
    correlmatrixaxes.set_xticklabels([str(f) for f in fsns], rotation='vertical')
    correlmatrixaxes.set_yticks(list(range(len(data1d))))
    correlmatrixaxes.set_yticklabels([str(f) for f in fsns])
    np.savez_compressed(correlmatrix_filename,
                        correlmatrix=cmatrix, fsns=np.array(fsns))
    # Report table on sample stability
    tab = [['FSN', 'Date', 'Discrepancy', 'Relative discrepancy ((x-mean(x))/std(x))', 'Quality', 'Quality (cormap)']]
    badfsns = []
    badfsns_datcmp = []
    if cormaptest:
        matC, matp, matpadj, datcmp_ok = datcmp(*data1d)
    else:
        # Fall back to the correlation-matrix verdict: 1 <=> "good".
        datcmp_ok = [not x for x in badidx]
    for h, bad, discr, dcmp_ok in zip(headers, badidx, rowavg, datcmp_ok):
        # The two-element lists are indexed by a boolean: index 0 is the
        # check mark (good), index 1 the row of crosses (bad).  For dcmp_ok,
        # NaN != 1 evaluates True, so an undecidable datcmp result is also
        # displayed as bad.
        tab.append([h.fsn, h.date.isoformat(), discr, (discr - rowavgmean) / rowavgstd,
                    ["\u2713", "\u2718\u2718\u2718\u2718\u2718"][bad],
                    ["\u2713", "\u2718\u2718\u2718\u2718\u2718"][dcmp_ok != 1]])
        if bad:
            badfsns.append(h.fsn)
        # NaN (undecidable) is deliberately NOT added to badfsns_datcmp.
        if (not dcmp_ok and not np.isnan(dcmp_ok)):
            badfsns_datcmp.append(h.fsn)
    tab = ipy_table.IpyTable(tab)
    tab.apply_theme('basic')
    return badfsns, badfsns_datcmp, tab, rowavg
def summarize(reintegrate=True, dist_tolerance=3, qranges=None,
              samples=None, raw=False, late_radavg=True, graph_ncols=3,
              std_multiplier=3, graph_extension='png',
              graph_dpi=80, correlmatrix_colormap='coolwarm',
              image_colormap='viridis', correlmatrix_logarithmic=True, cormaptest=True):
    """Summarize scattering patterns and curves for all samples defined
    by the global `allsamplenames`.
    Inputs:
        reintegrate (bool, default=True): if the curves are to be obained
            by reintegrating the patterns. Otherwise 1D curves are loaded.
        dist_tolerance (float, default=3): sample-to-detector distances
            nearer than this are considered the same
        qranges (dict): a dictionary mapping approximate sample-to-detector
            distances (within dist_tolerance) to one-dimensional np.ndarrays
            of the desired q-range of the reintegration.
        samples (list or None): the names of the samples to summarize. If
            None, all samples defined by ``allsamplenames`` are used.
        raw (bool, default=False): if raw images are to be treated instead
            the evaluated ones (default).
        late_radavg (bool, default=True): if the scattering curves are to
            be calculated from the summarized scattering pattern. If False,
            scattering curves are calculated from each pattern and will be
            averaged.
        graph_ncols: the number of columns in graphs (2D patterns,
            correlation matrices)
        std_multiplier: if the absolute value of the relative discrepancy
            is larger than this limit, the exposure is deemed an outlier.
        graph_extension: the extension of the produced hardcopy files.
        graph_dpi: resolution of the graphs
        correlmatrix_colormap: name of the colormap to be used for the
            correlation matrices (resolved by matplotlib.cm.get_cmap())
        image_colormap: name of the colormap to be used for the scattering
            patterns (resolved by matplotlib.cm.get_cmap())
        correlmatrix_logarithmic: if the correlation matrix has to be
            calculated from the logarithm of the intensity.
    """
    if qranges is None:
        qranges = {}
    ip = get_ipython()
    data2d = {}
    data1d = {}
    headers_tosave = {}
    rowavg = {}
    if raw:
        writemarkdown('# Summarizing RAW images.')
        headers = ip.user_ns['_headers']['raw']
        rawpart = '_raw'  # this will be added in the filenames saved
    else:
        writemarkdown('# Summarizing CORRECTED images.')
        headers = ip.user_ns['_headers']['processed']
        rawpart = ''  # nothing will be added in the filenames saved
    if samples is None:
        samples = sorted(ip.user_ns['allsamplenames'])
    for samplename in samples:
        writemarkdown('## ' + samplename)
        headers_sample = [h for h in headers if h.title == samplename]
        data2d[samplename] = {}
        rowavg[samplename] = {}
        data1d[samplename] = {}
        headers_tosave[samplename] = {}
        dists = get_different_distances([h for h in headers if h.title == samplename], dist_tolerance)
        if not dists:
            writemarkdown('No measurements from sample, skipping.')
            continue
        fig_2d = plt.figure()
        fig_curves = plt.figure()
        fig_correlmatrices = plt.figure()
        distaxes = {}
        correlmatrixaxes = {}
        ncols = min(len(dists), graph_ncols)
        nrows = int(np.ceil(len(dists) / ncols))
        onedimaxes = fig_curves.add_axes((0.1, 0.3, 0.8, 0.5))
        onedimstdaxes = fig_curves.add_axes((0.1, 0.1, 0.8, 0.2))
        for distidx, dist in enumerate(dists):
            writemarkdown("### Distance " + str(dist) + " mm")
            headers_narrowed = [h for h in headers_sample if abs(float(h.distance) - dist) < dist_tolerance]
            distaxes[dist] = fig_2d.add_subplot(
                nrows, ncols, distidx + 1)
            correlmatrixaxes[dist] = fig_correlmatrices.add_subplot(
                nrows, ncols, distidx + 1)
            # determine the q-range to be used from the qranges argument.
            try:
                distkey_min = min([np.abs(k - dist)
                                   for k in qranges if np.abs(k - dist) < dist_tolerance])
            except ValueError:
                # no matching key in qranges dict
                qrange = None  # request auto-determination of q-range
            else:
                distkey = [
                    k for k in qranges if np.abs(k - dist) == distkey_min][0]
                qrange = qranges[distkey]
            (data1d[samplename][dist], data2d[samplename][dist], headers_tosave[samplename][dist]) = \
                _collect_data_for_summarization(headers_narrowed, raw, reintegrate, qrange)
            badfsns, badfsns_datcmp, tab, rowavg[samplename][dist] = _stabilityassessment(
                headers_tosave[samplename][dist],
                data1d[samplename][dist], dist,
                fig_correlmatrices,
                correlmatrixaxes[dist], std_multiplier, correlmatrix_colormap,
                os.path.join(ip.user_ns['saveto_dir'], 'correlmatrix_%s_%s' % (
                    samplename,
                    ('%.2f' % dist).replace('.', '_')) +
                             rawpart + '.npz'),
                logarithmic_correlmatrix=correlmatrix_logarithmic,
                cormaptest=cormaptest)
            # BUGFIX: these two initializations were chained with `elif`, so
            # when both keys were missing, 'badfsns_datcmp' was never created
            # and the union below raised KeyError. Initialize each one
            # independently (and as sets, matching their later use).
            if 'badfsns' not in ip.user_ns:
                ip.user_ns['badfsns'] = set()
            if 'badfsns_datcmp' not in ip.user_ns:
                ip.user_ns['badfsns_datcmp'] = set()
            ip.user_ns['badfsns'] = set(ip.user_ns['badfsns']).union(badfsns)
            ip.user_ns['badfsns_datcmp'] = set(ip.user_ns['badfsns_datcmp']).union(badfsns_datcmp)
            display(tab)
            # Plot the image
            try:
                data2d[samplename][dist].imshow(axes=distaxes[dist], show_crosshair=False,
                                                norm=matplotlib.colors.LogNorm(),
                                                cmap=matplotlib.cm.get_cmap(image_colormap))
            except ValueError:
                print('Error plotting 2D image for sample %s, distance %.2f' % (samplename, dist))
            distaxes[dist].set_xlabel('q (' + qunit() + ')')
            distaxes[dist].set_ylabel('q (' + qunit() + ')')
            distaxes[dist].set_title(
                '%.2f mm (%d curve%s)' % (dist, len(headers_tosave[samplename][dist]),
                                          ['', 's'][len(headers_tosave[samplename][dist]) > 1]))
            # Plot the curves
            Istd = np.stack([c.Intensity for c in data1d[samplename][dist]], axis=1)
            for c, h in zip(data1d[samplename][dist], headers_tosave[samplename][dist]):
                # Colour code: green = good, magenta = datcmp outlier,
                # red = correlation-matrix outlier (red wins if both).
                color = 'green'
                if h.fsn in badfsns_datcmp:
                    color = 'magenta'
                if h.fsn in badfsns:
                    color = 'red'
                c.loglog(axes=onedimaxes, color=color)
            if Istd.shape[1] > 1:
                onedimstdaxes.loglog(data1d[samplename][dist][0].q, Istd.std(axis=1) / Istd.mean(axis=1) * 100, 'b-')
            if not late_radavg:
                data1d[samplename][dist] = Curve.average(
                    *data1d[samplename][dist])
            else:
                data1d[samplename][dist] = (
                    data2d[samplename][dist].radial_average(
                        qrange,
                        errorpropagation=3,
                        abscissa_errorpropagation=3, raw_result=False))
            data1d[samplename][dist].loglog(
                label='Average', lw=2, color='k', axes=onedimaxes)
            ##Saving image, headers, mask and curve
            # data2d[samplename][dist].write(
            #    os.path.join(ip.user_ns['saveto_dir'],
            #                 samplename + '_'+(
            #                     '%.2f' % dist).replace('.', '_') +
            #                 rawpart + '.npz'), plugin='CREDO Reduced')
            # data2d[samplename][dist].header.write(
            #    os.path.join(ip.user_ns['saveto_dir'],
            ###                 samplename + '_'+(
            #                     '%.2f' % dist).replace('.', '_') +
            #                 rawpart +'.log'), plugin='CREDO Reduced')
            # data2d[samplename][dist].mask.write_to_mat(
            #    os.path.join(ip.user_ns['saveto_dir'],
            #                 data2d[samplename][dist].mask.maskid+'.mat'))
            data1d[samplename][dist].save(os.path.join(ip.user_ns['saveto_dir'],
                                                       samplename + '_' + ('%.2f' % dist).replace('.',
                                                                                                  '_') + rawpart + '.txt'))
            # Report on qrange and flux
            q_ = data1d[samplename][dist].q
            qmin = q_[q_ > 0].min()
            writemarkdown('#### Q-range & flux')
            writemarkdown(
                '- $q_{min}$: ' + print_abscissavalue(qmin, headers_tosave[samplename][dist][0].wavelength, dist))
            writemarkdown('- $q_{max}$: ' + print_abscissavalue(data1d[samplename][dist].q.max(),
                                                                headers_tosave[samplename][dist][0].wavelength, dist))
            writemarkdown('- Number of $q$ points: ' + str(len(data1d[samplename][dist])))
            meastime = sum([h.exposuretime for h in headers_tosave[samplename][dist]])
            writemarkdown("- from %d exposures, total exposure time %.0f sec <=> %.2f hr" % (
                len(headers_tosave[samplename][dist]),
                meastime, meastime / 3600.))
            try:
                flux = [h.flux for h in headers_tosave[samplename][dist]]
                flux = ErrorValue(np.mean(flux), np.std(flux))
                writemarkdown("- beam flux (photon/sec): %s" % flux)
            except KeyError:
                # Raw headers carry no flux information.
                writemarkdown("- *No information on beam flux: dealing with raw data.*")
        onedimaxes.set_xlabel('')
        onedimaxes.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
        # plt.legend(loc='best')
        onedimaxes.grid(True, which='both')
        onedimaxes.axis('tight')
        onedimaxes.set_title(samplename)
        onedimstdaxes.set_xlabel('q (' + qunit() + ')')
        onedimstdaxes.set_ylabel('Rel.std.dev. of intensity (%)')
        onedimstdaxes.grid(True, which='both')
        onedimstdaxes.set_xlim(*onedimaxes.get_xlim())
        onedimstdaxes.set_xscale(onedimaxes.get_xscale())
        putlogo(fig_curves)
        putlogo(fig_2d)
        fig_2d.tight_layout()
        fig_correlmatrices.suptitle(samplename)
        fig_correlmatrices.tight_layout()
        fig_2d.savefig(
            os.path.join(ip.user_ns['auximages_dir'],
                         'averaging2D_' +
                         samplename + rawpart + '.' + graph_extension),
            dpi=graph_dpi)
        fig_curves.savefig(
            os.path.join(ip.user_ns['auximages_dir'],
                         'averaging1D_' +
                         samplename + rawpart + '.' + graph_extension),
            dpi=graph_dpi)
        putlogo(fig_correlmatrices)
        fig_correlmatrices.savefig(
            os.path.join(ip.user_ns['auximages_dir'],
                         'correlation_' +
                         samplename + rawpart + '.' + graph_extension),
            dpi=graph_dpi)
        writemarkdown("### Collected images from all distances")
        plt.show()
    writemarkdown("Updated badfsns list:")
    writemarkdown('[' + ', '.join(str(f) for f in ip.user_ns['badfsns']) + ']')
    writemarkdown("Updated badfsns list using datcmp:")
    writemarkdown('[' + ', '.join(str(f) for f in ip.user_ns['badfsns_datcmp']) + ']')
    # Publish the collected results into the interactive namespace for the
    # downstream steps (unite(), subtract_bg(), ...).
    ip.user_ns['_data1d'] = data1d
    ip.user_ns['_data2d'] = data2d
    ip.user_ns['_headers_sample'] = headers_tosave
    ip.user_ns['_rowavg'] = rowavg
def _merge_two_curves(curve1: Curve, curve2: Curve, qmin, qmax, qsep, use_additive_constant=False):
    """Merge two scattering curves measured at different distances.

    :param curve1: the first curve (longer distance)
    :type curve1: sastool.classes.curve.GeneralCurve
    :param curve2: the second curve (shorter distance)
    :type curve2: sastool.classes.curve.GeneralCurve
    :param qmin: lower bound of the interval for determining the scaling factor
    :type qmin: float
    :param qmax: upper bound of the interval for determining the scaling factor
    :type qmax: float
    :param qsep: separating (tailoring) point for the merge
    :type qsep: float
    :return: merged_curve, factor, background, stat
    :rtype tuple of a sastool.classes2.curve.Curve and a float
    """
    curve1 = curve1.sanitize()
    curve2 = curve2.sanitize()
    # Interpolate the sparser curve onto the q-grid of the denser one inside
    # the overlap interval [qmin, qmax].
    overlap1 = curve1.trim(qmin, qmax)
    overlap2 = curve2.trim(qmin, qmax)
    if len(overlap1) > len(overlap2):
        curve2_interp = overlap2
        curve1_interp = curve1.interpolate(curve2_interp.q)
    else:
        curve1_interp = overlap1
        curve2_interp = curve2.interpolate(curve1_interp.q)
    # An additive background is only fitted on request; otherwise it is
    # pinned to zero via FixedParameter.
    bg_init = 0 if use_additive_constant else FixedParameter(0)
    factor, bg, stat = nonlinear_odr(curve2_interp.Intensity, curve1_interp.Intensity,
                                     curve2_interp.Error, curve1_interp.Error,
                                     lambda x, factor, bg: x * factor + bg,
                                     [1.0, bg_init])
    return Curve.merge(curve1 - bg, curve2 * factor, qsep), factor, bg, stat
def _scale_two_exposures(exp1, exp2, qmin, qmax, N=10, use_additive_constant=False):
    """Determine the factor (and optional additive background) scaling
    `exp2` onto `exp1`, using their radial averages over N points of
    the common q-interval [qmin, qmax].

    Returns (factor, bg).
    """
    qgrid = np.linspace(qmin, qmax, N)
    avg1 = exp1.radial_average(qrange=qgrid, raw_result=False)
    avg2 = exp2.radial_average(qrange=qgrid, raw_result=False)
    # The background is pinned to zero unless explicitly requested.
    bg_init = 0 if use_additive_constant else FixedParameter(0)
    factor, bg, stat = nonlinear_odr(avg2.y, avg1.y, avg2.dy, avg1.dy,
                                     lambda x, factor, bg: x * factor + bg,
                                     [1, bg_init])
    return factor, bg
def unite(samplename, uniqmin=[], uniqmax=[], uniqsep=[], graph_ncols=2, graph_subplotpars={'hspace': 0.3},
          graph_extension='png', graph_dpi=80, additive_constant=False):
    """Unite the 1D scattering curves of one sample measured at several
    sample-to-detector distances into a single curve.

    Inputs:
        samplename: key into the global ``_data1d`` dict (IPython user namespace)
        uniqmin, uniqmax, uniqsep: scalar or list of per-pair fit limits
            (qmin, qmax) and tailoring points (qsep), one entry per adjacent
            distance pair; missing/None entries are auto-detected from the
            overlap of the two curves.
        graph_ncols, graph_subplotpars, graph_extension, graph_dpi: figure
            layout / hardcopy parameters.
        additive_constant: if True, allow an additive background in the
            pairwise scaling fits (forwarded to ``_merge_two_curves``).

    Side effects: stores the united curve in ``_data1dunited[samplename]``,
    saves it as ``united_<samplename>.txt`` into ``saveto_dir`` and writes a
    summary figure into ``auximages_dir``.

    NOTE(review): the mutable default arguments are not mutated here (the
    padded copies are new lists), but confirm before relying on that.
    """
    ip = get_ipython()
    # Accept scalar limits as a convenience: wrap them into 1-element lists.
    if isinstance(uniqmin, numbers.Number):
        uniqmin = [uniqmin]
    if isinstance(uniqmax, numbers.Number):
        uniqmax = [uniqmax]
    if isinstance(uniqsep, numbers.Number):
        uniqsep = [uniqsep]
    data1d = ip.user_ns['_data1d'][samplename]
    print("Uniting measurements of sample %s at different s-d distances" % samplename)
    uniparams = {'qmin': uniqmin, 'qmax': uniqmax, 'qsep': uniqsep}
    # Pad each parameter list with None up to (number of distances - 1)
    # entries, i.e. one per adjacent pair; Nones are auto-detected below.
    for p in uniparams:
        uniparams[p] = uniparams[p] + [None] * \
                       max(0, len(data1d) - 1 - len(uniparams[p]))
    # Longest distance (smallest q) first.
    dists = list(reversed(sorted(data1d.keys())))
    if len(dists) < 2:
        print("Less than two distances found for sample %s; no point of uniting." % samplename)
        return
    united = None
    graph_nrows = int(
        np.ceil((len(dists)) / (graph_ncols * 1.0)))
    fig = plt.figure()
    # First subplot shows the final united curve; the rest show each pair.
    unitedaxis = fig.add_subplot(graph_nrows, graph_ncols, 1)
    factor = 1.0
    # Walk over adjacent distance pairs, accumulating the multiplicative
    # factor so every curve ends up on the scale of the longest distance.
    for idx, dist1, dist2, qmin, qmax, qsep in zip(list(range(len(dists) - 1)),
                                                   dists[:-1], dists[1:],
                                                   uniparams['qmin'],
                                                   uniparams['qmax'],
                                                   uniparams['qsep']):
        print("    Scaling together distances %f and %f mm" % (dist1, dist2), flush=True)
        if united is None:
            united = data1d[dist1]
        # Auto-detect missing fit limits from the overlap of the two curves.
        if qmin is None:
            qmin = data1d[dist2].sanitize().q.min()
            print("        Auto-detected qmin:", qmin, flush=True)
        if qmax is None:
            qmax = data1d[dist1].sanitize().q.max()
            print("        Auto-detected qmax:", qmax, flush=True)
        if qsep is None:
            qsep = 0.5 * (qmin + qmax)
            print("        Auto-detected qsep:", qsep, flush=True)
        ax = fig.add_subplot(graph_nrows, graph_ncols, 2 + idx)
        (factor * data1d[dist1]).loglog(axes=ax, label='%.2f mm' % dist1)
        united, factor1, bg, stat = _merge_two_curves(united,
                                                      data1d[dist2], qmin, qmax, qsep,
                                                      use_additive_constant=additive_constant)
        # Chain the pairwise factor onto the cumulative one.
        factor = factor1 * factor
        # Record the (possibly auto-detected) limits for the final plot.
        uniparams['qmin'][idx] = qmin
        uniparams['qmax'][idx] = qmax
        uniparams['qsep'][idx] = qsep
        print("    Scaling factor is", factor.tostring(), flush=True)
        if not additive_constant:
            print("    Additive constant has not been used.", flush=True)
        else:
            print("    Additive constant is:", bg.tostring(), flush=True)
        print("    Reduced Chi^2 of the ODR fit:", stat['Chi2_reduced'], flush=True)
        print("    DoF of the ODR fit:", stat['DoF'], flush=True)
        (data1d[dist2] * factor + bg).loglog(axes=ax, label='%.2f mm' % dist2)
        ax.set_xlabel('q (' + qunit() + ')')
        ax.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
        ax.legend(loc='best')
        # ax.grid(which='both')
        ax.axis('tight')
        ax.set_title('Factor: ' + str(factor))
        lims = ax.axis()
        # Mark the fit interval (red dashes) and the tailoring point (black).
        ax.plot([qmin, qmin], lims[2:], '--r', lw=2)
        ax.plot([qmax, qmax], lims[2:], '--r', lw=2)
        ax.plot([qsep, qsep], lims[2:], '--k')
        ax.grid(True, which='both')
    if '_data1dunited' not in ip.user_ns:
        ip.user_ns['_data1dunited'] = {}
    united.loglog(axes=unitedaxis)
    unitedaxis.set_xlabel('q (' + qunit() + ')')
    unitedaxis.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
    unitedaxis.legend(loc='best')
    unitedaxis.set_title('United scattering of %s' % samplename)
    unitedaxis.grid(True, which='both')
    # unitedaxis.grid(which='both')
    unitedaxis.axis('tight')
    lims = unitedaxis.axis()
    for qs in uniparams['qsep']:
        unitedaxis.plot([qs] * 2, lims[2:], '--r')
    ip.user_ns['_data1dunited'][samplename] = united
    putlogo()
    fig.subplots_adjust(**graph_subplotpars)
    plt.savefig(
        os.path.join(ip.user_ns['auximages_dir'], 'uniting_' + samplename + '.' + graph_extension), dpi=graph_dpi)
    print("    United curve spans the following ranges:")
    print("        q_min: ",
          print_abscissavalue(united.q.min(), ip.user_ns['_headers_sample'][samplename][dists[0]][0].wavelength))
    print("        q_max: ",
          print_abscissavalue(united.q.max(), ip.user_ns['_headers_sample'][samplename][dists[0]][0].wavelength))
    print("        q_max/q_min:", united.q.max() / united.q.min())
    print("        I_min: ", united.Intensity.min(), "cm^{-1}")
    print("        I_max: ", united.Intensity.max(), "cm^{-1}")
    print("        I_max/I_min:", united.Intensity.max() / united.Intensity.min())
    print("        # of points: ", len(united))
    united.save(os.path.join(ip.user_ns['saveto_dir'], 'united_' + samplename + '.txt'))
    plt.show()
def subtract_bg(samplename, bgname, factor=1, distance=None, disttolerance=2,
                subname=None, qrange=(), graph_extension='png', graph_dpi=80):
    """Subtract background from measurements.
    Inputs:
        samplename: the name of the sample
        bgname: the name of the background measurements. Alternatively, it can
            be a numeric value (float or ErrorValue), which will be subtracted.
            If None, this constant will be determined by integrating the
            scattering curve in the range given by qrange.
        factor: the background curve will be multiplied by this
        distance: if None, do the subtraction for all sample-to-detector distances.
            Otherwise give here the value of the sample-to-detector distance.
        qrange: a tuple (qmin, qmax)
        disttolerance: the tolerance in which two distances are considered
            equal.
        subname: the sample name of the background-corrected curve. The default
            is samplename + '-' + bgname
    """
    ip = get_ipython()
    data1d = ip.user_ns['_data1d']
    data2d = ip.user_ns['_data2d']
    if 'subtractedsamplenames' not in ip.user_ns:
        ip.user_ns['subtractedsamplenames'] = set()
    subtractedsamplenames = ip.user_ns['subtractedsamplenames']
    # Derive the corrected sample's name when not given explicitly.
    if subname is None:
        if isinstance(bgname, str):
            subname = samplename + '-' + bgname
        else:
            subname = samplename + '-const'
    if distance is None:
        dists = data1d[samplename]
    else:
        dists = [d for d in data1d[samplename] if abs(d - distance) < disttolerance]
    for dist in dists:
        if isinstance(bgname, str):
            # disttolerance == 0 (falsy): require an exact distance match in
            # the background measurements.
            if not disttolerance:
                if dist not in data1d[bgname]:
                    print(
                        'Warning: Missing distance %g for background measurement (samplename: %s, background samplename: %s)' % (
                            dist, samplename, bgname))
                    continue
                else:
                    bgdist = dist
            else:
                # Pick the background distance closest to `dist` within the
                # tolerance. NOTE(review): raises IndexError if none matches
                # — confirm callers guarantee a background at each distance.
                bgdist = sorted([(d, r) for (d, r) in [(d, np.abs(d - dist)) for d in list(data1d[bgname].keys())] if
                                 r <= disttolerance], key=lambda x: x[1])[0][0]
        if subname not in data1d:
            data1d[subname] = {}
        if subname not in data2d:
            data2d[subname] = {}
        if subname not in ip.user_ns['_headers_sample']:
            ip.user_ns['_headers_sample'][subname] = {}
        data1_s = data1d[samplename][dist]
        data2_s = data2d[samplename][dist]
        if isinstance(bgname, str):
            data1_bg = data1d[bgname][bgdist]
            data2_bg = data2d[bgname][bgdist]
            if factor is None:
                # Scale so the zeroth moments over qrange agree.
                # NOTE(review): once computed, this factor is reused for the
                # remaining distances in the loop — confirm intended.
                factor = data1_s.trim(*qrange).momentum(0) / data1_bg.trim(*qrange).momentum(0)
        elif bgname is None:
            # Constant background: the curve's own zeroth moment over qrange.
            data1_bg = data1_s.trim(*qrange).momentum(0)
            data2_bg = data1_bg
        else:
            # bgname is itself a numeric constant (float or ErrorValue).
            data1_bg = bgname
            data2_bg = bgname
        if factor is None:
            factor = 1
        data1d[subname][dist] = data1_s - factor * data1_bg
        data2d[subname][dist] = data2_s - factor * data2_bg
        data1d[subname][dist].save(
            os.path.join(ip.user_ns['saveto_dir'], subname + '_' + ('%.2f' % dist).replace('.', '_') + '.txt'))
        ip.user_ns['_headers_sample'][subname][dist] = ip.user_ns['_headers_sample'][samplename][
            dist]  # ugly hack, I have no better idea.
        plt.figure()
        plotsascurve(samplename, dist=dist)
        if isinstance(bgname, str):
            plotsascurve(bgname, dist=dist, factor=factor)
        plotsascurve(subname, dist=dist)
        plt.savefig(os.path.join(ip.user_ns['auximages_dir'],
                                 'subtractbg_' + samplename + '.' + graph_extension),
                    dpi=graph_dpi)
        subtractedsamplenames.add(subname)
|
awacha/credolib
|
credolib/procedures.py
|
_merge_two_curves
|
python
|
def _merge_two_curves(curve1: Curve, curve2: Curve, qmin, qmax, qsep, use_additive_constant=False):
curve1=curve1.sanitize()
curve2=curve2.sanitize()
if len(curve1.trim(qmin, qmax)) > len(curve2.trim(qmin, qmax)):
curve2_interp = curve2.trim(qmin, qmax)
curve1_interp = curve1.interpolate(curve2_interp.q)
else:
curve1_interp = curve1.trim(qmin, qmax)
curve2_interp = curve2.interpolate(curve1_interp.q)
if use_additive_constant:
bg_init = 0
else:
bg_init = FixedParameter(0)
factor, bg, stat = nonlinear_odr(curve2_interp.Intensity, curve1_interp.Intensity,
curve2_interp.Error, curve1_interp.Error,
lambda x, factor, bg: x * factor + bg, [1.0, bg_init])
return Curve.merge(curve1 - bg, curve2 * factor, qsep), factor, bg, stat
|
Merge two scattering curves
:param curve1: the first curve (longer distance)
:type curve1: sastool.classes.curve.GeneralCurve
:param curve2: the second curve (shorter distance)
:type curve2: sastool.classes.curve.GeneralCurve
:param qmin: lower bound of the interval for determining the scaling factor
:type qmin: float
:param qmax: upper bound of the interval for determining the scaling factor
:type qmax: float
:param qsep: separating (tailoring) point for the merge
:type qsep: float
:return: merged_curve, factor, background, stat
:rtype tuple of a sastool.classes2.curve.Curve and a float
|
train
|
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/procedures.py#L370-L401
| null |
__all__ = ['summarize', 'unite', 'subtract_bg']
import numbers
import os
import sys
import traceback
import ipy_table
import matplotlib
import matplotlib.cm
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
from IPython.core.getipython import get_ipython
from IPython.display import display
from mpl_toolkits.axes_grid import make_axes_locatable
from sastool.classes2 import Curve, Exposure
from sastool.libconfig import qunit
from sastool.misc.easylsq import FixedParameter, nonlinear_odr
from sastool.misc.errorvalue import ErrorValue
from .atsas import datcmp
from .calculation import correlmatrix
from .io import get_different_distances, load_exposure
from .plotting import plotsascurve
from .utils import print_abscissavalue, putlogo, writemarkdown
def _collect_data_for_summarization(headers, raw, reintegrate, qrange):
    """Load the exposures belonging to ``headers``, collect their 1D curves
    and average their 2D patterns.

    Returns (list of 1D curves, averaged 2D exposure, list of headers actually
    loaded). FSNs whose exposure cannot be loaded are added to the global
    ``badfsns`` set and skipped.

    NOTE(review): when ``headers`` is empty this returns None (bare
    ``return``), which a caller unpacking three values cannot handle —
    confirm this path is unreachable from ``summarize``.
    """
    ip = get_ipython()
    data1d = []
    data2d = 0
    headersout = []
    if not headers:
        return
    for head in headers:
        # An optional user-supplied callable 'mask_override' may substitute a
        # different mask for this exposure; absent -> KeyError -> no override.
        try:
            mo = ip.user_ns['mask_override'](head)
        except KeyError:
            mo = None
        ex = None
        last_exception = None
        try:
            ex = load_exposure(head.fsn, raw=raw, processed=not raw)
            assert isinstance(ex, Exposure)
            if mo is not None:
                try:
                    ex.mask = ex.loader.loadmask(mo)
                except FileNotFoundError:
                    print('Could not load mask: %s' % mo)
                    # Re-raised so the outer handler marks this FSN bad.
                    raise FileNotFoundError('Could not load mask: %s' % mo)
        except FileNotFoundError as exc:
            last_exception = sys.exc_info()
        if ex is None:
            # Loading failed: report, blacklist the FSN and move on.
            print('Could not load {} 2D file for FSN {:d}. Exception: {}'.format(
                ['processed', 'raw'][raw], head.fsn, '\n'.join(traceback.format_exception(*last_exception))))
            ip.user_ns['badfsns'] = set(ip.user_ns['badfsns'])
            ip.user_ns['badfsns'].add(head.fsn)
            continue
        ex.header = head
        curve = None
        if not reintegrate:
            # Try each matching loader for a pre-computed 1D curve first.
            for l in [l_ for l_ in ip.user_ns['_loaders'] if l_.processed != raw]:
                try:
                    curve = l.loadcurve(head.fsn)
                    break
                except FileNotFoundError:
                    continue
            if curve is None:
                print('Cannot load curve for FSN %d: reintegrating.' % head.fsn)
        if curve is None:
            # this happens if reintegrate==True or if reintegrate==False but the curve could not be loaded.
            curve = ex.radial_average(qrange, errorpropagation=3,
                                      abscissa_errorpropagation=3, raw_result=False)
        curve = curve.sanitize()
        data1d.append(curve)
        # Save the curve both in the native format and as a bare q/I/dI
        # three-column text matrix carrying the sample title.
        data1d[-1].save(os.path.join(ip.user_ns['saveto_dir'], 'curve_%05d.txt' % head.fsn))
        mat = np.zeros((len(data1d[-1]), 3))
        mat[:, 0] = data1d[-1].q
        mat[:, 1] = data1d[-1].Intensity
        mat[:, 2] = data1d[-1].Error
        np.savetxt(os.path.join(ip.user_ns['saveto_dir'], 'curve_%s_%05d.dat' % (head.title, head.fsn)), mat)
        del mat
        # Sum the 2D patterns; divided by the count below to get the mean.
        data2d = data2d + ex
        headersout.append(ex.header)
    data2d /= len(data1d)
    return data1d, data2d, headersout
def _stabilityassessment(headers, data1d, dist, fig_correlmatrices, correlmatrixaxes, std_multiplier,
                         correlmatrix_colormap,
                         correlmatrix_filename, logarithmic_correlmatrix=True, cormaptest=True):
    """Assess the stability of repeated exposures at one sample-to-detector
    distance.

    Computes the correlation matrix of the 1D curves, plots it into
    ``correlmatrixaxes``, saves it to ``correlmatrix_filename`` (.npz) and
    builds an ipy_table report. An exposure is flagged bad when its row
    average deviates by more than ``std_multiplier`` standard deviations
    (and, independently, when the datcmp/CorMap test rejects it).

    Returns (badfsns, badfsns_datcmp, table, rowavg).
    """
    # calculate and plot correlation matrix
    cmatrix, badidx, rowavg = correlmatrix(data1d, std_multiplier, logarithmic_correlmatrix)
    rowavgmean = rowavg.mean()
    rowavgstd = rowavg.std()
    writemarkdown('#### Assessing sample stability')
    writemarkdown("- Mean of row averages: " + str(rowavgmean))
    writemarkdown("- Std of row averages: " + str(rowavgstd) + ' (%.2f %%)' % (rowavgstd / rowavgmean * 100))
    img = correlmatrixaxes.imshow(cmatrix, interpolation='nearest', cmap=matplotlib.cm.get_cmap(correlmatrix_colormap))
    cax = make_axes_locatable(correlmatrixaxes).append_axes('right', size="5%", pad=0.1)
    fig_correlmatrices.colorbar(img, cax=cax)
    fsns = [h.fsn for h in headers]
    correlmatrixaxes.set_title('%.2f mm' % dist)
    correlmatrixaxes.set_xticks(list(range(len(data1d))))
    correlmatrixaxes.set_xticklabels([str(f) for f in fsns], rotation='vertical')
    correlmatrixaxes.set_yticks(list(range(len(data1d))))
    correlmatrixaxes.set_yticklabels([str(f) for f in fsns])
    np.savez_compressed(correlmatrix_filename,
                        correlmatrix=cmatrix, fsns=np.array(fsns))
    # Report table on sample stability
    tab = [['FSN', 'Date', 'Discrepancy', 'Relative discrepancy ((x-mean(x))/std(x))', 'Quality', 'Quality (cormap)']]
    badfsns = []
    badfsns_datcmp = []
    if cormaptest:
        # datcmp (ATSAS) performs the CorMap pairwise-comparison test.
        matC, matp, matpadj, datcmp_ok = datcmp(*data1d)
    else:
        # Fall back to the correlation-matrix verdict only.
        datcmp_ok = [not x for x in badidx]
    for h, bad, discr, dcmp_ok in zip(headers, badidx, rowavg, datcmp_ok):
        # Check mark for good, row of crosses for bad exposures.
        tab.append([h.fsn, h.date.isoformat(), discr, (discr - rowavgmean) / rowavgstd,
                    ["\u2713", "\u2718\u2718\u2718\u2718\u2718"][bad],
                    ["\u2713", "\u2718\u2718\u2718\u2718\u2718"][dcmp_ok != 1]])
        if bad:
            badfsns.append(h.fsn)
        # dcmp_ok may be NaN when the test could not be run for this exposure.
        if (not dcmp_ok and not np.isnan(dcmp_ok)):
            badfsns_datcmp.append(h.fsn)
    tab = ipy_table.IpyTable(tab)
    tab.apply_theme('basic')
    return badfsns, badfsns_datcmp, tab, rowavg
def summarize(reintegrate=True, dist_tolerance=3, qranges=None,
              samples=None, raw=False, late_radavg=True, graph_ncols=3,
              std_multiplier=3, graph_extension='png',
              graph_dpi=80, correlmatrix_colormap='coolwarm',
              image_colormap='viridis', correlmatrix_logarithmic=True, cormaptest=True):
    """Summarize scattering patterns and curves for all samples defined
    by the global `allsamplenames`.

    Inputs:
        reintegrate (bool, default=True): if the curves are to be obtained
            by reintegrating the patterns. Otherwise 1D curves are loaded.
        dist_tolerance (float, default=3): sample-to-detector distances
            nearer than this are considered the same
        qranges (dict): a dictionary mapping approximate sample-to-detector
            distances (within dist_tolerance) to one-dimensional np.ndarrays
            of the desired q-range of the reintegration.
        samples (list or None): the names of the samples to summarize. If
            None, all samples defined by ``allsamplenames`` are used.
        raw (bool, default=False): if raw images are to be treated instead
            the evaluated ones (default).
        late_radavg (bool, default=True): if the scattering curves are to
            be calculated from the summarized scattering pattern. If False,
            scattering curves are calculated from each pattern and will be
            averaged.
        graph_ncols: the number of columns in graphs (2D patterns,
            correlation matrices)
        std_multiplier: if the absolute value of the relative discrepancy
            is larger than this limit, the exposure is deemed an outlier.
        graph_extension: the extension of the produced hardcopy files.
        graph_dpi: resolution of the graphs
        correlmatrix_colormap: name of the colormap to be used for the
            correlation matrices (resolved by matplotlib.cm.get_cmap())
        image_colormap: name of the colormap to be used for the scattering
            patterns (resolved by matplotlib.cm.get_cmap())
        correlmatrix_logarithmic: if the correlation matrix has to be
            calculated from the logarithm of the intensity.
        cormaptest (bool, default=True): if the CorMap (datcmp) test is run
            on the repeated exposures in addition to the correlation matrix.

    Side effects: populates ``_data1d``, ``_data2d``, ``_headers_sample``
    and ``_rowavg`` in the IPython user namespace, updates ``badfsns`` /
    ``badfsns_datcmp``, and writes curves/figures into ``saveto_dir`` and
    ``auximages_dir``.
    """
    if qranges is None:
        qranges = {}
    ip = get_ipython()
    data2d = {}
    data1d = {}
    headers_tosave = {}
    rowavg = {}
    if raw:
        writemarkdown('# Summarizing RAW images.')
        headers = ip.user_ns['_headers']['raw']
        rawpart = '_raw'  # this will be added in the filenames saved
    else:
        writemarkdown('# Summarizing CORRECTED images.')
        headers = ip.user_ns['_headers']['processed']
        rawpart = ''  # nothing will be added in the filenames saved
    if samples is None:
        samples = sorted(ip.user_ns['allsamplenames'])
    for samplename in samples:
        writemarkdown('## ' + samplename)
        headers_sample = [h for h in headers if h.title == samplename]
        data2d[samplename] = {}
        rowavg[samplename] = {}
        data1d[samplename] = {}
        headers_tosave[samplename] = {}
        dists = get_different_distances([h for h in headers if h.title == samplename], dist_tolerance)
        if not dists:
            writemarkdown('No measurements from sample, skipping.')
            continue
        fig_2d = plt.figure()
        fig_curves = plt.figure()
        fig_correlmatrices = plt.figure()
        distaxes = {}
        correlmatrixaxes = {}
        ncols = min(len(dists), graph_ncols)
        nrows = int(np.ceil(len(dists) / ncols))
        onedimaxes = fig_curves.add_axes((0.1, 0.3, 0.8, 0.5))
        onedimstdaxes = fig_curves.add_axes((0.1, 0.1, 0.8, 0.2))
        for distidx, dist in enumerate(dists):
            writemarkdown("### Distance " + str(dist) + " mm")
            headers_narrowed = [h for h in headers_sample if abs(float(h.distance) - dist) < dist_tolerance]
            distaxes[dist] = fig_2d.add_subplot(
                nrows, ncols, distidx + 1)
            correlmatrixaxes[dist] = fig_correlmatrices.add_subplot(
                nrows, ncols, distidx + 1)
            # determine the q-range to be used from the qranges argument.
            try:
                distkey_min = min([np.abs(k - dist)
                                   for k in qranges if np.abs(k - dist) < dist_tolerance])
            except ValueError:
                # no matching key in qranges dict
                qrange = None  # request auto-determination of q-range
            else:
                distkey = [
                    k for k in qranges if np.abs(k - dist) == distkey_min][0]
                qrange = qranges[distkey]
            (data1d[samplename][dist], data2d[samplename][dist], headers_tosave[samplename][dist]) = \
                _collect_data_for_summarization(headers_narrowed, raw, reintegrate, qrange)
            badfsns, badfsns_datcmp, tab, rowavg[samplename][dist] = _stabilityassessment(
                headers_tosave[samplename][dist],
                data1d[samplename][dist], dist,
                fig_correlmatrices,
                correlmatrixaxes[dist], std_multiplier, correlmatrix_colormap,
                os.path.join(ip.user_ns['saveto_dir'], 'correlmatrix_%s_%s' % (
                    samplename,
                    ('%.2f' % dist).replace('.', '_')) +
                             rawpart + '.npz'),
                logarithmic_correlmatrix=correlmatrix_logarithmic,
                cormaptest=cormaptest)
            # BUGFIX: both guards must be independent 'if's. The original
            # 'elif' left 'badfsns_datcmp' undefined whenever 'badfsns' was
            # also missing, raising a KeyError two lines below on the first
            # run of a fresh session.
            if 'badfsns' not in ip.user_ns:
                ip.user_ns['badfsns'] = {}
            if 'badfsns_datcmp' not in ip.user_ns:
                ip.user_ns['badfsns_datcmp'] = {}
            ip.user_ns['badfsns'] = set(ip.user_ns['badfsns']).union(badfsns)
            ip.user_ns['badfsns_datcmp'] = set(ip.user_ns['badfsns_datcmp']).union(badfsns_datcmp)
            display(tab)
            # Plot the image
            try:
                data2d[samplename][dist].imshow(axes=distaxes[dist], show_crosshair=False,
                                                norm=matplotlib.colors.LogNorm(),
                                                cmap=matplotlib.cm.get_cmap(image_colormap))
            except ValueError:
                print('Error plotting 2D image for sample %s, distance %.2f' % (samplename, dist))
            distaxes[dist].set_xlabel('q (' + qunit() + ')')
            distaxes[dist].set_ylabel('q (' + qunit() + ')')
            distaxes[dist].set_title(
                '%.2f mm (%d curve%s)' % (dist, len(headers_tosave[samplename][dist]),
                                          ['', 's'][len(headers_tosave[samplename][dist]) > 1]))
            # Plot the curves, color-coded by the stability verdicts.
            Istd = np.stack([c.Intensity for c in data1d[samplename][dist]], axis=1)
            for c, h in zip(data1d[samplename][dist], headers_tosave[samplename][dist]):
                color = 'green'
                if h.fsn in badfsns_datcmp:
                    color = 'magenta'
                if h.fsn in badfsns:
                    color = 'red'
                c.loglog(axes=onedimaxes, color=color)
            if Istd.shape[1] > 1:
                onedimstdaxes.loglog(data1d[samplename][dist][0].q, Istd.std(axis=1) / Istd.mean(axis=1) * 100, 'b-')
            if not late_radavg:
                data1d[samplename][dist] = Curve.average(
                    *data1d[samplename][dist])
            else:
                data1d[samplename][dist] = (
                    data2d[samplename][dist].radial_average(
                        qrange,
                        errorpropagation=3,
                        abscissa_errorpropagation=3, raw_result=False))
            data1d[samplename][dist].loglog(
                label='Average', lw=2, color='k', axes=onedimaxes)
            ##Saving image, headers, mask and curve
            # data2d[samplename][dist].write(
            #    os.path.join(ip.user_ns['saveto_dir'],
            #                 samplename + '_'+(
            #                     '%.2f' % dist).replace('.', '_') +
            #                 rawpart + '.npz'), plugin='CREDO Reduced')
            # data2d[samplename][dist].header.write(
            #    os.path.join(ip.user_ns['saveto_dir'],
            #                 samplename + '_'+(
            #                     '%.2f' % dist).replace('.', '_') +
            #                 rawpart +'.log'), plugin='CREDO Reduced')
            # data2d[samplename][dist].mask.write_to_mat(
            #    os.path.join(ip.user_ns['saveto_dir'],
            #                 data2d[samplename][dist].mask.maskid+'.mat'))
            data1d[samplename][dist].save(os.path.join(ip.user_ns['saveto_dir'],
                                                       samplename + '_' + ('%.2f' % dist).replace('.',
                                                                                                  '_') + rawpart + '.txt'))
            # Report on qrange and flux
            q_ = data1d[samplename][dist].q
            qmin = q_[q_ > 0].min()
            writemarkdown('#### Q-range & flux')
            writemarkdown(
                '- $q_{min}$: ' + print_abscissavalue(qmin, headers_tosave[samplename][dist][0].wavelength, dist))
            writemarkdown('- $q_{max}$: ' + print_abscissavalue(data1d[samplename][dist].q.max(),
                                                                headers_tosave[samplename][dist][0].wavelength, dist))
            writemarkdown('- Number of $q$ points: ' + str(len(data1d[samplename][dist])))
            meastime = sum([h.exposuretime for h in headers_tosave[samplename][dist]])
            writemarkdown("- from %d exposures, total exposure time %.0f sec <=> %.2f hr" % (
                len(headers_tosave[samplename][dist]),
                meastime, meastime / 3600.))
            try:
                flux = [h.flux for h in headers_tosave[samplename][dist]]
                flux = ErrorValue(np.mean(flux), np.std(flux))
                writemarkdown("- beam flux (photon/sec): %s" % flux)
            except KeyError:
                # Raw headers carry no flux information.
                writemarkdown("- *No information on beam flux: dealing with raw data.*")
        onedimaxes.set_xlabel('')
        onedimaxes.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
        # plt.legend(loc='best')
        onedimaxes.grid(True, which='both')
        onedimaxes.axis('tight')
        onedimaxes.set_title(samplename)
        onedimstdaxes.set_xlabel('q (' + qunit() + ')')
        onedimstdaxes.set_ylabel('Rel.std.dev. of intensity (%)')
        onedimstdaxes.grid(True, which='both')
        onedimstdaxes.set_xlim(*onedimaxes.get_xlim())
        onedimstdaxes.set_xscale(onedimaxes.get_xscale())
        putlogo(fig_curves)
        putlogo(fig_2d)
        fig_2d.tight_layout()
        fig_correlmatrices.suptitle(samplename)
        fig_correlmatrices.tight_layout()
        fig_2d.savefig(
            os.path.join(ip.user_ns['auximages_dir'],
                         'averaging2D_' +
                         samplename + rawpart + '.' + graph_extension),
            dpi=graph_dpi)
        fig_curves.savefig(
            os.path.join(ip.user_ns['auximages_dir'],
                         'averaging1D_' +
                         samplename + rawpart + '.' + graph_extension),
            dpi=graph_dpi)
        putlogo(fig_correlmatrices)
        fig_correlmatrices.savefig(
            os.path.join(ip.user_ns['auximages_dir'],
                         'correlation_' +
                         samplename + rawpart + '.' + graph_extension),
            dpi=graph_dpi)
        writemarkdown("### Collected images from all distances")
        plt.show()
    writemarkdown("Updated badfsns list:")
    writemarkdown('[' + ', '.join(str(f) for f in ip.user_ns['badfsns']) + ']')
    writemarkdown("Updated badfsns list using datcmp:")
    writemarkdown('[' + ', '.join(str(f) for f in ip.user_ns['badfsns_datcmp']) + ']')
    ip.user_ns['_data1d'] = data1d
    ip.user_ns['_data2d'] = data2d
    ip.user_ns['_headers_sample'] = headers_tosave
    ip.user_ns['_rowavg'] = rowavg
def _scale_two_exposures(exp1, exp2, qmin, qmax, N=10, use_additive_constant=False):
qrange = np.linspace(qmin, qmax, N)
rad1 = exp1.radial_average(qrange=qrange, raw_result=False)
rad2 = exp2.radial_average(qrange=qrange, raw_result=False)
if use_additive_constant:
bg_init = 0
else:
bg_init = FixedParameter(0)
factor, bg, stat = nonlinear_odr(rad2.y, rad1.y, rad2.dy, rad1.dy, lambda x, factor, bg: x * factor + bg,
[1, bg_init])
return factor, bg
def unite(samplename, uniqmin=[], uniqmax=[], uniqsep=[], graph_ncols=2, graph_subplotpars={'hspace': 0.3},
graph_extension='png', graph_dpi=80, additive_constant=False):
ip = get_ipython()
if isinstance(uniqmin, numbers.Number):
uniqmin = [uniqmin]
if isinstance(uniqmax, numbers.Number):
uniqmax = [uniqmax]
if isinstance(uniqsep, numbers.Number):
uniqsep = [uniqsep]
data1d = ip.user_ns['_data1d'][samplename]
print("Uniting measurements of sample %s at different s-d distances" % samplename)
uniparams = {'qmin': uniqmin, 'qmax': uniqmax, 'qsep': uniqsep}
for p in uniparams:
uniparams[p] = uniparams[p] + [None] * \
max(0, len(data1d) - 1 - len(uniparams[p]))
dists = list(reversed(sorted(data1d.keys())))
if len(dists) < 2:
print("Less than two distances found for sample %s; no point of uniting." % samplename)
return
united = None
graph_nrows = int(
np.ceil((len(dists)) / (graph_ncols * 1.0)))
fig = plt.figure()
unitedaxis = fig.add_subplot(graph_nrows, graph_ncols, 1)
factor = 1.0
for idx, dist1, dist2, qmin, qmax, qsep in zip(list(range(len(dists) - 1)),
dists[:-1], dists[1:],
uniparams['qmin'],
uniparams['qmax'],
uniparams['qsep']):
print(" Scaling together distances %f and %f mm" % (dist1, dist2), flush=True)
if united is None:
united = data1d[dist1]
if qmin is None:
qmin = data1d[dist2].sanitize().q.min()
print(" Auto-detected qmin:", qmin, flush=True)
if qmax is None:
qmax = data1d[dist1].sanitize().q.max()
print(" Auto-detected qmax:", qmax, flush=True)
if qsep is None:
qsep = 0.5 * (qmin + qmax)
print(" Auto-detected qsep:", qsep, flush=True)
ax = fig.add_subplot(graph_nrows, graph_ncols, 2 + idx)
(factor * data1d[dist1]).loglog(axes=ax, label='%.2f mm' % dist1)
united, factor1, bg, stat = _merge_two_curves(united,
data1d[dist2], qmin, qmax, qsep,
use_additive_constant=additive_constant)
factor = factor1 * factor
uniparams['qmin'][idx] = qmin
uniparams['qmax'][idx] = qmax
uniparams['qsep'][idx] = qsep
print(" Scaling factor is", factor.tostring(), flush=True)
if not additive_constant:
print(" Additive constant has not been used.", flush=True)
else:
print(" Additive constant is:", bg.tostring(), flush=True)
print(" Reduced Chi^2 of the ODR fit:", stat['Chi2_reduced'], flush=True)
print(" DoF of the ODR fit:", stat['DoF'], flush=True)
(data1d[dist2] * factor + bg).loglog(axes=ax, label='%.2f mm' % dist2)
ax.set_xlabel('q (' + qunit() + ')')
ax.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
ax.legend(loc='best')
# ax.grid(which='both')
ax.axis('tight')
ax.set_title('Factor: ' + str(factor))
lims = ax.axis()
ax.plot([qmin, qmin], lims[2:], '--r', lw=2)
ax.plot([qmax, qmax], lims[2:], '--r', lw=2)
ax.plot([qsep, qsep], lims[2:], '--k')
ax.grid(True, which='both')
if '_data1dunited' not in ip.user_ns:
ip.user_ns['_data1dunited'] = {}
united.loglog(axes=unitedaxis)
unitedaxis.set_xlabel('q (' + qunit() + ')')
unitedaxis.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
unitedaxis.legend(loc='best')
unitedaxis.set_title('United scattering of %s' % samplename)
unitedaxis.grid(True, which='both')
# unitedaxis.grid(which='both')
unitedaxis.axis('tight')
lims = unitedaxis.axis()
for qs in uniparams['qsep']:
unitedaxis.plot([qs] * 2, lims[2:], '--r')
ip.user_ns['_data1dunited'][samplename] = united
putlogo()
fig.subplots_adjust(**graph_subplotpars)
plt.savefig(
os.path.join(ip.user_ns['auximages_dir'], 'uniting_' + samplename + '.' + graph_extension), dpi=graph_dpi)
print(" United curve spans the following ranges:")
print(" q_min: ",
print_abscissavalue(united.q.min(), ip.user_ns['_headers_sample'][samplename][dists[0]][0].wavelength))
print(" q_max: ",
print_abscissavalue(united.q.max(), ip.user_ns['_headers_sample'][samplename][dists[0]][0].wavelength))
print(" q_max/q_min:", united.q.max() / united.q.min())
print(" I_min: ", united.Intensity.min(), "cm^{-1}")
print(" I_max: ", united.Intensity.max(), "cm^{-1}")
print(" I_max/I_min:", united.Intensity.max() / united.Intensity.min())
print(" # of points: ", len(united))
united.save(os.path.join(ip.user_ns['saveto_dir'], 'united_' + samplename + '.txt'))
plt.show()
def subtract_bg(samplename, bgname, factor=1, distance=None, disttolerance=2,
subname=None, qrange=(), graph_extension='png', graph_dpi=80):
"""Subtract background from measurements.
Inputs:
samplename: the name of the sample
bgname: the name of the background measurements. Alternatively, it can
be a numeric value (float or ErrorValue), which will be subtracted.
If None, this constant will be determined by integrating the
scattering curve in the range given by qrange.
factor: the background curve will be multiplied by this
distance: if None, do the subtraction for all sample-to-detector distances.
Otherwise give here the value of the sample-to-detector distance.
qrange: a tuple (qmin, qmax)
disttolerance: the tolerance in which two distances are considered
equal.
subname: the sample name of the background-corrected curve. The default
is samplename + '-' + bgname
"""
ip = get_ipython()
data1d = ip.user_ns['_data1d']
data2d = ip.user_ns['_data2d']
if 'subtractedsamplenames' not in ip.user_ns:
ip.user_ns['subtractedsamplenames'] = set()
subtractedsamplenames = ip.user_ns['subtractedsamplenames']
if subname is None:
if isinstance(bgname, str):
subname = samplename + '-' + bgname
else:
subname = samplename + '-const'
if distance is None:
dists = data1d[samplename]
else:
dists = [d for d in data1d[samplename] if abs(d - distance) < disttolerance]
for dist in dists:
if isinstance(bgname, str):
if not disttolerance:
if dist not in data1d[bgname]:
print(
'Warning: Missing distance %g for background measurement (samplename: %s, background samplename: %s)' % (
dist, samplename, bgname))
continue
else:
bgdist = dist
else:
bgdist = sorted([(d, r) for (d, r) in [(d, np.abs(d - dist)) for d in list(data1d[bgname].keys())] if
r <= disttolerance], key=lambda x: x[1])[0][0]
if subname not in data1d:
data1d[subname] = {}
if subname not in data2d:
data2d[subname] = {}
if subname not in ip.user_ns['_headers_sample']:
ip.user_ns['_headers_sample'][subname] = {}
data1_s = data1d[samplename][dist]
data2_s = data2d[samplename][dist]
if isinstance(bgname, str):
data1_bg = data1d[bgname][bgdist]
data2_bg = data2d[bgname][bgdist]
if factor is None:
factor = data1_s.trim(*qrange).momentum(0) / data1_bg.trim(*qrange).momentum(0)
elif bgname is None:
data1_bg = data1_s.trim(*qrange).momentum(0)
data2_bg = data1_bg
else:
data1_bg = bgname
data2_bg = bgname
if factor is None:
factor = 1
data1d[subname][dist] = data1_s - factor * data1_bg
data2d[subname][dist] = data2_s - factor * data2_bg
data1d[subname][dist].save(
os.path.join(ip.user_ns['saveto_dir'], subname + '_' + ('%.2f' % dist).replace('.', '_') + '.txt'))
ip.user_ns['_headers_sample'][subname][dist] = ip.user_ns['_headers_sample'][samplename][
dist] # ugly hack, I have no better idea.
plt.figure()
plotsascurve(samplename, dist=dist)
if isinstance(bgname, str):
plotsascurve(bgname, dist=dist, factor=factor)
plotsascurve(subname, dist=dist)
plt.savefig(os.path.join(ip.user_ns['auximages_dir'],
'subtractbg_' + samplename + '.' + graph_extension),
dpi=graph_dpi)
subtractedsamplenames.add(subname)
|
awacha/credolib
|
credolib/procedures.py
|
subtract_bg
|
python
|
def subtract_bg(samplename, bgname, factor=1, distance=None, disttolerance=2,
subname=None, qrange=(), graph_extension='png', graph_dpi=80):
ip = get_ipython()
data1d = ip.user_ns['_data1d']
data2d = ip.user_ns['_data2d']
if 'subtractedsamplenames' not in ip.user_ns:
ip.user_ns['subtractedsamplenames'] = set()
subtractedsamplenames = ip.user_ns['subtractedsamplenames']
if subname is None:
if isinstance(bgname, str):
subname = samplename + '-' + bgname
else:
subname = samplename + '-const'
if distance is None:
dists = data1d[samplename]
else:
dists = [d for d in data1d[samplename] if abs(d - distance) < disttolerance]
for dist in dists:
if isinstance(bgname, str):
if not disttolerance:
if dist not in data1d[bgname]:
print(
'Warning: Missing distance %g for background measurement (samplename: %s, background samplename: %s)' % (
dist, samplename, bgname))
continue
else:
bgdist = dist
else:
bgdist = sorted([(d, r) for (d, r) in [(d, np.abs(d - dist)) for d in list(data1d[bgname].keys())] if
r <= disttolerance], key=lambda x: x[1])[0][0]
if subname not in data1d:
data1d[subname] = {}
if subname not in data2d:
data2d[subname] = {}
if subname not in ip.user_ns['_headers_sample']:
ip.user_ns['_headers_sample'][subname] = {}
data1_s = data1d[samplename][dist]
data2_s = data2d[samplename][dist]
if isinstance(bgname, str):
data1_bg = data1d[bgname][bgdist]
data2_bg = data2d[bgname][bgdist]
if factor is None:
factor = data1_s.trim(*qrange).momentum(0) / data1_bg.trim(*qrange).momentum(0)
elif bgname is None:
data1_bg = data1_s.trim(*qrange).momentum(0)
data2_bg = data1_bg
else:
data1_bg = bgname
data2_bg = bgname
if factor is None:
factor = 1
data1d[subname][dist] = data1_s - factor * data1_bg
data2d[subname][dist] = data2_s - factor * data2_bg
data1d[subname][dist].save(
os.path.join(ip.user_ns['saveto_dir'], subname + '_' + ('%.2f' % dist).replace('.', '_') + '.txt'))
ip.user_ns['_headers_sample'][subname][dist] = ip.user_ns['_headers_sample'][samplename][
dist] # ugly hack, I have no better idea.
plt.figure()
plotsascurve(samplename, dist=dist)
if isinstance(bgname, str):
plotsascurve(bgname, dist=dist, factor=factor)
plotsascurve(subname, dist=dist)
plt.savefig(os.path.join(ip.user_ns['auximages_dir'],
'subtractbg_' + samplename + '.' + graph_extension),
dpi=graph_dpi)
subtractedsamplenames.add(subname)
|
Subtract background from measurements.
Inputs:
samplename: the name of the sample
bgname: the name of the background measurements. Alternatively, it can
be a numeric value (float or ErrorValue), which will be subtracted.
If None, this constant will be determined by integrating the
scattering curve in the range given by qrange.
factor: the background curve will be multiplied by this
distance: if None, do the subtraction for all sample-to-detector distances.
Otherwise give here the value of the sample-to-detector distance.
qrange: a tuple (qmin, qmax)
disttolerance: the tolerance in which two distances are considered
equal.
subname: the sample name of the background-corrected curve. The default
is samplename + '-' + bgname
|
train
|
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/procedures.py#L519-L602
|
[
"def plotsascurve(samplename, *args, **kwargs):\n if 'dist' not in kwargs:\n kwargs['dist'] = None\n data1d, dist = getsascurve(samplename, kwargs['dist'])\n del kwargs['dist']\n if 'factor' in kwargs:\n factor=kwargs['factor']\n del kwargs['factor']\n else:\n factor=1\n if 'label' not in kwargs:\n if isinstance(dist, str):\n kwargs['label'] = samplename + ' ' + dist\n else:\n kwargs['label'] = samplename + ' %g mm' % dist\n if 'errorbar' in kwargs:\n errorbars = bool(kwargs['errorbar'])\n del kwargs['errorbar']\n else:\n errorbars = False\n if errorbars:\n ret = (data1d*factor).errorbar(*args, **kwargs)\n plt.xscale('log')\n plt.yscale('log')\n else:\n ret = (data1d*factor).loglog(*args, **kwargs)\n plt.xlabel('q (' + qunit() + ')')\n plt.ylabel('$d\\\\Sigma/d\\\\Omega$ (cm$^{-1}$ sr$^{-1}$)')\n plt.legend(loc='best')\n plt.grid(True, which='both')\n plt.axis('tight')\n return ret\n"
] |
__all__ = ['summarize', 'unite', 'subtract_bg']
import numbers
import os
import sys
import traceback
import ipy_table
import matplotlib
import matplotlib.cm
import matplotlib.colors
import matplotlib.pyplot as plt
import numpy as np
from IPython.core.getipython import get_ipython
from IPython.display import display
from mpl_toolkits.axes_grid import make_axes_locatable
from sastool.classes2 import Curve, Exposure
from sastool.libconfig import qunit
from sastool.misc.easylsq import FixedParameter, nonlinear_odr
from sastool.misc.errorvalue import ErrorValue
from .atsas import datcmp
from .calculation import correlmatrix
from .io import get_different_distances, load_exposure
from .plotting import plotsascurve
from .utils import print_abscissavalue, putlogo, writemarkdown
def _collect_data_for_summarization(headers, raw, reintegrate, qrange):
    """Load (and optionally re-integrate) every exposure belonging to `headers`.

    For each header the 2D exposure is loaded (raw or processed, depending on
    `raw`), a 1D scattering curve is obtained (re-integrated on `qrange` when
    `reintegrate` is True, otherwise loaded from disk when possible), and each
    curve is saved to the `saveto_dir` of the IPython user namespace.

    Returns a tuple (data1d, data2d, headersout) where
        data1d: list of sanitized 1D curves, one per successfully loaded FSN
        data2d: the average of all successfully loaded 2D exposures
        headersout: headers of the successfully loaded exposures

    NOTE(review): when `headers` is empty the bare `return` yields None, which
    would break callers that unpack three values -- confirm callers never pass
    an empty list. Likewise, if no exposure loads, `data2d /= len(data1d)`
    raises ZeroDivisionError.
    """
    ip = get_ipython()
    data1d = []
    data2d = 0
    headersout = []
    if not headers:
        return
    for head in headers:
        # an optional user-supplied 'mask_override' callable may designate a
        # replacement mask for this exposure
        try:
            mo = ip.user_ns['mask_override'](head)
        except KeyError:
            mo = None
        ex = None
        last_exception = None
        try:
            ex = load_exposure(head.fsn, raw=raw, processed=not raw)
            assert isinstance(ex, Exposure)
            if mo is not None:
                try:
                    ex.mask = ex.loader.loadmask(mo)
                except FileNotFoundError:
                    print('Could not load mask: %s' % mo)
                    raise FileNotFoundError('Could not load mask: %s' % mo)
        except FileNotFoundError as exc:
            # remember the traceback for the error report below
            last_exception = sys.exc_info()
        if ex is None:
            # loading failed: report, blacklist the FSN, go on with the rest
            print('Could not load {} 2D file for FSN {:d}. Exception: {}'.format(
                ['processed', 'raw'][raw], head.fsn, '\n'.join(traceback.format_exception(*last_exception))))
            ip.user_ns['badfsns'] = set(ip.user_ns['badfsns'])
            ip.user_ns['badfsns'].add(head.fsn)
            continue
        ex.header = head
        curve = None
        if not reintegrate:
            # try to load a previously radially averaged curve from any loader
            # of the matching (raw/processed) kind
            for l in [l_ for l_ in ip.user_ns['_loaders'] if l_.processed != raw]:
                try:
                    curve = l.loadcurve(head.fsn)
                    break
                except FileNotFoundError:
                    continue
            if curve is None:
                print('Cannot load curve for FSN %d: reintegrating.' % head.fsn)
        if curve is None:
            # this happens if reintegrate==True or if reintegrate==False but the curve could not be loaded.
            curve = ex.radial_average(qrange, errorpropagation=3,
                                      abscissa_errorpropagation=3, raw_result=False)
        curve = curve.sanitize()
        data1d.append(curve)
        # save the curve both as a generic file and as a title-tagged 3-column
        # (q, intensity, error) text matrix
        data1d[-1].save(os.path.join(ip.user_ns['saveto_dir'], 'curve_%05d.txt' % head.fsn))
        mat = np.zeros((len(data1d[-1]), 3))
        mat[:, 0] = data1d[-1].q
        mat[:, 1] = data1d[-1].Intensity
        mat[:, 2] = data1d[-1].Error
        np.savetxt(os.path.join(ip.user_ns['saveto_dir'], 'curve_%s_%05d.dat' % (head.title, head.fsn)), mat)
        del mat
        data2d = data2d + ex
        headersout.append(ex.header)
    data2d /= len(data1d)
    return data1d, data2d, headersout
def _stabilityassessment(headers, data1d, dist, fig_correlmatrices, correlmatrixaxes, std_multiplier,
                         correlmatrix_colormap,
                         correlmatrix_filename, logarithmic_correlmatrix=True, cormaptest=True):
    """Assess the stability of repeated exposures of one sample at one distance.

    A correlation matrix of the curves in `data1d` is computed (on the
    logarithm of the intensities if `logarithmic_correlmatrix` is True),
    drawn on `correlmatrixaxes` and saved to `correlmatrix_filename` (.npz).
    Outliers are flagged both by the row-average discrepancy (threshold set
    by `std_multiplier`) and -- if `cormaptest` is True -- by the `datcmp`
    cormap test.

    Returns (badfsns, badfsns_datcmp, tab, rowavg):
        badfsns: FSNs flagged by the correlation-matrix criterion
        badfsns_datcmp: FSNs flagged by the datcmp test
        tab: an ipy_table report table for display
        rowavg: per-exposure discrepancy values
    """
    # calculate and plot correlation matrix
    cmatrix, badidx, rowavg = correlmatrix(data1d, std_multiplier, logarithmic_correlmatrix)
    rowavgmean = rowavg.mean()
    rowavgstd = rowavg.std()
    writemarkdown('#### Assessing sample stability')
    writemarkdown("- Mean of row averages: " + str(rowavgmean))
    writemarkdown("- Std of row averages: " + str(rowavgstd) + ' (%.2f %%)' % (rowavgstd / rowavgmean * 100))
    img = correlmatrixaxes.imshow(cmatrix, interpolation='nearest', cmap=matplotlib.cm.get_cmap(correlmatrix_colormap))
    # attach the colorbar to a small axes carved off the right-hand side
    cax = make_axes_locatable(correlmatrixaxes).append_axes('right', size="5%", pad=0.1)
    fig_correlmatrices.colorbar(img, cax=cax)
    fsns = [h.fsn for h in headers]
    correlmatrixaxes.set_title('%.2f mm' % dist)
    correlmatrixaxes.set_xticks(list(range(len(data1d))))
    correlmatrixaxes.set_xticklabels([str(f) for f in fsns], rotation='vertical')
    correlmatrixaxes.set_yticks(list(range(len(data1d))))
    correlmatrixaxes.set_yticklabels([str(f) for f in fsns])
    np.savez_compressed(correlmatrix_filename,
                        correlmatrix=cmatrix, fsns=np.array(fsns))
    # Report table on sample stability
    tab = [['FSN', 'Date', 'Discrepancy', 'Relative discrepancy ((x-mean(x))/std(x))', 'Quality', 'Quality (cormap)']]
    badfsns = []
    badfsns_datcmp = []
    if cormaptest:
        matC, matp, matpadj, datcmp_ok = datcmp(*data1d)
    else:
        # without the cormap test, every exposure not flagged by the
        # correlation matrix counts as OK
        datcmp_ok = [not x for x in badidx]
    for h, bad, discr, dcmp_ok in zip(headers, badidx, rowavg, datcmp_ok):
        # check-mark vs. row of crosses for the two quality verdicts
        tab.append([h.fsn, h.date.isoformat(), discr, (discr - rowavgmean) / rowavgstd,
                    ["\u2713", "\u2718\u2718\u2718\u2718\u2718"][bad],
                    ["\u2713", "\u2718\u2718\u2718\u2718\u2718"][dcmp_ok != 1]])
        if bad:
            badfsns.append(h.fsn)
        if (not dcmp_ok and not np.isnan(dcmp_ok)):
            # NaN means the cormap test could not decide; do not flag then
            badfsns_datcmp.append(h.fsn)
    tab = ipy_table.IpyTable(tab)
    tab.apply_theme('basic')
    return badfsns, badfsns_datcmp, tab, rowavg
def summarize(reintegrate=True, dist_tolerance=3, qranges=None,
              samples=None, raw=False, late_radavg=True, graph_ncols=3,
              std_multiplier=3, graph_extension='png',
              graph_dpi=80, correlmatrix_colormap='coolwarm',
              image_colormap='viridis', correlmatrix_logarithmic=True, cormaptest=True):
    """Summarize scattering patterns and curves for all samples defined
    by the global `allsamplenames`.

    Inputs:
        reintegrate (bool, default=True): if the curves are to be obtained
            by reintegrating the patterns. Otherwise 1D curves are loaded.
        dist_tolerance (float, default=3): sample-to-detector distances
            nearer than this are considered the same
        qranges (dict): a dictionary mapping approximate sample-to-detector
            distances (within dist_tolerance) to one-dimensional np.ndarrays
            of the desired q-range of the reintegration.
        samples (list or None): the names of the samples to summarize. If
            None, all samples defined by ``allsamplenames`` are used.
        raw (bool, default=False): if raw images are to be treated instead
            the evaluated ones (default).
        late_radavg (bool, default=True): if the scattering curves are to
            be calculated from the summarized scattering pattern. If False,
            scattering curves are calculated from each pattern and will be
            averaged.
        graph_ncols: the number of columns in graphs (2D patterns,
            correlation matrices)
        std_multiplier: if the absolute value of the relative discrepancy
            is larger than this limit, the exposure is deemed an outlier.
        graph_extension: the extension of the produced hardcopy files.
        graph_dpi: resolution of the graphs
        correlmatrix_colormap: name of the colormap to be used for the
            correlation matrices (resolved by matplotlib.cm.get_cmap())
        image_colormap: name of the colormap to be used for the scattering
            patterns (resolved by matplotlib.cm.get_cmap())
        correlmatrix_logarithmic: if the correlation matrix has to be
            calculated from the logarithm of the intensity.
    """
    if qranges is None:
        qranges = {}
    ip = get_ipython()
    # BUGFIX: the bad-FSN registries used to be initialized inside the loop
    # with `if 'badfsns' not in ...: ... elif 'badfsns_datcmp' not in ...`,
    # so 'badfsns_datcmp' was never created when 'badfsns' was missing and
    # the union below raised KeyError; they were also unset when no sample
    # was processed, breaking the final report. Initialize both up front.
    ip.user_ns.setdefault('badfsns', set())
    ip.user_ns.setdefault('badfsns_datcmp', set())
    data2d = {}
    data1d = {}
    headers_tosave = {}
    rowavg = {}
    if raw:
        writemarkdown('# Summarizing RAW images.')
        headers = ip.user_ns['_headers']['raw']
        rawpart = '_raw'  # this will be added in the filenames saved
    else:
        writemarkdown('# Summarizing CORRECTED images.')
        headers = ip.user_ns['_headers']['processed']
        rawpart = ''  # nothing will be added in the filenames saved
    if samples is None:
        samples = sorted(ip.user_ns['allsamplenames'])
    for samplename in samples:
        writemarkdown('## ' + samplename)
        headers_sample = [h for h in headers if h.title == samplename]
        data2d[samplename] = {}
        rowavg[samplename] = {}
        data1d[samplename] = {}
        headers_tosave[samplename] = {}
        dists = get_different_distances([h for h in headers if h.title == samplename], dist_tolerance)
        if not dists:
            writemarkdown('No measurements from sample, skipping.')
            continue
        fig_2d = plt.figure()
        fig_curves = plt.figure()
        fig_correlmatrices = plt.figure()
        distaxes = {}
        correlmatrixaxes = {}
        ncols = min(len(dists), graph_ncols)
        nrows = int(np.ceil(len(dists) / ncols))
        onedimaxes = fig_curves.add_axes((0.1, 0.3, 0.8, 0.5))
        onedimstdaxes = fig_curves.add_axes((0.1, 0.1, 0.8, 0.2))
        for distidx, dist in enumerate(dists):
            writemarkdown("### Distance " + str(dist) + " mm")
            headers_narrowed = [h for h in headers_sample if abs(float(h.distance) - dist) < dist_tolerance]
            distaxes[dist] = fig_2d.add_subplot(
                nrows, ncols, distidx + 1)
            correlmatrixaxes[dist] = fig_correlmatrices.add_subplot(
                nrows, ncols, distidx + 1)
            # determine the q-range to be used from the qranges argument.
            try:
                distkey_min = min([np.abs(k - dist)
                                   for k in qranges if np.abs(k - dist) < dist_tolerance])
            except ValueError:
                # no matching key in qranges dict
                qrange = None  # request auto-determination of q-range
            else:
                distkey = [
                    k for k in qranges if np.abs(k - dist) == distkey_min][0]
                qrange = qranges[distkey]

            (data1d[samplename][dist], data2d[samplename][dist], headers_tosave[samplename][dist]) = \
                _collect_data_for_summarization(headers_narrowed, raw, reintegrate, qrange)

            correlmatrix_fn = os.path.join(
                ip.user_ns['saveto_dir'],
                'correlmatrix_%s_%s' % (samplename, ('%.2f' % dist).replace('.', '_')) + rawpart + '.npz')
            badfsns, badfsns_datcmp, tab, rowavg[samplename][dist] = _stabilityassessment(
                headers_tosave[samplename][dist],
                data1d[samplename][dist], dist,
                fig_correlmatrices,
                correlmatrixaxes[dist], std_multiplier, correlmatrix_colormap,
                correlmatrix_fn,
                logarithmic_correlmatrix=correlmatrix_logarithmic,
                cormaptest=cormaptest)
            ip.user_ns['badfsns'] = set(ip.user_ns['badfsns']).union(badfsns)
            ip.user_ns['badfsns_datcmp'] = set(ip.user_ns['badfsns_datcmp']).union(badfsns_datcmp)
            display(tab)

            # Plot the image
            try:
                data2d[samplename][dist].imshow(axes=distaxes[dist], show_crosshair=False,
                                                norm=matplotlib.colors.LogNorm(),
                                                cmap=matplotlib.cm.get_cmap(image_colormap))
            except ValueError:
                print('Error plotting 2D image for sample %s, distance %.2f' % (samplename, dist))
            distaxes[dist].set_xlabel('q (' + qunit() + ')')
            distaxes[dist].set_ylabel('q (' + qunit() + ')')
            distaxes[dist].set_title(
                '%.2f mm (%d curve%s)' % (dist, len(headers_tosave[samplename][dist]),
                                          ['', 's'][len(headers_tosave[samplename][dist]) > 1]))

            # Plot the curves: green = good, magenta = flagged by cormap,
            # red = flagged by the correlation matrix (takes precedence)
            Istd = np.stack([c.Intensity for c in data1d[samplename][dist]], axis=1)
            for c, h in zip(data1d[samplename][dist], headers_tosave[samplename][dist]):
                color = 'green'
                if h.fsn in badfsns_datcmp:
                    color = 'magenta'
                if h.fsn in badfsns:
                    color = 'red'
                c.loglog(axes=onedimaxes, color=color)
            if Istd.shape[1] > 1:
                onedimstdaxes.loglog(data1d[samplename][dist][0].q, Istd.std(axis=1) / Istd.mean(axis=1) * 100, 'b-')
            # replace the list of individual curves by a single averaged one:
            # either averaging the curves, or re-integrating the average image
            if not late_radavg:
                data1d[samplename][dist] = Curve.average(
                    *data1d[samplename][dist])
            else:
                data1d[samplename][dist] = (
                    data2d[samplename][dist].radial_average(
                        qrange,
                        errorpropagation=3,
                        abscissa_errorpropagation=3, raw_result=False))
            data1d[samplename][dist].loglog(
                label='Average', lw=2, color='k', axes=onedimaxes)

            data1d[samplename][dist].save(os.path.join(ip.user_ns['saveto_dir'],
                                                       samplename + '_' + ('%.2f' % dist).replace('.',
                                                                                                  '_') + rawpart + '.txt'))

            # Report on qrange and flux
            q_ = data1d[samplename][dist].q
            qmin = q_[q_ > 0].min()
            writemarkdown('#### Q-range & flux')
            writemarkdown(
                '- $q_{min}$: ' + print_abscissavalue(qmin, headers_tosave[samplename][dist][0].wavelength, dist))
            writemarkdown('- $q_{max}$: ' + print_abscissavalue(data1d[samplename][dist].q.max(),
                                                                headers_tosave[samplename][dist][0].wavelength, dist))
            writemarkdown('- Number of $q$ points: ' + str(len(data1d[samplename][dist])))
            meastime = sum([h.exposuretime for h in headers_tosave[samplename][dist]])
            writemarkdown("- from %d exposures, total exposure time %.0f sec <=> %.2f hr" % (
                len(headers_tosave[samplename][dist]),
                meastime, meastime / 3600.))
            try:
                flux = [h.flux for h in headers_tosave[samplename][dist]]
                flux = ErrorValue(np.mean(flux), np.std(flux))
                writemarkdown("- beam flux (photon/sec): %s" % flux)
            except KeyError:
                writemarkdown("- *No information on beam flux: dealing with raw data.*")
        onedimaxes.set_xlabel('')
        onedimaxes.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
        # plt.legend(loc='best')
        onedimaxes.grid(True, which='both')
        onedimaxes.axis('tight')
        onedimaxes.set_title(samplename)
        onedimstdaxes.set_xlabel('q (' + qunit() + ')')
        onedimstdaxes.set_ylabel('Rel.std.dev. of intensity (%)')
        onedimstdaxes.grid(True, which='both')
        onedimstdaxes.set_xlim(*onedimaxes.get_xlim())
        onedimstdaxes.set_xscale(onedimaxes.get_xscale())
        putlogo(fig_curves)
        putlogo(fig_2d)
        fig_2d.tight_layout()
        fig_correlmatrices.suptitle(samplename)
        fig_correlmatrices.tight_layout()
        fig_2d.savefig(
            os.path.join(ip.user_ns['auximages_dir'],
                         'averaging2D_' +
                         samplename + rawpart + '.' + graph_extension),
            dpi=graph_dpi)
        fig_curves.savefig(
            os.path.join(ip.user_ns['auximages_dir'],
                         'averaging1D_' +
                         samplename + rawpart + '.' + graph_extension),
            dpi=graph_dpi)
        putlogo(fig_correlmatrices)
        fig_correlmatrices.savefig(
            os.path.join(ip.user_ns['auximages_dir'],
                         'correlation_' +
                         samplename + rawpart + '.' + graph_extension),
            dpi=graph_dpi)
        writemarkdown("### Collected images from all distances")
        plt.show()
    writemarkdown("Updated badfsns list:")
    writemarkdown('[' + ', '.join(str(f) for f in ip.user_ns['badfsns']) + ']')
    writemarkdown("Updated badfsns list using datcmp:")
    writemarkdown('[' + ', '.join(str(f) for f in ip.user_ns['badfsns_datcmp']) + ']')
    # publish the results in the interactive namespace for later steps
    ip.user_ns['_data1d'] = data1d
    ip.user_ns['_data2d'] = data2d
    ip.user_ns['_headers_sample'] = headers_tosave
    ip.user_ns['_rowavg'] = rowavg
def _merge_two_curves(curve1: Curve, curve2: Curve, qmin, qmax, qsep, use_additive_constant=False):
    """Merge two scattering curves into one.

    :param curve1: curve measured at the longer sample-to-detector distance
    :type curve1: sastool.classes.curve.GeneralCurve
    :param curve2: curve measured at the shorter distance
    :type curve2: sastool.classes.curve.GeneralCurve
    :param qmin: lower bound of the interval used to fit the scaling factor
    :type qmin: float
    :param qmax: upper bound of the interval used to fit the scaling factor
    :type qmax: float
    :param qsep: q value where the two curves are tailored together
    :type qsep: float
    :return: merged_curve, factor, background, stat
    :rtype: tuple of a sastool.classes2.curve.Curve, the factor, the
        background and the fit statistics dict
    """
    curve1 = curve1.sanitize()
    curve2 = curve2.sanitize()
    # Interpolate the curve with fewer points in [qmin, qmax] onto the
    # q-grid of the denser one.
    trimmed1 = curve1.trim(qmin, qmax)
    trimmed2 = curve2.trim(qmin, qmax)
    if len(trimmed1) > len(trimmed2):
        curve2_interp = trimmed2
        curve1_interp = curve1.interpolate(curve2_interp.q)
    else:
        curve1_interp = trimmed1
        curve2_interp = curve2.interpolate(curve1_interp.q)
    # A FixedParameter pins the background to zero unless an additive
    # constant was requested.
    bg_init = 0 if use_additive_constant else FixedParameter(0)
    factor, bg, stat = nonlinear_odr(curve2_interp.Intensity, curve1_interp.Intensity,
                                     curve2_interp.Error, curve1_interp.Error,
                                     lambda x, factor, bg: x * factor + bg, [1.0, bg_init])
    return Curve.merge(curve1 - bg, curve2 * factor, qsep), factor, bg, stat
def _scale_two_exposures(exp1, exp2, qmin, qmax, N=10, use_additive_constant=False):
    """Fit the multiplicative factor (and optional additive background)
    scaling `exp2` onto `exp1`, using radial averages of both exposures on a
    common N-point q-grid spanning [qmin, qmax]. Returns (factor, bg)."""
    common_q = np.linspace(qmin, qmax, N)
    rad1 = exp1.radial_average(qrange=common_q, raw_result=False)
    rad2 = exp2.radial_average(qrange=common_q, raw_result=False)
    # fix the background at zero unless an additive constant is requested
    bg_init = 0 if use_additive_constant else FixedParameter(0)
    factor, bg, stat = nonlinear_odr(rad2.y, rad1.y, rad2.dy, rad1.dy,
                                     lambda x, factor, bg: x * factor + bg,
                                     [1, bg_init])
    return factor, bg
def unite(samplename, uniqmin=[], uniqmax=[], uniqsep=[], graph_ncols=2, graph_subplotpars={'hspace': 0.3},
          graph_extension='png', graph_dpi=80, additive_constant=False):
    """Merge the scattering curves of `samplename` measured at different
    sample-to-detector distances into a single united curve.

    The curves are taken from the ``_data1d`` entry of the IPython user
    namespace (produced by summarize()) and scaled together pairwise, going
    from the longest distance towards the shortest. For each adjacent pair,
    the scaling factor (and, if `additive_constant` is True, an additive
    background) is fitted on the overlap interval [qmin, qmax] and the
    curves are joined at qsep.

    Inputs:
        samplename: name of the sample to unite
        uniqmin, uniqmax, uniqsep: lists (or single numbers) of fitting
            limits and joining points, one per pair of adjacent distances;
            missing entries are auto-detected from the curve overlaps.
        graph_ncols, graph_subplotpars, graph_extension, graph_dpi:
            plotting / hardcopy parameters
        additive_constant: if an additive background is also fitted

    The united curve is stored in ``_data1dunited[samplename]`` in the user
    namespace and saved in the ``saveto_dir``.

    NOTE(review): the mutable default arguments (lists, dict) are shared
    between calls; they appear to be only read, not mutated -- confirm.
    """
    ip = get_ipython()
    # accept scalar limits: promote them to one-element lists
    if isinstance(uniqmin, numbers.Number):
        uniqmin = [uniqmin]
    if isinstance(uniqmax, numbers.Number):
        uniqmax = [uniqmax]
    if isinstance(uniqsep, numbers.Number):
        uniqsep = [uniqsep]
    data1d = ip.user_ns['_data1d'][samplename]
    print("Uniting measurements of sample %s at different s-d distances" % samplename)
    uniparams = {'qmin': uniqmin, 'qmax': uniqmax, 'qsep': uniqsep}
    # pad each parameter list with None up to (number of distances - 1), so
    # missing entries trigger auto-detection below
    for p in uniparams:
        uniparams[p] = uniparams[p] + [None] * \
            max(0, len(data1d) - 1 - len(uniparams[p]))
    # longest distance first: that curve covers the smallest q values
    dists = list(reversed(sorted(data1d.keys())))
    if len(dists) < 2:
        print("Less than two distances found for sample %s; no point of uniting." % samplename)
        return
    united = None
    graph_nrows = int(
        np.ceil((len(dists)) / (graph_ncols * 1.0)))
    fig = plt.figure()
    unitedaxis = fig.add_subplot(graph_nrows, graph_ncols, 1)
    factor = 1.0
    for idx, dist1, dist2, qmin, qmax, qsep in zip(list(range(len(dists) - 1)),
                                                   dists[:-1], dists[1:],
                                                   uniparams['qmin'],
                                                   uniparams['qmax'],
                                                   uniparams['qsep']):
        print(" Scaling together distances %f and %f mm" % (dist1, dist2), flush=True)
        if united is None:
            united = data1d[dist1]
        # auto-detect missing fit limits from the overlap of the two curves
        if qmin is None:
            qmin = data1d[dist2].sanitize().q.min()
            print(" Auto-detected qmin:", qmin, flush=True)
        if qmax is None:
            qmax = data1d[dist1].sanitize().q.max()
            print(" Auto-detected qmax:", qmax, flush=True)
        if qsep is None:
            qsep = 0.5 * (qmin + qmax)
            print(" Auto-detected qsep:", qsep, flush=True)
        ax = fig.add_subplot(graph_nrows, graph_ncols, 2 + idx)
        (factor * data1d[dist1]).loglog(axes=ax, label='%.2f mm' % dist1)
        united, factor1, bg, stat = _merge_two_curves(united,
                                                      data1d[dist2], qmin, qmax, qsep,
                                                      use_additive_constant=additive_constant)
        # accumulate the factor: each shorter-distance curve is scaled onto
        # the already-united longer-distance part
        factor = factor1 * factor
        uniparams['qmin'][idx] = qmin
        uniparams['qmax'][idx] = qmax
        uniparams['qsep'][idx] = qsep
        print(" Scaling factor is", factor.tostring(), flush=True)
        if not additive_constant:
            print(" Additive constant has not been used.", flush=True)
        else:
            print(" Additive constant is:", bg.tostring(), flush=True)
        print(" Reduced Chi^2 of the ODR fit:", stat['Chi2_reduced'], flush=True)
        print(" DoF of the ODR fit:", stat['DoF'], flush=True)
        (data1d[dist2] * factor + bg).loglog(axes=ax, label='%.2f mm' % dist2)
        ax.set_xlabel('q (' + qunit() + ')')
        ax.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
        ax.legend(loc='best')
        #        ax.grid(which='both')
        ax.axis('tight')
        ax.set_title('Factor: ' + str(factor))
        # mark the fitting interval (red dashes) and the joining point (black)
        lims = ax.axis()
        ax.plot([qmin, qmin], lims[2:], '--r', lw=2)
        ax.plot([qmax, qmax], lims[2:], '--r', lw=2)
        ax.plot([qsep, qsep], lims[2:], '--k')
        ax.grid(True, which='both')
    if '_data1dunited' not in ip.user_ns:
        ip.user_ns['_data1dunited'] = {}
    united.loglog(axes=unitedaxis)
    unitedaxis.set_xlabel('q (' + qunit() + ')')
    unitedaxis.set_ylabel('$d\\Sigma/d\\Omega$ (cm$^{-1}$ sr$^{-1}$)')
    unitedaxis.legend(loc='best')
    unitedaxis.set_title('United scattering of %s' % samplename)
    unitedaxis.grid(True, which='both')
    #    unitedaxis.grid(which='both')
    unitedaxis.axis('tight')
    lims = unitedaxis.axis()
    for qs in uniparams['qsep']:
        unitedaxis.plot([qs] * 2, lims[2:], '--r')
    ip.user_ns['_data1dunited'][samplename] = united
    putlogo()
    fig.subplots_adjust(**graph_subplotpars)
    plt.savefig(
        os.path.join(ip.user_ns['auximages_dir'], 'uniting_' + samplename + '.' + graph_extension), dpi=graph_dpi)
    print(" United curve spans the following ranges:")
    print(" q_min: ",
          print_abscissavalue(united.q.min(), ip.user_ns['_headers_sample'][samplename][dists[0]][0].wavelength))
    print(" q_max: ",
          print_abscissavalue(united.q.max(), ip.user_ns['_headers_sample'][samplename][dists[0]][0].wavelength))
    print(" q_max/q_min:", united.q.max() / united.q.min())
    print(" I_min: ", united.Intensity.min(), "cm^{-1}")
    print(" I_max: ", united.Intensity.max(), "cm^{-1}")
    print(" I_max/I_min:", united.Intensity.max() / united.Intensity.min())
    print(" # of points: ", len(united))
    united.save(os.path.join(ip.user_ns['saveto_dir'], 'united_' + samplename + '.txt'))
    plt.show()
|
awacha/credolib
|
credolib/utils.py
|
putlogo
|
python
|
def putlogo(figure=None):
    """Put the CREDO logo at the bottom right of `figure` (the current
    matplotlib figure if None), then restore the original current axes."""
    # removed unused local `ip = get_ipython()` -- it was never referenced
    if figure is None:
        figure = plt.gcf()
    curraxis = figure.gca()  # remember so the caller's axes stay current
    logoaxis = figure.add_axes([0.89, 0.01, 0.1, 0.1], anchor='NW')
    logoaxis.set_axis_off()
    logoaxis.xaxis.set_visible(False)
    logoaxis.yaxis.set_visible(False)
    logoaxis.imshow(credo_logo)
    figure.subplots_adjust(right=0.98)
    figure.sca(curraxis)
|
Puts the CREDO logo at the bottom right of the current figure (or
the figure given by the ``figure`` argument if supplied).
|
train
|
https://github.com/awacha/credolib/blob/11c0be3eea7257d3d6e13697d3e76ce538f2f1b2/credolib/utils.py#L16-L30
| null |
__all__=['writemarkdown','putlogo','print_abscissavalue','figsize']
from IPython.display import display,Markdown
from IPython.core.getipython import get_ipython
import matplotlib.pyplot as plt
import sastool
import numpy as np
import pkg_resources
from scipy.misc import imread
credo_logo = imread(pkg_resources.resource_filename('credolib','resource/credo_logo.png'))
def writemarkdown(*args):
    """Render the space-joined string form of *args as Markdown output."""
    text = ' '.join(str(arg) for arg in args)
    display(Markdown(text))
def print_abscissavalue(q, wavelength=None, distance=None, digits=10):
    """Format the scattering variable `q` as a human-readable string,
    including the equivalent d-spacing and Rg values, and -- when
    `wavelength` / `distance` are given -- the scattering angle in degrees
    and the radius on the detector in mm. `digits` sets the number of
    fractional digits of the derived quantities."""
    qunit = sastool.libconfig.qunit()
    dunit = sastool.libconfig.dunit()
    parts = [str(q), ' ', qunit, '(']
    parts.append(f" <=> {2 * np.pi / q:.{digits}f} {dunit}(d)")
    parts.append(f" <=> {1 / q:.{digits}f} {dunit}(Rg)")
    if wavelength is not None:
        # scattering angle from the Bragg relation q = 4*pi*sin(theta)/lambda
        tth_rad = 2 * np.arcsin((q * wavelength) / 4 / np.pi)
        tth_deg = tth_rad * 180.0 / np.pi
        parts.append(f" <=> {tth_deg:.{digits}f}\xb0")
        if distance is not None:
            radius = np.tan(tth_rad) * distance
            parts.append(f" <=> {radius:.{digits}f} mm(r)")
    parts.append(')')
    return ''.join(parts)
class figsize(object):
    """Context manager that temporarily changes matplotlib's default figure
    size (the ``figure.figsize`` rc parameter).

    Note that the size is changed already in __init__(), i.e. when the
    ``with figsize(w, h):`` expression is evaluated; __exit__() restores
    the previous default.
    """
    def __init__(self, sizex, sizey):
        # remember the current default so __exit__() can restore it
        self._originalsize=plt.rcParams['figure.figsize']
        plt.rcParams['figure.figsize']=(sizex, sizey)
    def __enter__(self):
        pass
    def __exit__(self, exc_type, exc_val, exc_tb):
        plt.rcParams['figure.figsize']=self._originalsize
        return False # we don't want to suppress the exception, if any
|
kshlm/gant
|
gant/utils/docker_helper.py
|
DockerHelper.image_by_id
|
python
|
def image_by_id(self, id):
    """Return the image dict whose 'Id' equals `id`, or None if absent
    (or if `id` is falsy)."""
    if not id:
        return None
    matches = (image for image in self.images() if image['Id'] == id)
    return next(matches, None)
|
Return image with given Id
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L21-L28
| null |
class DockerHelper (docker.Client):
"""
Extended docker client with some helper functions
"""
def __init__(self):
super(DockerHelper, self).__init__(version=DEFAULT_DOCKER_API_VERSION)
def image_by_tag(self, tag):
"""
Return image with given tag
"""
if not tag:
return None
return next((image for image in self.images() if tag
in image['RepoTags']), None)
def image_exists(self, id=None, tag=None):
"""
Check if specified image exists
"""
exists = False
if id and self.image_by_id(id):
exists = True
elif tag and self.image_by_tag(tag):
exists = True
return exists
def container_by_id(self, id):
"""
Returns container with given id
"""
if not id:
return None
return next((container for container in self.containers(all=True)
if container['Id'] == id), None)
def container_by_name(self, name):
"""
Returns container with given name
"""
if not name:
return None
# docker prepends a '/' to container names in the container dict
name = '/'+name
return next((container for container in self.containers(all=True)
if name in container['Names']), None)
def container_exists(self, id=None, name=None):
"""
Checks if container exists already
"""
exists = False
if id and self.container_by_id(id):
exists = True
elif name and self.container_by_name(name):
exists = True
return exists
def container_running(self, id=None, name=None):
"""
Checks if container is running
"""
running = False
if id:
running = self.inspect_container(id)['State']['Running']
elif name:
running = self.inspect_container(name)['State']['Running']
return running
def get_container_ip(self, container):
"""
Returns the internal ip of the container if available
"""
info = self.inspect_container(container)
if not info:
return None
netInfo = info['NetworkSettings']
if not netInfo:
return None
ip = netInfo['IPAddress']
if not ip:
return None
return ip
|
kshlm/gant
|
gant/utils/docker_helper.py
|
DockerHelper.image_by_tag
|
python
|
def image_by_tag(self, tag):
    """Return the first image dict whose 'RepoTags' contains `tag`, or
    None if absent (or if `tag` is falsy)."""
    if not tag:
        return None
    for image in self.images():
        if tag in image['RepoTags']:
            return image
    return None
|
Return image with given tag
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L30-L38
| null |
class DockerHelper (docker.Client):
"""
Extended docker client with some helper functions
"""
def __init__(self):
super(DockerHelper, self).__init__(version=DEFAULT_DOCKER_API_VERSION)
def image_by_id(self, id):
"""
Return image with given Id
"""
if not id:
return None
return next((image for image in self.images() if image['Id'] == id),
None)
def image_exists(self, id=None, tag=None):
"""
Check if specified image exists
"""
exists = False
if id and self.image_by_id(id):
exists = True
elif tag and self.image_by_tag(tag):
exists = True
return exists
def container_by_id(self, id):
"""
Returns container with given id
"""
if not id:
return None
return next((container for container in self.containers(all=True)
if container['Id'] == id), None)
def container_by_name(self, name):
"""
Returns container with given name
"""
if not name:
return None
# docker prepends a '/' to container names in the container dict
name = '/'+name
return next((container for container in self.containers(all=True)
if name in container['Names']), None)
def container_exists(self, id=None, name=None):
"""
Checks if container exists already
"""
exists = False
if id and self.container_by_id(id):
exists = True
elif name and self.container_by_name(name):
exists = True
return exists
def container_running(self, id=None, name=None):
"""
Checks if container is running
"""
running = False
if id:
running = self.inspect_container(id)['State']['Running']
elif name:
running = self.inspect_container(name)['State']['Running']
return running
def get_container_ip(self, container):
"""
Returns the internal ip of the container if available
"""
info = self.inspect_container(container)
if not info:
return None
netInfo = info['NetworkSettings']
if not netInfo:
return None
ip = netInfo['IPAddress']
if not ip:
return None
return ip
|
kshlm/gant
|
gant/utils/docker_helper.py
|
DockerHelper.image_exists
|
python
|
def image_exists(self, id=None, tag=None):
    """Return True if an image with the given `id` or `tag` exists."""
    if id and self.image_by_id(id):
        return True
    if tag and self.image_by_tag(tag):
        return True
    return False
|
Check if specified image exists
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L40-L50
| null |
class DockerHelper (docker.Client):
"""
Extended docker client with some helper functions
"""
def __init__(self):
super(DockerHelper, self).__init__(version=DEFAULT_DOCKER_API_VERSION)
def image_by_id(self, id):
"""
Return image with given Id
"""
if not id:
return None
return next((image for image in self.images() if image['Id'] == id),
None)
def image_by_tag(self, tag):
"""
Return image with given tag
"""
if not tag:
return None
return next((image for image in self.images() if tag
in image['RepoTags']), None)
def container_by_id(self, id):
"""
Returns container with given id
"""
if not id:
return None
return next((container for container in self.containers(all=True)
if container['Id'] == id), None)
def container_by_name(self, name):
"""
Returns container with given name
"""
if not name:
return None
# docker prepends a '/' to container names in the container dict
name = '/'+name
return next((container for container in self.containers(all=True)
if name in container['Names']), None)
def container_exists(self, id=None, name=None):
"""
Checks if container exists already
"""
exists = False
if id and self.container_by_id(id):
exists = True
elif name and self.container_by_name(name):
exists = True
return exists
def container_running(self, id=None, name=None):
"""
Checks if container is running
"""
running = False
if id:
running = self.inspect_container(id)['State']['Running']
elif name:
running = self.inspect_container(name)['State']['Running']
return running
def get_container_ip(self, container):
"""
Returns the internal ip of the container if available
"""
info = self.inspect_container(container)
if not info:
return None
netInfo = info['NetworkSettings']
if not netInfo:
return None
ip = netInfo['IPAddress']
if not ip:
return None
return ip
|
kshlm/gant
|
gant/utils/docker_helper.py
|
DockerHelper.container_by_id
|
python
|
def container_by_id(self, id):
    """Return the container dict (including stopped ones) whose 'Id'
    equals `id`, or None if absent (or if `id` is falsy)."""
    if not id:
        return None
    for container in self.containers(all=True):
        if container['Id'] == id:
            return container
    return None
|
Returns container with given id
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L52-L59
| null |
class DockerHelper (docker.Client):
"""
Extended docker client with some helper functions
"""
def __init__(self):
super(DockerHelper, self).__init__(version=DEFAULT_DOCKER_API_VERSION)
def image_by_id(self, id):
"""
Return image with given Id
"""
if not id:
return None
return next((image for image in self.images() if image['Id'] == id),
None)
def image_by_tag(self, tag):
"""
Return image with given tag
"""
if not tag:
return None
return next((image for image in self.images() if tag
in image['RepoTags']), None)
def image_exists(self, id=None, tag=None):
"""
Check if specified image exists
"""
exists = False
if id and self.image_by_id(id):
exists = True
elif tag and self.image_by_tag(tag):
exists = True
return exists
def container_by_name(self, name):
"""
Returns container with given name
"""
if not name:
return None
# docker prepends a '/' to container names in the container dict
name = '/'+name
return next((container for container in self.containers(all=True)
if name in container['Names']), None)
def container_exists(self, id=None, name=None):
"""
Checks if container exists already
"""
exists = False
if id and self.container_by_id(id):
exists = True
elif name and self.container_by_name(name):
exists = True
return exists
def container_running(self, id=None, name=None):
"""
Checks if container is running
"""
running = False
if id:
running = self.inspect_container(id)['State']['Running']
elif name:
running = self.inspect_container(name)['State']['Running']
return running
def get_container_ip(self, container):
"""
Returns the internal ip of the container if available
"""
info = self.inspect_container(container)
if not info:
return None
netInfo = info['NetworkSettings']
if not netInfo:
return None
ip = netInfo['IPAddress']
if not ip:
return None
return ip
|
kshlm/gant
|
gant/utils/docker_helper.py
|
DockerHelper.container_by_name
|
python
|
def container_by_name(self, name):
    """Return the container dict (including stopped ones) whose 'Names'
    list contains `name`, or None if absent (or if `name` is falsy)."""
    if not name:
        return None
    # docker prepends a '/' to container names in the container dict
    prefixed = '/' + name
    candidates = (container for container in self.containers(all=True)
                  if prefixed in container['Names'])
    return next(candidates, None)
|
Returns container with given name
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L61-L71
| null |
class DockerHelper (docker.Client):
"""
Extended docker client with some helper functions
"""
def __init__(self):
super(DockerHelper, self).__init__(version=DEFAULT_DOCKER_API_VERSION)
def image_by_id(self, id):
"""
Return image with given Id
"""
if not id:
return None
return next((image for image in self.images() if image['Id'] == id),
None)
def image_by_tag(self, tag):
"""
Return image with given tag
"""
if not tag:
return None
return next((image for image in self.images() if tag
in image['RepoTags']), None)
def image_exists(self, id=None, tag=None):
"""
Check if specified image exists
"""
exists = False
if id and self.image_by_id(id):
exists = True
elif tag and self.image_by_tag(tag):
exists = True
return exists
def container_by_id(self, id):
"""
Returns container with given id
"""
if not id:
return None
return next((container for container in self.containers(all=True)
if container['Id'] == id), None)
def container_exists(self, id=None, name=None):
"""
Checks if container exists already
"""
exists = False
if id and self.container_by_id(id):
exists = True
elif name and self.container_by_name(name):
exists = True
return exists
def container_running(self, id=None, name=None):
"""
Checks if container is running
"""
running = False
if id:
running = self.inspect_container(id)['State']['Running']
elif name:
running = self.inspect_container(name)['State']['Running']
return running
def get_container_ip(self, container):
"""
Returns the internal ip of the container if available
"""
info = self.inspect_container(container)
if not info:
return None
netInfo = info['NetworkSettings']
if not netInfo:
return None
ip = netInfo['IPAddress']
if not ip:
return None
return ip
|
kshlm/gant
|
gant/utils/docker_helper.py
|
DockerHelper.container_exists
|
python
|
def container_exists(self, id=None, name=None):
exists = False
if id and self.container_by_id(id):
exists = True
elif name and self.container_by_name(name):
exists = True
return exists
|
Checks if container exists already
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L73-L83
| null |
class DockerHelper (docker.Client):
"""
Extended docker client with some helper functions
"""
def __init__(self):
super(DockerHelper, self).__init__(version=DEFAULT_DOCKER_API_VERSION)
def image_by_id(self, id):
"""
Return image with given Id
"""
if not id:
return None
return next((image for image in self.images() if image['Id'] == id),
None)
def image_by_tag(self, tag):
"""
Return image with given tag
"""
if not tag:
return None
return next((image for image in self.images() if tag
in image['RepoTags']), None)
def image_exists(self, id=None, tag=None):
"""
Check if specified image exists
"""
exists = False
if id and self.image_by_id(id):
exists = True
elif tag and self.image_by_tag(tag):
exists = True
return exists
def container_by_id(self, id):
"""
Returns container with given id
"""
if not id:
return None
return next((container for container in self.containers(all=True)
if container['Id'] == id), None)
def container_by_name(self, name):
"""
Returns container with given name
"""
if not name:
return None
# docker prepends a '/' to container names in the container dict
name = '/'+name
return next((container for container in self.containers(all=True)
if name in container['Names']), None)
def container_running(self, id=None, name=None):
"""
Checks if container is running
"""
running = False
if id:
running = self.inspect_container(id)['State']['Running']
elif name:
running = self.inspect_container(name)['State']['Running']
return running
def get_container_ip(self, container):
"""
Returns the internal ip of the container if available
"""
info = self.inspect_container(container)
if not info:
return None
netInfo = info['NetworkSettings']
if not netInfo:
return None
ip = netInfo['IPAddress']
if not ip:
return None
return ip
|
kshlm/gant
|
gant/utils/docker_helper.py
|
DockerHelper.container_running
|
python
|
def container_running(self, id=None, name=None):
running = False
if id:
running = self.inspect_container(id)['State']['Running']
elif name:
running = self.inspect_container(name)['State']['Running']
return running
|
Checks if container is running
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L85-L94
| null |
class DockerHelper (docker.Client):
"""
Extended docker client with some helper functions
"""
def __init__(self):
super(DockerHelper, self).__init__(version=DEFAULT_DOCKER_API_VERSION)
def image_by_id(self, id):
"""
Return image with given Id
"""
if not id:
return None
return next((image for image in self.images() if image['Id'] == id),
None)
def image_by_tag(self, tag):
"""
Return image with given tag
"""
if not tag:
return None
return next((image for image in self.images() if tag
in image['RepoTags']), None)
def image_exists(self, id=None, tag=None):
"""
Check if specified image exists
"""
exists = False
if id and self.image_by_id(id):
exists = True
elif tag and self.image_by_tag(tag):
exists = True
return exists
def container_by_id(self, id):
"""
Returns container with given id
"""
if not id:
return None
return next((container for container in self.containers(all=True)
if container['Id'] == id), None)
def container_by_name(self, name):
"""
Returns container with given name
"""
if not name:
return None
# docker prepends a '/' to container names in the container dict
name = '/'+name
return next((container for container in self.containers(all=True)
if name in container['Names']), None)
def container_exists(self, id=None, name=None):
"""
Checks if container exists already
"""
exists = False
if id and self.container_by_id(id):
exists = True
elif name and self.container_by_name(name):
exists = True
return exists
def get_container_ip(self, container):
"""
Returns the internal ip of the container if available
"""
info = self.inspect_container(container)
if not info:
return None
netInfo = info['NetworkSettings']
if not netInfo:
return None
ip = netInfo['IPAddress']
if not ip:
return None
return ip
|
kshlm/gant
|
gant/utils/docker_helper.py
|
DockerHelper.get_container_ip
|
python
|
def get_container_ip(self, container):
info = self.inspect_container(container)
if not info:
return None
netInfo = info['NetworkSettings']
if not netInfo:
return None
ip = netInfo['IPAddress']
if not ip:
return None
return ip
|
Returns the internal ip of the container if available
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L96-L112
| null |
class DockerHelper (docker.Client):
"""
Extended docker client with some helper functions
"""
def __init__(self):
super(DockerHelper, self).__init__(version=DEFAULT_DOCKER_API_VERSION)
def image_by_id(self, id):
"""
Return image with given Id
"""
if not id:
return None
return next((image for image in self.images() if image['Id'] == id),
None)
def image_by_tag(self, tag):
"""
Return image with given tag
"""
if not tag:
return None
return next((image for image in self.images() if tag
in image['RepoTags']), None)
def image_exists(self, id=None, tag=None):
"""
Check if specified image exists
"""
exists = False
if id and self.image_by_id(id):
exists = True
elif tag and self.image_by_tag(tag):
exists = True
return exists
def container_by_id(self, id):
"""
Returns container with given id
"""
if not id:
return None
return next((container for container in self.containers(all=True)
if container['Id'] == id), None)
def container_by_name(self, name):
"""
Returns container with given name
"""
if not name:
return None
# docker prepends a '/' to container names in the container dict
name = '/'+name
return next((container for container in self.containers(all=True)
if name in container['Names']), None)
def container_exists(self, id=None, name=None):
"""
Checks if container exists already
"""
exists = False
if id and self.container_by_id(id):
exists = True
elif name and self.container_by_name(name):
exists = True
return exists
def container_running(self, id=None, name=None):
"""
Checks if container is running
"""
running = False
if id:
running = self.inspect_container(id)['State']['Running']
elif name:
running = self.inspect_container(name)['State']['Running']
return running
|
kshlm/gant
|
gant/utils/ssh.py
|
launch_shell
|
python
|
def launch_shell(username, hostname, password, port=22):
if not username or not hostname or not password:
return False
with tempfile.NamedTemporaryFile() as tmpFile:
os.system(sshCmdLine.format(password, tmpFile.name, username, hostname,
port))
return True
|
Launches an ssh shell
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/ssh.py#L10-L20
| null |
from __future__ import unicode_literals, print_function
import os
import tempfile
sshCmdLine = ('sshpass -p {0} ssh -q -o UserKnownHostsFile={1} '
'-o StrictHostKeyChecking=no {2}@{3} -p {4}')
def do_cmd(username, hostname, password, command, port=22):
"""
Runs a command via ssh
"""
if not username or not hostname or not password or not command:
return False
with tempfile.NamedTemporaryFile() as tmpFile:
os.system("{0} {1}".format(sshCmdLine.format(password, tmpFile.name,
username, hostname, port),
command))
return True
|
kshlm/gant
|
gant/utils/gant_docker.py
|
check_permissions
|
python
|
def check_permissions():
if (
not grp.getgrnam('docker').gr_gid in os.getgroups()
and not os.geteuid() == 0
):
exitStr = """
User doesn't have permission to use docker.
You can do either of the following,
1. Add user to the 'docker' group (preferred)
2. Run command as superuser using either 'sudo' or 'su -c'
"""
exit(exitStr)
|
Checks if current user can access docker
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/gant_docker.py#L14-L28
| null |
from __future__ import unicode_literals
import os
import grp
import time
import json
from click import echo
from .docker_helper import DockerHelper
from . import ssh
class GantDocker (DockerHelper):
"""
Gluster test env specific helper functions for docker
"""
def __init__(self):
super(GantDocker, self).__init__()
def setConf(self, conf):
self.conf = conf
def __handle_build_stream(self, stream, verbose):
for line in stream:
d = json.loads(line.decode('utf-8'))
if "error" in d:
return d["error"].strip()
elif verbose:
echo(d["stream"].strip())
return None
def build_base_image_cmd(self, force):
"""
Build the glusterbase image
"""
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
verbose = self.conf.verbose
if self.image_exists(tag=basetag):
if not force:
echo("Image with tag '{0}' already exists".format(basetag))
return self.image_by_tag(basetag)
else:
self.remove_image(basetag)
echo("Building base image")
stream = self.build(path=basedir, rm=True, tag=basetag)
err = self.__handle_build_stream(stream, verbose)
if err:
echo("Building base image failed with following error:")
echo(err)
return None
image = self.image_by_tag(basetag)
echo("Built base image {0} (Id: {1})".format(basetag, image['Id']))
return image
def build_main_image_cmd(self, srcdir, force):
"""
Build the main image to be used for launching containers
"""
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
maintag = self.conf.maintag
if not self.image_exists(tag=basetag):
if not force:
exit("Base image with tag {0} does not exist".format(basetag))
else:
echo("FORCE given. Forcefully building the base image.")
self.build_base_image_cmd(force)
if self.image_exists(tag=maintag):
self.remove_image(tag=maintag)
build_command = "/build/make-install-gluster.sh"
container = self.create_container(image=basetag, command=build_command,
volumes=["/build", "/src"])
self.start(container, binds={basedir: "/build", srcdir: "/src"})
echo('Building main image')
while self.inspect_container(container)["State"]["Running"]:
time.sleep(5)
if not self.inspect_container(container)["State"]["ExitCode"] == 0:
echo("Build failed")
echo("Dumping logs")
echo(self.logs(container))
exit()
# The docker remote api expects the repository and tag to be seperate
# items for commit
repo = maintag.split(':')[0]
tag = maintag.split(':')[1]
image = self.commit(container['Id'], repository=repo, tag=tag)
echo("Built main image {0} (Id: {1})".format(maintag, image['Id']))
def launch_cmd(self, n, force):
"""
Launch the specified docker containers using the main image
"""
check_permissions()
prefix = self.conf.prefix
maintag = self.conf.maintag
commandStr = "supervisord -c /etc/supervisor/conf.d/supervisord.conf"
for i in range(1, n+1):
cName = "{0}-{1}".format(prefix, i)
if self.container_exists(name=cName):
if not force:
exit("Container with name {0} already "
"exists.".format(cName))
else:
if self.container_running(name=cName):
self.stop(cName)
self.remove_container(cName, v=True)
c = self.create_container(image=maintag, name=cName,
command=commandStr, volumes=["/bricks"])
self.start(c['Id'], privileged=True)
time.sleep(2) # Wait for container to startup
echo("Launched {0} (Id: {1})".format(cName, c['Id']))
c = None
cName = None
def stop_cmd(self, name, force):
"""
Stop the specified or all docker containers launched by us
"""
check_permissions()
if name:
echo("Would stop container {0}".format(name))
else:
echo("Would stop all containers")
echo("For now use 'docker stop' to stop the containers")
def info_cmd(self, args):
"""
Print information on the built up environment
"""
echo('Would print info on the gluster env')
def ssh_cmd(self, name, ssh_command):
"""
SSH into given container and executre command if given
"""
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for "
"container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password', " ".join(ssh_command))
else:
ssh.launch_shell('root', ip, 'password')
def ip_cmd(self, name):
"""
Print ip of given container
"""
if not self.container_exists(name=name):
exit('Unknown container {0}'.format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for"
" container {0}".format(name))
else:
echo(ip)
def gluster_cmd(self, args):
name = args["<name>"]
ssh_command = args["<gluster-command>"]
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for"
" container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password',
"gluster {0}".format(" ".join(ssh_command)))
else:
ssh.do_cmd('root', ip, 'password', 'gluster')
|
kshlm/gant
|
gant/utils/gant_docker.py
|
GantDocker.build_base_image_cmd
|
python
|
def build_base_image_cmd(self, force):
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
verbose = self.conf.verbose
if self.image_exists(tag=basetag):
if not force:
echo("Image with tag '{0}' already exists".format(basetag))
return self.image_by_tag(basetag)
else:
self.remove_image(basetag)
echo("Building base image")
stream = self.build(path=basedir, rm=True, tag=basetag)
err = self.__handle_build_stream(stream, verbose)
if err:
echo("Building base image failed with following error:")
echo(err)
return None
image = self.image_by_tag(basetag)
echo("Built base image {0} (Id: {1})".format(basetag, image['Id']))
return image
|
Build the glusterbase image
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/gant_docker.py#L50-L76
|
[
"def check_permissions():\n \"\"\"\n Checks if current user can access docker\n \"\"\"\n if (\n not grp.getgrnam('docker').gr_gid in os.getgroups()\n and not os.geteuid() == 0\n ):\n exitStr = \"\"\"\n User doesn't have permission to use docker.\n You can do either of the following,\n 1. Add user to the 'docker' group (preferred)\n 2. Run command as superuser using either 'sudo' or 'su -c'\n \"\"\"\n exit(exitStr)\n"
] |
class GantDocker (DockerHelper):
"""
Gluster test env specific helper functions for docker
"""
def __init__(self):
super(GantDocker, self).__init__()
def setConf(self, conf):
self.conf = conf
def __handle_build_stream(self, stream, verbose):
for line in stream:
d = json.loads(line.decode('utf-8'))
if "error" in d:
return d["error"].strip()
elif verbose:
echo(d["stream"].strip())
return None
def build_main_image_cmd(self, srcdir, force):
"""
Build the main image to be used for launching containers
"""
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
maintag = self.conf.maintag
if not self.image_exists(tag=basetag):
if not force:
exit("Base image with tag {0} does not exist".format(basetag))
else:
echo("FORCE given. Forcefully building the base image.")
self.build_base_image_cmd(force)
if self.image_exists(tag=maintag):
self.remove_image(tag=maintag)
build_command = "/build/make-install-gluster.sh"
container = self.create_container(image=basetag, command=build_command,
volumes=["/build", "/src"])
self.start(container, binds={basedir: "/build", srcdir: "/src"})
echo('Building main image')
while self.inspect_container(container)["State"]["Running"]:
time.sleep(5)
if not self.inspect_container(container)["State"]["ExitCode"] == 0:
echo("Build failed")
echo("Dumping logs")
echo(self.logs(container))
exit()
# The docker remote api expects the repository and tag to be seperate
# items for commit
repo = maintag.split(':')[0]
tag = maintag.split(':')[1]
image = self.commit(container['Id'], repository=repo, tag=tag)
echo("Built main image {0} (Id: {1})".format(maintag, image['Id']))
def launch_cmd(self, n, force):
"""
Launch the specified docker containers using the main image
"""
check_permissions()
prefix = self.conf.prefix
maintag = self.conf.maintag
commandStr = "supervisord -c /etc/supervisor/conf.d/supervisord.conf"
for i in range(1, n+1):
cName = "{0}-{1}".format(prefix, i)
if self.container_exists(name=cName):
if not force:
exit("Container with name {0} already "
"exists.".format(cName))
else:
if self.container_running(name=cName):
self.stop(cName)
self.remove_container(cName, v=True)
c = self.create_container(image=maintag, name=cName,
command=commandStr, volumes=["/bricks"])
self.start(c['Id'], privileged=True)
time.sleep(2) # Wait for container to startup
echo("Launched {0} (Id: {1})".format(cName, c['Id']))
c = None
cName = None
def stop_cmd(self, name, force):
"""
Stop the specified or all docker containers launched by us
"""
check_permissions()
if name:
echo("Would stop container {0}".format(name))
else:
echo("Would stop all containers")
echo("For now use 'docker stop' to stop the containers")
def info_cmd(self, args):
"""
Print information on the built up environment
"""
echo('Would print info on the gluster env')
def ssh_cmd(self, name, ssh_command):
"""
SSH into given container and executre command if given
"""
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for "
"container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password', " ".join(ssh_command))
else:
ssh.launch_shell('root', ip, 'password')
def ip_cmd(self, name):
"""
Print ip of given container
"""
if not self.container_exists(name=name):
exit('Unknown container {0}'.format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for"
" container {0}".format(name))
else:
echo(ip)
def gluster_cmd(self, args):
name = args["<name>"]
ssh_command = args["<gluster-command>"]
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for"
" container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password',
"gluster {0}".format(" ".join(ssh_command)))
else:
ssh.do_cmd('root', ip, 'password', 'gluster')
|
kshlm/gant
|
gant/utils/gant_docker.py
|
GantDocker.build_main_image_cmd
|
python
|
def build_main_image_cmd(self, srcdir, force):
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
maintag = self.conf.maintag
if not self.image_exists(tag=basetag):
if not force:
exit("Base image with tag {0} does not exist".format(basetag))
else:
echo("FORCE given. Forcefully building the base image.")
self.build_base_image_cmd(force)
if self.image_exists(tag=maintag):
self.remove_image(tag=maintag)
build_command = "/build/make-install-gluster.sh"
container = self.create_container(image=basetag, command=build_command,
volumes=["/build", "/src"])
self.start(container, binds={basedir: "/build", srcdir: "/src"})
echo('Building main image')
while self.inspect_container(container)["State"]["Running"]:
time.sleep(5)
if not self.inspect_container(container)["State"]["ExitCode"] == 0:
echo("Build failed")
echo("Dumping logs")
echo(self.logs(container))
exit()
# The docker remote api expects the repository and tag to be seperate
# items for commit
repo = maintag.split(':')[0]
tag = maintag.split(':')[1]
image = self.commit(container['Id'], repository=repo, tag=tag)
echo("Built main image {0} (Id: {1})".format(maintag, image['Id']))
|
Build the main image to be used for launching containers
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/gant_docker.py#L78-L118
|
[
"def check_permissions():\n \"\"\"\n Checks if current user can access docker\n \"\"\"\n if (\n not grp.getgrnam('docker').gr_gid in os.getgroups()\n and not os.geteuid() == 0\n ):\n exitStr = \"\"\"\n User doesn't have permission to use docker.\n You can do either of the following,\n 1. Add user to the 'docker' group (preferred)\n 2. Run command as superuser using either 'sudo' or 'su -c'\n \"\"\"\n exit(exitStr)\n"
] |
class GantDocker (DockerHelper):
"""
Gluster test env specific helper functions for docker
"""
def __init__(self):
super(GantDocker, self).__init__()
def setConf(self, conf):
self.conf = conf
def __handle_build_stream(self, stream, verbose):
for line in stream:
d = json.loads(line.decode('utf-8'))
if "error" in d:
return d["error"].strip()
elif verbose:
echo(d["stream"].strip())
return None
def build_base_image_cmd(self, force):
"""
Build the glusterbase image
"""
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
verbose = self.conf.verbose
if self.image_exists(tag=basetag):
if not force:
echo("Image with tag '{0}' already exists".format(basetag))
return self.image_by_tag(basetag)
else:
self.remove_image(basetag)
echo("Building base image")
stream = self.build(path=basedir, rm=True, tag=basetag)
err = self.__handle_build_stream(stream, verbose)
if err:
echo("Building base image failed with following error:")
echo(err)
return None
image = self.image_by_tag(basetag)
echo("Built base image {0} (Id: {1})".format(basetag, image['Id']))
return image
def launch_cmd(self, n, force):
"""
Launch the specified docker containers using the main image
"""
check_permissions()
prefix = self.conf.prefix
maintag = self.conf.maintag
commandStr = "supervisord -c /etc/supervisor/conf.d/supervisord.conf"
for i in range(1, n+1):
cName = "{0}-{1}".format(prefix, i)
if self.container_exists(name=cName):
if not force:
exit("Container with name {0} already "
"exists.".format(cName))
else:
if self.container_running(name=cName):
self.stop(cName)
self.remove_container(cName, v=True)
c = self.create_container(image=maintag, name=cName,
command=commandStr, volumes=["/bricks"])
self.start(c['Id'], privileged=True)
time.sleep(2) # Wait for container to startup
echo("Launched {0} (Id: {1})".format(cName, c['Id']))
c = None
cName = None
def stop_cmd(self, name, force):
"""
Stop the specified or all docker containers launched by us
"""
check_permissions()
if name:
echo("Would stop container {0}".format(name))
else:
echo("Would stop all containers")
echo("For now use 'docker stop' to stop the containers")
def info_cmd(self, args):
"""
Print information on the built up environment
"""
echo('Would print info on the gluster env')
def ssh_cmd(self, name, ssh_command):
"""
SSH into given container and executre command if given
"""
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for "
"container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password', " ".join(ssh_command))
else:
ssh.launch_shell('root', ip, 'password')
def ip_cmd(self, name):
"""
Print ip of given container
"""
if not self.container_exists(name=name):
exit('Unknown container {0}'.format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for"
" container {0}".format(name))
else:
echo(ip)
def gluster_cmd(self, args):
name = args["<name>"]
ssh_command = args["<gluster-command>"]
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for"
" container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password',
"gluster {0}".format(" ".join(ssh_command)))
else:
ssh.do_cmd('root', ip, 'password', 'gluster')
|
kshlm/gant
|
gant/utils/gant_docker.py
|
GantDocker.launch_cmd
|
python
|
def launch_cmd(self, n, force):
check_permissions()
prefix = self.conf.prefix
maintag = self.conf.maintag
commandStr = "supervisord -c /etc/supervisor/conf.d/supervisord.conf"
for i in range(1, n+1):
cName = "{0}-{1}".format(prefix, i)
if self.container_exists(name=cName):
if not force:
exit("Container with name {0} already "
"exists.".format(cName))
else:
if self.container_running(name=cName):
self.stop(cName)
self.remove_container(cName, v=True)
c = self.create_container(image=maintag, name=cName,
command=commandStr, volumes=["/bricks"])
self.start(c['Id'], privileged=True)
time.sleep(2) # Wait for container to startup
echo("Launched {0} (Id: {1})".format(cName, c['Id']))
c = None
cName = None
|
Launch the specified docker containers using the main image
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/gant_docker.py#L120-L150
|
[
"def check_permissions():\n \"\"\"\n Checks if current user can access docker\n \"\"\"\n if (\n not grp.getgrnam('docker').gr_gid in os.getgroups()\n and not os.geteuid() == 0\n ):\n exitStr = \"\"\"\n User doesn't have permission to use docker.\n You can do either of the following,\n 1. Add user to the 'docker' group (preferred)\n 2. Run command as superuser using either 'sudo' or 'su -c'\n \"\"\"\n exit(exitStr)\n"
] |
class GantDocker (DockerHelper):
"""
Gluster test env specific helper functions for docker
"""
def __init__(self):
super(GantDocker, self).__init__()
def setConf(self, conf):
self.conf = conf
def __handle_build_stream(self, stream, verbose):
for line in stream:
d = json.loads(line.decode('utf-8'))
if "error" in d:
return d["error"].strip()
elif verbose:
echo(d["stream"].strip())
return None
def build_base_image_cmd(self, force):
"""
Build the glusterbase image
"""
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
verbose = self.conf.verbose
if self.image_exists(tag=basetag):
if not force:
echo("Image with tag '{0}' already exists".format(basetag))
return self.image_by_tag(basetag)
else:
self.remove_image(basetag)
echo("Building base image")
stream = self.build(path=basedir, rm=True, tag=basetag)
err = self.__handle_build_stream(stream, verbose)
if err:
echo("Building base image failed with following error:")
echo(err)
return None
image = self.image_by_tag(basetag)
echo("Built base image {0} (Id: {1})".format(basetag, image['Id']))
return image
def build_main_image_cmd(self, srcdir, force):
"""
Build the main image to be used for launching containers
"""
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
maintag = self.conf.maintag
if not self.image_exists(tag=basetag):
if not force:
exit("Base image with tag {0} does not exist".format(basetag))
else:
echo("FORCE given. Forcefully building the base image.")
self.build_base_image_cmd(force)
if self.image_exists(tag=maintag):
self.remove_image(tag=maintag)
build_command = "/build/make-install-gluster.sh"
container = self.create_container(image=basetag, command=build_command,
volumes=["/build", "/src"])
self.start(container, binds={basedir: "/build", srcdir: "/src"})
echo('Building main image')
while self.inspect_container(container)["State"]["Running"]:
time.sleep(5)
if not self.inspect_container(container)["State"]["ExitCode"] == 0:
echo("Build failed")
echo("Dumping logs")
echo(self.logs(container))
exit()
# The docker remote api expects the repository and tag to be seperate
# items for commit
repo = maintag.split(':')[0]
tag = maintag.split(':')[1]
image = self.commit(container['Id'], repository=repo, tag=tag)
echo("Built main image {0} (Id: {1})".format(maintag, image['Id']))
def stop_cmd(self, name, force):
"""
Stop the specified or all docker containers launched by us
"""
check_permissions()
if name:
echo("Would stop container {0}".format(name))
else:
echo("Would stop all containers")
echo("For now use 'docker stop' to stop the containers")
def info_cmd(self, args):
"""
Print information on the built up environment
"""
echo('Would print info on the gluster env')
def ssh_cmd(self, name, ssh_command):
"""
SSH into given container and executre command if given
"""
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for "
"container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password', " ".join(ssh_command))
else:
ssh.launch_shell('root', ip, 'password')
def ip_cmd(self, name):
"""
Print ip of given container
"""
if not self.container_exists(name=name):
exit('Unknown container {0}'.format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for"
" container {0}".format(name))
else:
echo(ip)
def gluster_cmd(self, args):
name = args["<name>"]
ssh_command = args["<gluster-command>"]
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for"
" container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password',
"gluster {0}".format(" ".join(ssh_command)))
else:
ssh.do_cmd('root', ip, 'password', 'gluster')
|
kshlm/gant
|
gant/utils/gant_docker.py
|
GantDocker.stop_cmd
|
python
|
def stop_cmd(self, name, force):
check_permissions()
if name:
echo("Would stop container {0}".format(name))
else:
echo("Would stop all containers")
echo("For now use 'docker stop' to stop the containers")
|
Stop the specified or all docker containers launched by us
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/gant_docker.py#L152-L162
|
[
"def check_permissions():\n \"\"\"\n Checks if current user can access docker\n \"\"\"\n if (\n not grp.getgrnam('docker').gr_gid in os.getgroups()\n and not os.geteuid() == 0\n ):\n exitStr = \"\"\"\n User doesn't have permission to use docker.\n You can do either of the following,\n 1. Add user to the 'docker' group (preferred)\n 2. Run command as superuser using either 'sudo' or 'su -c'\n \"\"\"\n exit(exitStr)\n"
] |
class GantDocker (DockerHelper):
"""
Gluster test env specific helper functions for docker
"""
def __init__(self):
super(GantDocker, self).__init__()
def setConf(self, conf):
self.conf = conf
def __handle_build_stream(self, stream, verbose):
for line in stream:
d = json.loads(line.decode('utf-8'))
if "error" in d:
return d["error"].strip()
elif verbose:
echo(d["stream"].strip())
return None
def build_base_image_cmd(self, force):
"""
Build the glusterbase image
"""
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
verbose = self.conf.verbose
if self.image_exists(tag=basetag):
if not force:
echo("Image with tag '{0}' already exists".format(basetag))
return self.image_by_tag(basetag)
else:
self.remove_image(basetag)
echo("Building base image")
stream = self.build(path=basedir, rm=True, tag=basetag)
err = self.__handle_build_stream(stream, verbose)
if err:
echo("Building base image failed with following error:")
echo(err)
return None
image = self.image_by_tag(basetag)
echo("Built base image {0} (Id: {1})".format(basetag, image['Id']))
return image
def build_main_image_cmd(self, srcdir, force):
"""
Build the main image to be used for launching containers
"""
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
maintag = self.conf.maintag
if not self.image_exists(tag=basetag):
if not force:
exit("Base image with tag {0} does not exist".format(basetag))
else:
echo("FORCE given. Forcefully building the base image.")
self.build_base_image_cmd(force)
if self.image_exists(tag=maintag):
self.remove_image(tag=maintag)
build_command = "/build/make-install-gluster.sh"
container = self.create_container(image=basetag, command=build_command,
volumes=["/build", "/src"])
self.start(container, binds={basedir: "/build", srcdir: "/src"})
echo('Building main image')
while self.inspect_container(container)["State"]["Running"]:
time.sleep(5)
if not self.inspect_container(container)["State"]["ExitCode"] == 0:
echo("Build failed")
echo("Dumping logs")
echo(self.logs(container))
exit()
# The docker remote api expects the repository and tag to be seperate
# items for commit
repo = maintag.split(':')[0]
tag = maintag.split(':')[1]
image = self.commit(container['Id'], repository=repo, tag=tag)
echo("Built main image {0} (Id: {1})".format(maintag, image['Id']))
def launch_cmd(self, n, force):
"""
Launch the specified docker containers using the main image
"""
check_permissions()
prefix = self.conf.prefix
maintag = self.conf.maintag
commandStr = "supervisord -c /etc/supervisor/conf.d/supervisord.conf"
for i in range(1, n+1):
cName = "{0}-{1}".format(prefix, i)
if self.container_exists(name=cName):
if not force:
exit("Container with name {0} already "
"exists.".format(cName))
else:
if self.container_running(name=cName):
self.stop(cName)
self.remove_container(cName, v=True)
c = self.create_container(image=maintag, name=cName,
command=commandStr, volumes=["/bricks"])
self.start(c['Id'], privileged=True)
time.sleep(2) # Wait for container to startup
echo("Launched {0} (Id: {1})".format(cName, c['Id']))
c = None
cName = None
def info_cmd(self, args):
"""
Print information on the built up environment
"""
echo('Would print info on the gluster env')
def ssh_cmd(self, name, ssh_command):
"""
SSH into given container and executre command if given
"""
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for "
"container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password', " ".join(ssh_command))
else:
ssh.launch_shell('root', ip, 'password')
def ip_cmd(self, name):
"""
Print ip of given container
"""
if not self.container_exists(name=name):
exit('Unknown container {0}'.format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for"
" container {0}".format(name))
else:
echo(ip)
def gluster_cmd(self, args):
name = args["<name>"]
ssh_command = args["<gluster-command>"]
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for"
" container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password',
"gluster {0}".format(" ".join(ssh_command)))
else:
ssh.do_cmd('root', ip, 'password', 'gluster')
|
kshlm/gant
|
gant/utils/gant_docker.py
|
GantDocker.ssh_cmd
|
python
|
def ssh_cmd(self, name, ssh_command):
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for "
"container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password', " ".join(ssh_command))
else:
ssh.launch_shell('root', ip, 'password')
|
SSH into given container and executre command if given
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/gant_docker.py#L170-L187
| null |
class GantDocker (DockerHelper):
"""
Gluster test env specific helper functions for docker
"""
def __init__(self):
super(GantDocker, self).__init__()
def setConf(self, conf):
self.conf = conf
def __handle_build_stream(self, stream, verbose):
for line in stream:
d = json.loads(line.decode('utf-8'))
if "error" in d:
return d["error"].strip()
elif verbose:
echo(d["stream"].strip())
return None
def build_base_image_cmd(self, force):
"""
Build the glusterbase image
"""
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
verbose = self.conf.verbose
if self.image_exists(tag=basetag):
if not force:
echo("Image with tag '{0}' already exists".format(basetag))
return self.image_by_tag(basetag)
else:
self.remove_image(basetag)
echo("Building base image")
stream = self.build(path=basedir, rm=True, tag=basetag)
err = self.__handle_build_stream(stream, verbose)
if err:
echo("Building base image failed with following error:")
echo(err)
return None
image = self.image_by_tag(basetag)
echo("Built base image {0} (Id: {1})".format(basetag, image['Id']))
return image
def build_main_image_cmd(self, srcdir, force):
"""
Build the main image to be used for launching containers
"""
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
maintag = self.conf.maintag
if not self.image_exists(tag=basetag):
if not force:
exit("Base image with tag {0} does not exist".format(basetag))
else:
echo("FORCE given. Forcefully building the base image.")
self.build_base_image_cmd(force)
if self.image_exists(tag=maintag):
self.remove_image(tag=maintag)
build_command = "/build/make-install-gluster.sh"
container = self.create_container(image=basetag, command=build_command,
volumes=["/build", "/src"])
self.start(container, binds={basedir: "/build", srcdir: "/src"})
echo('Building main image')
while self.inspect_container(container)["State"]["Running"]:
time.sleep(5)
if not self.inspect_container(container)["State"]["ExitCode"] == 0:
echo("Build failed")
echo("Dumping logs")
echo(self.logs(container))
exit()
# The docker remote api expects the repository and tag to be seperate
# items for commit
repo = maintag.split(':')[0]
tag = maintag.split(':')[1]
image = self.commit(container['Id'], repository=repo, tag=tag)
echo("Built main image {0} (Id: {1})".format(maintag, image['Id']))
def launch_cmd(self, n, force):
"""
Launch the specified docker containers using the main image
"""
check_permissions()
prefix = self.conf.prefix
maintag = self.conf.maintag
commandStr = "supervisord -c /etc/supervisor/conf.d/supervisord.conf"
for i in range(1, n+1):
cName = "{0}-{1}".format(prefix, i)
if self.container_exists(name=cName):
if not force:
exit("Container with name {0} already "
"exists.".format(cName))
else:
if self.container_running(name=cName):
self.stop(cName)
self.remove_container(cName, v=True)
c = self.create_container(image=maintag, name=cName,
command=commandStr, volumes=["/bricks"])
self.start(c['Id'], privileged=True)
time.sleep(2) # Wait for container to startup
echo("Launched {0} (Id: {1})".format(cName, c['Id']))
c = None
cName = None
def stop_cmd(self, name, force):
"""
Stop the specified or all docker containers launched by us
"""
check_permissions()
if name:
echo("Would stop container {0}".format(name))
else:
echo("Would stop all containers")
echo("For now use 'docker stop' to stop the containers")
def info_cmd(self, args):
"""
Print information on the built up environment
"""
echo('Would print info on the gluster env')
def ip_cmd(self, name):
"""
Print ip of given container
"""
if not self.container_exists(name=name):
exit('Unknown container {0}'.format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for"
" container {0}".format(name))
else:
echo(ip)
def gluster_cmd(self, args):
name = args["<name>"]
ssh_command = args["<gluster-command>"]
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for"
" container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password',
"gluster {0}".format(" ".join(ssh_command)))
else:
ssh.do_cmd('root', ip, 'password', 'gluster')
|
kshlm/gant
|
gant/utils/gant_docker.py
|
GantDocker.ip_cmd
|
python
|
def ip_cmd(self, name):
if not self.container_exists(name=name):
exit('Unknown container {0}'.format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for"
" container {0}".format(name))
else:
echo(ip)
|
Print ip of given container
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/gant_docker.py#L189-L201
| null |
class GantDocker (DockerHelper):
"""
Gluster test env specific helper functions for docker
"""
def __init__(self):
super(GantDocker, self).__init__()
def setConf(self, conf):
self.conf = conf
def __handle_build_stream(self, stream, verbose):
for line in stream:
d = json.loads(line.decode('utf-8'))
if "error" in d:
return d["error"].strip()
elif verbose:
echo(d["stream"].strip())
return None
def build_base_image_cmd(self, force):
"""
Build the glusterbase image
"""
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
verbose = self.conf.verbose
if self.image_exists(tag=basetag):
if not force:
echo("Image with tag '{0}' already exists".format(basetag))
return self.image_by_tag(basetag)
else:
self.remove_image(basetag)
echo("Building base image")
stream = self.build(path=basedir, rm=True, tag=basetag)
err = self.__handle_build_stream(stream, verbose)
if err:
echo("Building base image failed with following error:")
echo(err)
return None
image = self.image_by_tag(basetag)
echo("Built base image {0} (Id: {1})".format(basetag, image['Id']))
return image
def build_main_image_cmd(self, srcdir, force):
"""
Build the main image to be used for launching containers
"""
check_permissions()
basetag = self.conf.basetag
basedir = self.conf.basedir
maintag = self.conf.maintag
if not self.image_exists(tag=basetag):
if not force:
exit("Base image with tag {0} does not exist".format(basetag))
else:
echo("FORCE given. Forcefully building the base image.")
self.build_base_image_cmd(force)
if self.image_exists(tag=maintag):
self.remove_image(tag=maintag)
build_command = "/build/make-install-gluster.sh"
container = self.create_container(image=basetag, command=build_command,
volumes=["/build", "/src"])
self.start(container, binds={basedir: "/build", srcdir: "/src"})
echo('Building main image')
while self.inspect_container(container)["State"]["Running"]:
time.sleep(5)
if not self.inspect_container(container)["State"]["ExitCode"] == 0:
echo("Build failed")
echo("Dumping logs")
echo(self.logs(container))
exit()
# The docker remote api expects the repository and tag to be seperate
# items for commit
repo = maintag.split(':')[0]
tag = maintag.split(':')[1]
image = self.commit(container['Id'], repository=repo, tag=tag)
echo("Built main image {0} (Id: {1})".format(maintag, image['Id']))
def launch_cmd(self, n, force):
"""
Launch the specified docker containers using the main image
"""
check_permissions()
prefix = self.conf.prefix
maintag = self.conf.maintag
commandStr = "supervisord -c /etc/supervisor/conf.d/supervisord.conf"
for i in range(1, n+1):
cName = "{0}-{1}".format(prefix, i)
if self.container_exists(name=cName):
if not force:
exit("Container with name {0} already "
"exists.".format(cName))
else:
if self.container_running(name=cName):
self.stop(cName)
self.remove_container(cName, v=True)
c = self.create_container(image=maintag, name=cName,
command=commandStr, volumes=["/bricks"])
self.start(c['Id'], privileged=True)
time.sleep(2) # Wait for container to startup
echo("Launched {0} (Id: {1})".format(cName, c['Id']))
c = None
cName = None
def stop_cmd(self, name, force):
"""
Stop the specified or all docker containers launched by us
"""
check_permissions()
if name:
echo("Would stop container {0}".format(name))
else:
echo("Would stop all containers")
echo("For now use 'docker stop' to stop the containers")
def info_cmd(self, args):
"""
Print information on the built up environment
"""
echo('Would print info on the gluster env')
def ssh_cmd(self, name, ssh_command):
"""
SSH into given container and executre command if given
"""
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for "
"container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password', " ".join(ssh_command))
else:
ssh.launch_shell('root', ip, 'password')
def gluster_cmd(self, args):
name = args["<name>"]
ssh_command = args["<gluster-command>"]
if not self.container_exists(name=name):
exit("Unknown container {0}".format(name))
if not self.container_running(name=name):
exit("Container {0} is not running".format(name))
ip = self.get_container_ip(name)
if not ip:
exit("Failed to get network address for"
" container {0}".format(name))
if ssh_command:
ssh.do_cmd('root', ip, 'password',
"gluster {0}".format(" ".join(ssh_command)))
else:
ssh.do_cmd('root', ip, 'password', 'gluster')
|
kshlm/gant
|
gant/main.py
|
gant
|
python
|
def gant(ctx, conf, basedir, basetag, maintag, prefix, verbose):
ctx.obj.initConf(basetag, maintag, basedir, prefix, verbose)
ctx.obj.gd.setConf(ctx.obj.conf)
|
GAnt : The Gluster helper ant\n
Creates GlusterFS development and testing environments using Docker
|
train
|
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/main.py#L76-L82
| null |
#! /usr/bin/env python
from __future__ import unicode_literals, print_function
from .utils.gant_ctx import GantCtx
import os
import click
helpStr = """
Gant : The Gluster helper ant
Creates GlusterFS development and testing environments using Docker
Usage:
gant [options] build-base [force]
gant [options] build-main <srcdir>[force]
gant [options] launch <number> [force]
gant [options] stop [<name>] [force]
gant [options] info
gant [options] ssh <name> [--] [<ssh-command>...]
gant [options] ip <name>
gant [options] gluster <name> [--] [<gluster-command>...]
Commands:
build-base Builds the base docker image
build-main Builds the main docker image to be used for launching
containers
launch Launches the given number of containers
stop Stops the launched containers
info Gives information about the gant environment
ssh SSHes into the named container and runs the command if given
ip Gives IP address of the named container
gluster Runs given gluster CLI command in named container
Arguments:
force Forcefully do the operation
<srcdir> Directory containing the GlusterFS source
<number> Number of containers to launch
<name> Name of container to stop
<ssh-command> Command to run inside the container
<gluster-command> Gluster CLI command to run inside the container
Options:
-c <conffile>, --conf <conffile> Configuration file to use
--basetag <basetag> Tag to be used for the base docker image
[default: glusterbase:latest]
--maintag <maintag> Tag to be used for the main docker image
[default: gluster:latest]
--basedir <basedir> Base directory containing the Dockerfile
and helper scripts for Gant
[default: {0}]
--prefix <prefix> Prefix to be used for naming the
launched docker containers
[default: gluster]
-V, --verbose Verbose output
""".format(os.getcwd())
@click.group(no_args_is_help=True)
@click.option("-c", "--conf", type=click.File(),
help="Configuration file to use")
@click.option("--basedir", default=os.getcwd(),
type=click.Path(exists=True, file_okay=False, readable=True),
help="Directory containing the Dockerfile and helper scripts "
"for GAnt")
@click.option("--basetag", default="glusterbase:latest", show_default=True,
help="Tag to be used for the base docker image")
@click.option("--maintag", default="gluster:latest", show_default=True,
help="Tag to be used for the main docker image")
@click.option("--prefix", default="gluster", show_default=True,
help="Prefix used for naming launched containers")
@click.option("--verbose", "-v", count=True, metavar="",
help="Increase verbosity of output")
@click.version_option(prog_name='GAnt')
@click.pass_context
@gant.command(name="build-base", help="Build the base docker image")
@click.option("--force", is_flag=True, default=False,
help="Forcefully do the operation")
@click.pass_context
def build_base(ctx, force):
ctx.obj.gd.build_base_image_cmd(force)
@gant.command(name="build-main",
help="Build the main docker image to be used for launching")
@click.option("--force", is_flag=True, default=False,
help="Forcefully do the operation")
@click.argument("srcdir",
type=click.Path(exists=True, file_okay=False, readable=True))
@click.pass_context
def build_main(ctx, srcdir, force):
ctx.obj.gd.build_main_image_cmd(srcdir, force)
@gant.command(help="Launch the given number of containers")
@click.option("--force", is_flag=True, default=False,
help="Forcefully do the operation")
@click.argument("number", type=click.INT)
@click.pass_context
def launch(ctx, number, force):
ctx.obj.gd.launch_cmd(number, force)
@gant.command(help="Stop the launched containers")
@click.option("--force", is_flag=True, default=False,
help="Forcefully do the operation")
@click.argument("name", required=False, type=click.STRING)
@click.pass_context
def stop(ctx, name, force):
ctx.obj.gd.stop_cmd(name, force)
@gant.command(help="Show information about the GAnt environment")
@click.pass_context
def info(ctx):
ctx.obj.gd.info_cmd()
@gant.command(help="Print ip of given container")
@click.argument("container", type=click.STRING)
@click.pass_context
def ip(ctx, container):
ctx.obj.gd.ip_cmd(container)
@gant.command(help="SSHes into named container and runs command if given")
@click.argument("container", type=click.STRING)
@click.argument("command", required=False, type=click.STRING, nargs=-1)
@click.pass_context
def ssh(ctx, container, command):
ctx.obj.gd.ssh_cmd(container, command)
@gant.command(help="Runs given gluster command in named container")
@click.argument("container", type=click.STRING)
@click.argument("command", type=click.STRING, nargs=-1)
def gluster(ctx, container, command):
ctx.obj.gd.gluster_cmd(container, command)
def main():
gant(obj=GantCtx())
|
laysakura/relshell
|
relshell/recorddef.py
|
RecordDef.colindex_by_colname
|
python
|
def colindex_by_colname(self, colname):
for i, coldef in enumerate(self): # iterate each column's definition
if coldef.name == colname:
return i
raise ValueError('No column named "%s" found' % (colname))
|
Return column index whose name is :param:`column`
:raises: `ValueError` when no column with :param:`colname` found
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/recorddef.py#L67-L75
| null |
class RecordDef(object):
"""Used as DDL (like CREATE TABLE) information."""
# APIs
def __init__(self, record_def):
"""Creates an object with each column property from `record_def`.
:param record_def: list of column definition hash (see example below)
*Example:*
.. code-block:: python
rdef = RecordDef(
[
{'name' : 'col1',
'type' : 'STRING',
'primary_key' : True,
},
{'name' : 'col2',
'type' : 'INT',
},
]
)
rdef[1].name # => 'col2'
rdef[1].type # => Type('INT')
.. seealso::
`ColumnDef.required_fields <#relshell.columndef.ColumnDef.required_fields>`_ and
`ColumnDef.optional_fields <#relshell.columndef.ColumnDef.optional_fields>`_
for each column's specification.
:raises: `AttributeError` if `record_def` has invalid format
"""
self._recdef = record_def
self._set_coldefs()
def __len__(self):
"""Returns number of columns"""
return len(self._coldefs)
def __getitem__(self, key):
"""Returns specified column definition.
:param key: column index to get definition.
:type key: int (0-origin)
:rtype: `ColumnDef <#relshell.columndef.ColumnDef>`_
"""
return self._coldefs[key]
def __eq__(self, other):
return self._recdef == other._recdef
def __ne__(self, other):
return not self.__eq__(other)
# Private functions
def _set_coldefs(self):
self._coldefs = []
for i, raw_coldef in enumerate(self._recdef):
try:
self._coldefs.append(ColumnDef(raw_coldef))
except AttributeError as e:
raise AttributeError("In column %d: %s" % (i, e))
def __str__(self):
return str(self._recdef)
|
laysakura/relshell
|
relshell/shelloperator.py
|
ShellOperator.run
|
python
|
def run(self, in_batches):
if len(in_batches) != len(self._batcmd.batch_to_file_s):
BaseShellOperator._rm_process_input_tmpfiles(self._batcmd.batch_to_file_s) # [todo] - Removing tmpfiles can be easily forgot. Less lifetime for tmpfile.
raise AttributeError('len(in_batches) == %d, while %d IN_BATCH* are specified in command below:%s$ %s' %
(len(in_batches), len(self._batcmd.batch_to_file_s), os.linesep, self._batcmd.sh_cmd))
# prepare & start process
BaseShellOperator._batches_to_tmpfile(self._in_record_sep, self._in_column_sep, in_batches, self._batcmd.batch_to_file_s)
process = BaseShellOperator._start_process(self._batcmd, self._cwd, self._env)
BaseShellOperator._batch_to_stdin(process, self._in_record_sep, self._in_column_sep, in_batches, self._batcmd.batch_to_file_s)
# wait process & get its output
BaseShellOperator._close_process_input_stdin(self._batcmd.batch_to_file_s)
BaseShellOperator._wait_process(process, self._batcmd.sh_cmd, self._success_exitcodes)
BaseShellOperator._rm_process_input_tmpfiles(self._batcmd.batch_to_file_s)
if self._batcmd.batch_from_file.is_stdout():
out_str = self._batcmd.batch_from_file.read_stdout(process.stdout)
elif self._batcmd.batch_from_file.is_tmpfile():
out_str = self._batcmd.batch_from_file.read_tmpfile()
else: # pragma: no cover
assert(False)
out_batch = BaseShellOperator._out_str_to_batch(out_str, self._out_recdef, self._out_col_patterns)
self._batcmd.batch_from_file.finish()
return out_batch
|
Run shell operator synchronously to eat `in_batches`
:param in_batches: `tuple` of batches to process
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/shelloperator.py#L49-L78
|
[
"def _start_process(batcmd, cwd, env, non_blocking_stdout=False):\n try:\n p = Popen(\n shlex.split(batcmd.sh_cmd),\n stdin = PIPE if batcmd.has_input_from_stdin() else None,\n stdout = PIPE if batcmd.batch_from_file and batcmd.batch_from_file.is_stdout() else None,\n stderr = None,\n cwd = cwd,\n env = env,\n bufsize = 1 if non_blocking_stdout else 0,\n )\n BaseShellOperator._logger.info('[Command execution] $ %s' % (batcmd.sh_cmd))\n except OSError as e:\n raise OSError('Following command fails - %s:%s$ %s' % (e, os.linesep, batcmd.sh_cmd))\n\n if non_blocking_stdout:\n fcntl.fcntl(p.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)\n\n return p\n",
"def _batches_to_tmpfile(in_record_sep, in_column_sep, in_batches, batch_to_file_s):\n \"\"\"Create files to store in-batches contents (if necessary)\"\"\"\n for i, b2f in enumerate(batch_to_file_s):\n if b2f.is_tmpfile():\n input_str = BaseShellOperator._input_str(in_batches[i], in_record_sep, in_column_sep)\n b2f.write_tmpfile(input_str)\n",
"def _batch_to_stdin(process, in_record_sep, in_column_sep, in_batches, batch_to_file_s):\n \"\"\"Write in-batch contents to `process` 's stdin (if necessary)\n \"\"\"\n for i, b2f in enumerate(batch_to_file_s):\n if b2f.is_stdin():\n input_str = BaseShellOperator._input_str(in_batches[i], in_record_sep, in_column_sep)\n b2f.write_stdin(process.stdin, input_str)\n break # at most 1 batch_to_file can be from stdin\n",
"def _out_str_to_batch(out_str, out_recdef, out_col_patterns):\n out_recs = []\n pos = 0\n while True:\n (rec, rec_str_len) = BaseShellOperator._parse_record(out_str[pos:], out_col_patterns, out_recdef)\n if rec is None:\n break\n out_recs.append(rec)\n pos += rec_str_len\n out_batch = Batch(out_recdef, tuple(out_recs))\n return out_batch\n",
"def _wait_process(process, sh_cmd, success_exitcodes):\n exitcode = process.wait() # [todo] - if this call does not return, it means 2nd `constraints <relshell.daemon_shelloperator.DaemonShellOperator>`_ are not sutisfied => raise `AttributeError`\n if exitcode not in success_exitcodes:\n raise OSError('Following command ended with exitcode %d:%s$ %s' % (exitcode, os.linesep, sh_cmd))\n",
"def _close_process_input_stdin(batch_to_file_s):\n for b2f in batch_to_file_s:\n if b2f.is_stdin():\n b2f.finish()\n",
"def _rm_process_input_tmpfiles(batch_to_file_s):\n for b2f in batch_to_file_s:\n if b2f.is_tmpfile():\n b2f.finish()\n"
] |
class ShellOperator(BaseShellOperator):
"""ShellOperator
"""
def __init__(
self,
# non-kw & common w/ BaseShellOperator param
cmd,
out_record_def,
# non-kw & original param
out_col_patterns,
# kw & common w/ BaseShellOperator param
success_exitcodes=(0, ),
cwd=None,
env=os.environ,
in_record_sep=os.linesep,
in_column_sep=' ',
# kw & original param
):
"""Constructor
"""
BaseShellOperator.__init__(
self,
cmd,
out_record_def,
success_exitcodes,
cwd,
env,
in_record_sep,
in_column_sep, # [fix] - 複数カラムを1レコードに(文字列に)落し込むとき,各カラムの区切りが同一である必要はない.sprintfみたいにformat指定できるべき.
out_col_patterns,
)
|
laysakura/relshell
|
relshell/base_shelloperator.py
|
BaseShellOperator._batches_to_tmpfile
|
python
|
def _batches_to_tmpfile(in_record_sep, in_column_sep, in_batches, batch_to_file_s):
for i, b2f in enumerate(batch_to_file_s):
if b2f.is_tmpfile():
input_str = BaseShellOperator._input_str(in_batches[i], in_record_sep, in_column_sep)
b2f.write_tmpfile(input_str)
|
Create files to store in-batches contents (if necessary)
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/base_shelloperator.py#L94-L99
|
[
"def _input_str(in_batch, in_record_sep, in_column_sep):\n if len(in_batch) == 0:\n return ''\n\n input_str_list = []\n for record in in_batch:\n for col in record:\n input_str_list.append(str(col))\n input_str_list.append(in_column_sep)\n del input_str_list[-1] # remove last in_column_sep\n input_str_list.append(in_record_sep)\n input_str_list[-1] = os.linesep # remove last in_record_sep & adds newline at last (since POSIX requires it)\n return ''.join(input_str_list)\n"
] |
class BaseShellOperator(object):
"""BaseShellOperator
"""
__metaclass__ = ABCMeta
_logger = None
def __init__(
self,
cmd,
out_record_def,
success_exitcodes,
cwd,
env,
in_record_sep, # [todo] - explain how this parameter is used (using diagram?)
in_column_sep,
out_col_patterns,
):
"""Constructor
"""
self._batcmd = BatchCommand(cmd)
self._out_recdef = out_record_def
self._success_exitcodes = success_exitcodes
self._cwd = cwd
self._env = env
self._in_record_sep = in_record_sep
self._in_column_sep = in_column_sep
self._out_col_patterns = out_col_patterns
BaseShellOperator._logger = Logger.instance()
@abstractmethod
def run(self, in_batches): # pragma: no cover
"""Run shell operator synchronously to eat `in_batches`
:param in_batches: `tuple` of batches to process
"""
pass
@staticmethod
def _start_process(batcmd, cwd, env, non_blocking_stdout=False):
try:
p = Popen(
shlex.split(batcmd.sh_cmd),
stdin = PIPE if batcmd.has_input_from_stdin() else None,
stdout = PIPE if batcmd.batch_from_file and batcmd.batch_from_file.is_stdout() else None,
stderr = None,
cwd = cwd,
env = env,
bufsize = 1 if non_blocking_stdout else 0,
)
BaseShellOperator._logger.info('[Command execution] $ %s' % (batcmd.sh_cmd))
except OSError as e:
raise OSError('Following command fails - %s:%s$ %s' % (e, os.linesep, batcmd.sh_cmd))
if non_blocking_stdout:
fcntl.fcntl(p.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
return p
@staticmethod
def _input_str(in_batch, in_record_sep, in_column_sep):
if len(in_batch) == 0:
return ''
input_str_list = []
for record in in_batch:
for col in record:
input_str_list.append(str(col))
input_str_list.append(in_column_sep)
del input_str_list[-1] # remove last in_column_sep
input_str_list.append(in_record_sep)
input_str_list[-1] = os.linesep # remove last in_record_sep & adds newline at last (since POSIX requires it)
return ''.join(input_str_list)
@staticmethod
@staticmethod
def _batch_to_stdin(process, in_record_sep, in_column_sep, in_batches, batch_to_file_s):
"""Write in-batch contents to `process` 's stdin (if necessary)
"""
for i, b2f in enumerate(batch_to_file_s):
if b2f.is_stdin():
input_str = BaseShellOperator._input_str(in_batches[i], in_record_sep, in_column_sep)
b2f.write_stdin(process.stdin, input_str)
break # at most 1 batch_to_file can be from stdin
@staticmethod
def _parse_record(str_to_parse, col_patterns, recdef):
cols = []
pos = 0
for col_def in recdef:
col_name = col_def.name
col_pat = col_patterns[col_name]
col_type = col_def.type
mat = col_pat.search(str_to_parse[pos:])
# no more record to parse
if mat is None:
BaseShellOperator._logger.debug('Following string does not match `out_col_patterns`, ignored: """%s"""'
% (str_to_parse))
return (None, None)
# beginning substring is skipped
if mat.start() > 0:
BaseShellOperator._logger.debug('Following string does not match `out_col_patterns`, ignored: """%s"""'
% (str_to_parse[:mat.start()]))
pos += mat.end()
if pos == 0:
raise ValueError('Regex pattern "%s" matches 0-length string' % (col_pat))
col_str = mat.group()
cols.append(col_type.python_cast(col_str))
return (Record(*cols), pos)
@staticmethod
def _out_str_to_batch(out_str, out_recdef, out_col_patterns):
out_recs = []
pos = 0
while True:
(rec, rec_str_len) = BaseShellOperator._parse_record(out_str[pos:], out_col_patterns, out_recdef)
if rec is None:
break
out_recs.append(rec)
pos += rec_str_len
out_batch = Batch(out_recdef, tuple(out_recs))
return out_batch
@staticmethod
def _wait_process(process, sh_cmd, success_exitcodes):
exitcode = process.wait() # [todo] - if this call does not return, it means 2nd `constraints <relshell.daemon_shelloperator.DaemonShellOperator>`_ are not sutisfied => raise `AttributeError`
if exitcode not in success_exitcodes:
raise OSError('Following command ended with exitcode %d:%s$ %s' % (exitcode, os.linesep, sh_cmd))
@staticmethod
def _close_process_input_stdin(batch_to_file_s):
for b2f in batch_to_file_s:
if b2f.is_stdin():
b2f.finish()
@staticmethod
def _rm_process_input_tmpfiles(batch_to_file_s):
for b2f in batch_to_file_s:
if b2f.is_tmpfile():
b2f.finish()
|
laysakura/relshell
|
relshell/base_shelloperator.py
|
BaseShellOperator._batch_to_stdin
|
python
|
def _batch_to_stdin(process, in_record_sep, in_column_sep, in_batches, batch_to_file_s):
for i, b2f in enumerate(batch_to_file_s):
if b2f.is_stdin():
input_str = BaseShellOperator._input_str(in_batches[i], in_record_sep, in_column_sep)
b2f.write_stdin(process.stdin, input_str)
break
|
Write in-batch contents to `process` 's stdin (if necessary)
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/base_shelloperator.py#L102-L109
|
[
"def _input_str(in_batch, in_record_sep, in_column_sep):\n if len(in_batch) == 0:\n return ''\n\n input_str_list = []\n for record in in_batch:\n for col in record:\n input_str_list.append(str(col))\n input_str_list.append(in_column_sep)\n del input_str_list[-1] # remove last in_column_sep\n input_str_list.append(in_record_sep)\n input_str_list[-1] = os.linesep # remove last in_record_sep & adds newline at last (since POSIX requires it)\n return ''.join(input_str_list)\n"
] |
class BaseShellOperator(object):
"""BaseShellOperator
"""
__metaclass__ = ABCMeta
_logger = None
def __init__(
self,
cmd,
out_record_def,
success_exitcodes,
cwd,
env,
in_record_sep, # [todo] - explain how this parameter is used (using diagram?)
in_column_sep,
out_col_patterns,
):
"""Constructor
"""
self._batcmd = BatchCommand(cmd)
self._out_recdef = out_record_def
self._success_exitcodes = success_exitcodes
self._cwd = cwd
self._env = env
self._in_record_sep = in_record_sep
self._in_column_sep = in_column_sep
self._out_col_patterns = out_col_patterns
BaseShellOperator._logger = Logger.instance()
@abstractmethod
def run(self, in_batches): # pragma: no cover
"""Run shell operator synchronously to eat `in_batches`
:param in_batches: `tuple` of batches to process
"""
pass
@staticmethod
def _start_process(batcmd, cwd, env, non_blocking_stdout=False):
try:
p = Popen(
shlex.split(batcmd.sh_cmd),
stdin = PIPE if batcmd.has_input_from_stdin() else None,
stdout = PIPE if batcmd.batch_from_file and batcmd.batch_from_file.is_stdout() else None,
stderr = None,
cwd = cwd,
env = env,
bufsize = 1 if non_blocking_stdout else 0,
)
BaseShellOperator._logger.info('[Command execution] $ %s' % (batcmd.sh_cmd))
except OSError as e:
raise OSError('Following command fails - %s:%s$ %s' % (e, os.linesep, batcmd.sh_cmd))
if non_blocking_stdout:
fcntl.fcntl(p.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
return p
@staticmethod
def _input_str(in_batch, in_record_sep, in_column_sep):
if len(in_batch) == 0:
return ''
input_str_list = []
for record in in_batch:
for col in record:
input_str_list.append(str(col))
input_str_list.append(in_column_sep)
del input_str_list[-1] # remove last in_column_sep
input_str_list.append(in_record_sep)
input_str_list[-1] = os.linesep # remove last in_record_sep & adds newline at last (since POSIX requires it)
return ''.join(input_str_list)
@staticmethod
def _batches_to_tmpfile(in_record_sep, in_column_sep, in_batches, batch_to_file_s):
"""Create files to store in-batches contents (if necessary)"""
for i, b2f in enumerate(batch_to_file_s):
if b2f.is_tmpfile():
input_str = BaseShellOperator._input_str(in_batches[i], in_record_sep, in_column_sep)
b2f.write_tmpfile(input_str)
@staticmethod
# at most 1 batch_to_file can be from stdin
@staticmethod
def _parse_record(str_to_parse, col_patterns, recdef):
cols = []
pos = 0
for col_def in recdef:
col_name = col_def.name
col_pat = col_patterns[col_name]
col_type = col_def.type
mat = col_pat.search(str_to_parse[pos:])
# no more record to parse
if mat is None:
BaseShellOperator._logger.debug('Following string does not match `out_col_patterns`, ignored: """%s"""'
% (str_to_parse))
return (None, None)
# beginning substring is skipped
if mat.start() > 0:
BaseShellOperator._logger.debug('Following string does not match `out_col_patterns`, ignored: """%s"""'
% (str_to_parse[:mat.start()]))
pos += mat.end()
if pos == 0:
raise ValueError('Regex pattern "%s" matches 0-length string' % (col_pat))
col_str = mat.group()
cols.append(col_type.python_cast(col_str))
return (Record(*cols), pos)
@staticmethod
def _out_str_to_batch(out_str, out_recdef, out_col_patterns):
out_recs = []
pos = 0
while True:
(rec, rec_str_len) = BaseShellOperator._parse_record(out_str[pos:], out_col_patterns, out_recdef)
if rec is None:
break
out_recs.append(rec)
pos += rec_str_len
out_batch = Batch(out_recdef, tuple(out_recs))
return out_batch
@staticmethod
def _wait_process(process, sh_cmd, success_exitcodes):
exitcode = process.wait() # [todo] - if this call does not return, it means 2nd `constraints <relshell.daemon_shelloperator.DaemonShellOperator>`_ are not sutisfied => raise `AttributeError`
if exitcode not in success_exitcodes:
raise OSError('Following command ended with exitcode %d:%s$ %s' % (exitcode, os.linesep, sh_cmd))
@staticmethod
def _close_process_input_stdin(batch_to_file_s):
for b2f in batch_to_file_s:
if b2f.is_stdin():
b2f.finish()
@staticmethod
def _rm_process_input_tmpfiles(batch_to_file_s):
for b2f in batch_to_file_s:
if b2f.is_tmpfile():
b2f.finish()
|
laysakura/relshell
|
relshell/timestamp.py
|
Timestamp.datetime
|
python
|
def datetime(self):
return dt.datetime(
self.year(), self.month(), self.day(),
self.hour(), self.minute(), self.second(),
int(self.millisecond() * 1e3))
|
Return `datetime` object
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/timestamp.py#L63-L68
|
[
"def year(self):\n \"\"\"Return year\"\"\"\n return int(str(self._ts)[0:4])\n",
"def month(self):\n \"\"\"Return month\"\"\"\n return int(str(self._ts)[4:6])\n",
"def day(self):\n \"\"\"Return day\"\"\"\n return int(str(self._ts)[6:8])\n",
"def hour(self):\n \"\"\"Return hour\"\"\"\n return int(str(self._ts)[8:10])\n",
"def minute(self):\n \"\"\"Return minute\"\"\"\n return int(str(self._ts)[10:12])\n",
"def second(self):\n \"\"\"Return self\"\"\"\n return int(str(self._ts)[12:14])\n",
"def millisecond(self):\n \"\"\"Return millisecond\"\"\"\n return int(str(self._ts)[14:17])\n"
] |
class Timestamp(object):
"""Provides efficient data structure to represent timestamp
"""
def __init__(self, timestamp_str):
"""Constructor
:param timestamp_str: timestamp string
:type timestamp_str: `%Y-%m-%d %H:%M:%S` or `%Y-%m-%d`
"""
try:
t = dt.datetime.strptime(timestamp_str, '%Y-%m-%d %H:%M:%S')
except ValueError:
t = dt.datetime.strptime(timestamp_str, '%Y-%m-%d')
except ValueError:
raise ValueError('"%s" does not have appropreate format' % (timestamp_str))
# year=2013, month=10, day=29, hour=01, minute=04, second=12, microsecond=123456
# => 20131029010412123 (microsecond is cut to millisecond)
# [todo] - compress encoded timestamp (might be better to use `datetime.datetime` as-is)
self._ts = (long(t.microsecond * 1e-3) +
long(t.second * 1e3) + long(t.minute * 1e5) + long(t.hour * 1e7) +
long(t.day * 1e9) + long(t.month * 1e11) + long(t.year * 1e13))
def year(self):
"""Return year"""
return int(str(self._ts)[0:4])
def month(self):
"""Return month"""
return int(str(self._ts)[4:6])
def day(self):
"""Return day"""
return int(str(self._ts)[6:8])
def hour(self):
"""Return hour"""
return int(str(self._ts)[8:10])
def minute(self):
"""Return minute"""
return int(str(self._ts)[10:12])
def second(self):
"""Return self"""
return int(str(self._ts)[12:14])
def millisecond(self):
"""Return millisecond"""
return int(str(self._ts)[14:17])
def runoff_lower(self, timespan):
"""Check if this timestamp is lower than t0 of [t0, t1]"""
return self < timespan.get_start()
def runoff_higher(self, timespan):
"""Check if this timestamp is higher than t1 of [t0, t1]"""
return self > timespan.get_end()
def between(self, timespan):
"""Check if this timestamp is between t0 and t1 of [t0, t1]"""
return timespan.get_start() <= self <= timespan.get_end()
def __eq__(self, other):
return self._ts == other._ts
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return self._ts < other._ts
def __gt__(self, other):
return self._ts > other._ts
def __le__(self, other):
return self._ts <= other._ts
def __ge__(self, other):
return self._ts >= other._ts
def __add__(self, sec):
"""Add `sec` to this timestamp"""
return Timestamp(timestamp_str=(self.datetime() + dt.timedelta(seconds=sec)).strftime('%Y-%m-%d %H:%M:%S'))
def __sub__(self, sec):
"""Subtract `sec` to this timestamp"""
return Timestamp(timestamp_str=(self.datetime() - dt.timedelta(seconds=sec)).strftime('%Y-%m-%d %H:%M:%S'))
def __long__(self):
"""Return long representation of this timestamp"""
return self._ts
def __str__(self): # pragma: no cover
"""Return str representation of this timestamp"""
return "%04d-%02d-%02d %02d:%02d:%02d" % (
self.year(), self.month(), self.day(),
self.hour(), self.minute(), self.second())
|
laysakura/relshell
|
relshell/type.py
|
Type.equivalent_relshell_type
|
python
|
def equivalent_relshell_type(val):
builtin_type = type(val)
if builtin_type not in Type._typemap:
raise NotImplementedError("builtin type %s is not convertible to relshell type" %
(builtin_type))
relshell_type_str = Type._typemap[builtin_type]
return Type(relshell_type_str)
|
Returns `val`'s relshell compatible type.
:param val: value to check relshell equivalent type
:raises: `NotImplementedError` if val's relshell compatible type is not implemented.
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/type.py#L53-L64
| null |
class Type(object):
"""Types of columns."""
_typemap = {
# python type : relshell type
int : 'INT',
str : 'STRING',
Timestamp : 'TIMESTAMP'
}
type_list = _typemap.values()
"""List of relshell types."""
# APIs
def __init__(self, relshell_type_str):
"""Creates a Type object.
:param relshell_type_str: string representing relshell type (one of `Type.type_list <#relshell.type.Type.type_list>`_)
:raises: `NotImplementedError`
"""
if relshell_type_str not in Type._typemap.values():
raise NotImplementedError("Type %s is not supported as relshell type" %
(relshell_type_str))
self._typestr = relshell_type_str
self._type = Type._type_from_typestr(self._typestr)
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return self._typestr
def python_cast(self, val):
"""Returns `val``s casted data.
:raises: `ValueError` if cast failes.
"""
return self._type(val)
@staticmethod
# private functions
@staticmethod
def _type_from_typestr(typestr):
rettype = None
for k, v in Type._typemap.iteritems():
if v == typestr:
assert(rettype is None)
rettype = k
return rettype
|
laysakura/relshell
|
relshell/daemon_shelloperator.py
|
DaemonShellOperator.run
|
python
|
def run(self, in_batches):
if len(in_batches) != len(self._batcmd.batch_to_file_s):
BaseShellOperator._rm_process_input_tmpfiles(self._batcmd.batch_to_file_s) # [todo] - Removing tmpfiles can be easily forgot. Less lifetime for tmpfile.
raise AttributeError('len(in_batches) == %d, while %d IN_BATCH* are specified in command below:%s$ %s' %
(len(in_batches), len(self._batcmd.batch_to_file_s), os.linesep, self._batcmd.sh_cmd))
# prepare & start process (if necessary)
BaseShellOperator._batches_to_tmpfile(self._in_record_sep, self._in_column_sep, in_batches, self._batcmd.batch_to_file_s)
if self._process is None:
self._process = BaseShellOperator._start_process(
self._batcmd, self._cwd, self._env,
non_blocking_stdout=True)
# Begin thread to read from subprocess's stdout.
# Without this thread, subprocess's output buffer becomes full and no one solves it.
t_consumer = Thread(target=get_subprocess_output,
args=(self._process.stdout, self._batch_done_output, self._subprocess_out_str))
t_consumer.start()
# pass batch to subprocess
BaseShellOperator._batch_to_stdin(self._process, self._in_record_sep, self._in_column_sep,
in_batches, self._batcmd.batch_to_file_s)
# pass batch-done indicator to subprocess
self._process.stdin.write(self._batch_done_indicator)
# get output from subprocess
t_consumer.join()
subprocess_out_str = self._subprocess_out_str[0]
self._subprocess_out_str = []
out_batch = BaseShellOperator._out_str_to_batch(subprocess_out_str,
self._out_recdef, self._out_col_patterns)
return out_batch
|
Run shell operator synchronously to eat `in_batches`
:param in_batches: `tuple` of batches to process
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/daemon_shelloperator.py#L82-L118
|
[
"def _start_process(batcmd, cwd, env, non_blocking_stdout=False):\n try:\n p = Popen(\n shlex.split(batcmd.sh_cmd),\n stdin = PIPE if batcmd.has_input_from_stdin() else None,\n stdout = PIPE if batcmd.batch_from_file and batcmd.batch_from_file.is_stdout() else None,\n stderr = None,\n cwd = cwd,\n env = env,\n bufsize = 1 if non_blocking_stdout else 0,\n )\n BaseShellOperator._logger.info('[Command execution] $ %s' % (batcmd.sh_cmd))\n except OSError as e:\n raise OSError('Following command fails - %s:%s$ %s' % (e, os.linesep, batcmd.sh_cmd))\n\n if non_blocking_stdout:\n fcntl.fcntl(p.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)\n\n return p\n",
"def _batches_to_tmpfile(in_record_sep, in_column_sep, in_batches, batch_to_file_s):\n \"\"\"Create files to store in-batches contents (if necessary)\"\"\"\n for i, b2f in enumerate(batch_to_file_s):\n if b2f.is_tmpfile():\n input_str = BaseShellOperator._input_str(in_batches[i], in_record_sep, in_column_sep)\n b2f.write_tmpfile(input_str)\n",
"def _batch_to_stdin(process, in_record_sep, in_column_sep, in_batches, batch_to_file_s):\n \"\"\"Write in-batch contents to `process` 's stdin (if necessary)\n \"\"\"\n for i, b2f in enumerate(batch_to_file_s):\n if b2f.is_stdin():\n input_str = BaseShellOperator._input_str(in_batches[i], in_record_sep, in_column_sep)\n b2f.write_stdin(process.stdin, input_str)\n break # at most 1 batch_to_file can be from stdin\n",
"def _out_str_to_batch(out_str, out_recdef, out_col_patterns):\n out_recs = []\n pos = 0\n while True:\n (rec, rec_str_len) = BaseShellOperator._parse_record(out_str[pos:], out_col_patterns, out_recdef)\n if rec is None:\n break\n out_recs.append(rec)\n pos += rec_str_len\n out_batch = Batch(out_recdef, tuple(out_recs))\n return out_batch\n",
"def _rm_process_input_tmpfiles(batch_to_file_s):\n for b2f in batch_to_file_s:\n if b2f.is_tmpfile():\n b2f.finish()\n"
] |
class DaemonShellOperator(BaseShellOperator):
"""Instantiate process and keep it running.
`DaemonShellOperator` can instantiate processes which satisfy the following constraints:
1. Inputs records from `stdin`
2. Safely dies when `EOF` is input
3. Outputs deterministic string when inputting a specific record string.
Pair of "specific record string" & "deterministic string" is used as a separtor to distinguish each batch.
e.g. `cat` process outputs *LAST_RECORD_OF_BATCH\n* when inputting *LAST_RECORD_OF_BATCH\n*
Future support
--------------
Above constraints are losen like below in future:
1. Support input-records from file if file is only appended
2. Support non-`EOF` process terminator (e.g. `exit\n` command for some intreractive shell)
"""
def __init__(
self,
# non-kw & common w/ BaseShellOperator param
cmd,
out_record_def,
# non-kw & original param
out_col_patterns,
batch_done_indicator,
batch_done_output,
# kw & common w/ BaseShellOperator param
success_exitcodes=(0, ),
cwd=None,
env=os.environ,
in_record_sep=os.linesep,
in_column_sep=' ',
# kw & original param
):
"""Constuctor
:raises: `AttributeError` if `cmd` doesn't seem to satisfy `constraints <relshell.daemon_shelloperator.DaemonShellOperator>`_
"""
BaseShellOperator.__init__(
self,
cmd,
out_record_def,
success_exitcodes,
cwd,
env,
in_record_sep,
in_column_sep,
out_col_patterns,
)
self._batch_done_indicator = batch_done_indicator
self._batch_done_output = batch_done_output
self._process = None
self._subprocess_out_str = [] # 0-th is subprocess's output. must not be str since it is immutable &
# get_subprocess_output cannot modify it
if not self._batcmd.has_input_from_stdin():
BaseShellOperator._rm_process_input_tmpfiles(self._batcmd.batch_to_file_s) # [todo] - Removing tmpfiles can be easily forgot. Less lifetime for tmpfile.
raise AttributeError('Following command doesn\'t have input from stdin:%s$ %s' %
(os.linesep, self._batcmd.sh_cmd))
def kill(self):
"""Kill instantiated process
:raises: `AttributeError` if instantiated process doesn't seem to satisfy `constraints <relshell.daemon_shelloperator.DaemonShellOperator>`_
"""
BaseShellOperator._close_process_input_stdin(self._batcmd.batch_to_file_s)
BaseShellOperator._wait_process(self._process, self._batcmd.sh_cmd, self._success_exitcodes)
BaseShellOperator._rm_process_input_tmpfiles(self._batcmd.batch_to_file_s)
self._process = None
def getpid(self):
return self._process.pid if self._process else None
@staticmethod
def _batch_done_start_pos(process_output_str, batch_done_output):
return process_output_str.rfind(batch_done_output)
|
laysakura/relshell
|
relshell/daemon_shelloperator.py
|
DaemonShellOperator.kill
|
python
|
def kill(self):
BaseShellOperator._close_process_input_stdin(self._batcmd.batch_to_file_s)
BaseShellOperator._wait_process(self._process, self._batcmd.sh_cmd, self._success_exitcodes)
BaseShellOperator._rm_process_input_tmpfiles(self._batcmd.batch_to_file_s)
self._process = None
|
Kill instantiated process
:raises: `AttributeError` if instantiated process doesn't seem to satisfy `constraints <relshell.daemon_shelloperator.DaemonShellOperator>`_
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/daemon_shelloperator.py#L120-L128
|
[
"def _wait_process(process, sh_cmd, success_exitcodes):\n exitcode = process.wait() # [todo] - if this call does not return, it means 2nd `constraints <relshell.daemon_shelloperator.DaemonShellOperator>`_ are not sutisfied => raise `AttributeError`\n if exitcode not in success_exitcodes:\n raise OSError('Following command ended with exitcode %d:%s$ %s' % (exitcode, os.linesep, sh_cmd))\n",
"def _close_process_input_stdin(batch_to_file_s):\n for b2f in batch_to_file_s:\n if b2f.is_stdin():\n b2f.finish()\n",
"def _rm_process_input_tmpfiles(batch_to_file_s):\n for b2f in batch_to_file_s:\n if b2f.is_tmpfile():\n b2f.finish()\n"
] |
class DaemonShellOperator(BaseShellOperator):
"""Instantiate process and keep it running.
`DaemonShellOperator` can instantiate processes which satisfy the following constraints:
1. Inputs records from `stdin`
2. Safely dies when `EOF` is input
3. Outputs deterministic string when inputting a specific record string.
Pair of "specific record string" & "deterministic string" is used as a separtor to distinguish each batch.
e.g. `cat` process outputs *LAST_RECORD_OF_BATCH\n* when inputting *LAST_RECORD_OF_BATCH\n*
Future support
--------------
Above constraints are losen like below in future:
1. Support input-records from file if file is only appended
2. Support non-`EOF` process terminator (e.g. `exit\n` command for some intreractive shell)
"""
def __init__(
self,
# non-kw & common w/ BaseShellOperator param
cmd,
out_record_def,
# non-kw & original param
out_col_patterns,
batch_done_indicator,
batch_done_output,
# kw & common w/ BaseShellOperator param
success_exitcodes=(0, ),
cwd=None,
env=os.environ,
in_record_sep=os.linesep,
in_column_sep=' ',
# kw & original param
):
"""Constuctor
:raises: `AttributeError` if `cmd` doesn't seem to satisfy `constraints <relshell.daemon_shelloperator.DaemonShellOperator>`_
"""
BaseShellOperator.__init__(
self,
cmd,
out_record_def,
success_exitcodes,
cwd,
env,
in_record_sep,
in_column_sep,
out_col_patterns,
)
self._batch_done_indicator = batch_done_indicator
self._batch_done_output = batch_done_output
self._process = None
self._subprocess_out_str = [] # 0-th is subprocess's output. must not be str since it is immutable &
# get_subprocess_output cannot modify it
if not self._batcmd.has_input_from_stdin():
BaseShellOperator._rm_process_input_tmpfiles(self._batcmd.batch_to_file_s) # [todo] - Removing tmpfiles can be easily forgot. Less lifetime for tmpfile.
raise AttributeError('Following command doesn\'t have input from stdin:%s$ %s' %
(os.linesep, self._batcmd.sh_cmd))
def run(self, in_batches):
"""Run shell operator synchronously to eat `in_batches`
:param in_batches: `tuple` of batches to process
"""
if len(in_batches) != len(self._batcmd.batch_to_file_s):
BaseShellOperator._rm_process_input_tmpfiles(self._batcmd.batch_to_file_s) # [todo] - Removing tmpfiles can be easily forgot. Less lifetime for tmpfile.
raise AttributeError('len(in_batches) == %d, while %d IN_BATCH* are specified in command below:%s$ %s' %
(len(in_batches), len(self._batcmd.batch_to_file_s), os.linesep, self._batcmd.sh_cmd))
# prepare & start process (if necessary)
BaseShellOperator._batches_to_tmpfile(self._in_record_sep, self._in_column_sep, in_batches, self._batcmd.batch_to_file_s)
if self._process is None:
self._process = BaseShellOperator._start_process(
self._batcmd, self._cwd, self._env,
non_blocking_stdout=True)
# Begin thread to read from subprocess's stdout.
# Without this thread, subprocess's output buffer becomes full and no one solves it.
t_consumer = Thread(target=get_subprocess_output,
args=(self._process.stdout, self._batch_done_output, self._subprocess_out_str))
t_consumer.start()
# pass batch to subprocess
BaseShellOperator._batch_to_stdin(self._process, self._in_record_sep, self._in_column_sep,
in_batches, self._batcmd.batch_to_file_s)
# pass batch-done indicator to subprocess
self._process.stdin.write(self._batch_done_indicator)
# get output from subprocess
t_consumer.join()
subprocess_out_str = self._subprocess_out_str[0]
self._subprocess_out_str = []
out_batch = BaseShellOperator._out_str_to_batch(subprocess_out_str,
self._out_recdef, self._out_col_patterns)
return out_batch
def getpid(self):
return self._process.pid if self._process else None
@staticmethod
def _batch_done_start_pos(process_output_str, batch_done_output):
return process_output_str.rfind(batch_done_output)
|
laysakura/relshell
|
relshell/batch_command.py
|
BatchCommand._parse
|
python
|
def _parse(batch_cmd):
cmd_array = shlex.split(batch_cmd)
(cmd_array, batch_to_file_s) = BatchCommand._parse_in_batches(cmd_array)
(cmd_array, batch_from_file) = BatchCommand._parse_out_batch(cmd_array)
return (list2cmdline(cmd_array), batch_to_file_s, batch_from_file)
|
:rtype: (sh_cmd, batch_to_file_s, batch_from_file)
:returns: parsed result like below:
.. code-block:: python
# when parsing 'diff IN_BATCH0 IN_BATCH1 > OUT_BATCH'
(
'diff /tmp/relshell-AbCDeF /tmp/relshell-uVwXyz',
( <instance of BatchToFile>, <instance of BatchToFile> ) # (IN_BATCH0, IN_BATCH1)
'STDOUT',
)
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/batch_command.py#L40-L57
|
[
"def _parse_in_batches(cmd_array):\n \"\"\"Find patterns that match to `in_batches_pat` and replace them into `STDIN` or `TMPFILE`.\n\n :param cmd_array: `shlex.split`-ed command\n :rtype: ([cmd_array], ( batch_to_file, batch_to_file, ... ) )\n :returns: Modified `cmd_array` and tuple to show how each IN_BATCH is instantiated (TMPFILE or STDIN).\n Returned `cmd_array` drops IN_BATCH related tokens.\n :raises: `IndexError` if IN_BATCHes don't have sequential ID starting from 0\n \"\"\"\n res_cmd_array = cmd_array[:]\n res_batch_to_file_s = []\n\n in_batches_cmdidx = BatchCommand._in_batches_cmdidx(cmd_array)\n for batch_id, cmdidx in enumerate(in_batches_cmdidx):\n if cmdidx > 0 and cmd_array[cmdidx - 1] == '<': # e.g. `< IN_BATCH0`\n res_batch_to_file_s.append(BatchToFile('STDIN'))\n del res_cmd_array[cmdidx], res_cmd_array[cmdidx - 1]\n\n else: # IN_BATCHx is TMPFILE\n batch_to_file = BatchToFile('TMPFILE')\n res_batch_to_file_s.append(batch_to_file)\n res_cmd_array[cmdidx] = batch_to_file.tmpfile_path()\n\n return (res_cmd_array, tuple(res_batch_to_file_s))\n",
"def _parse_out_batch(cmd_array):\n \"\"\"Find patterns that match to `out_batch_pat` and replace them into `STDOUT` or `TMPFILE`.\n\n :param cmd_array: `shlex.split`-ed command\n :rtype: ([cmd_array], batch_from_file)\n :returns: Modified `cmd_array` and tuple to show how OUT_BATCH is instantiated (TMPFILE or STDOUT).\n Returned `cmd_array` drops OUT_BATCH related tokens.\n :raises: `IndexError` if multiple OUT_BATCH are found\n \"\"\"\n res_cmd_array = cmd_array[:]\n res_batch_from_file = None\n\n out_batch_cmdidx = BatchCommand._out_batch_cmdidx(cmd_array)\n if out_batch_cmdidx is None:\n return (res_cmd_array, res_batch_from_file)\n\n if out_batch_cmdidx > 0 and cmd_array[out_batch_cmdidx - 1] == '>': # e.g. `> OUT_BATCH`\n res_batch_from_file = BatchFromFile('STDOUT')\n del res_cmd_array[out_batch_cmdidx], res_cmd_array[out_batch_cmdidx - 1]\n\n else: # OUT_BATCH is TMPFILE\n res_batch_from_file = BatchFromFile('TMPFILE')\n res_cmd_array[out_batch_cmdidx] = res_batch_from_file.tmpfile_path()\n\n return (res_cmd_array, res_batch_from_file)\n"
] |
class BatchCommand(object):
"""BatchCommand"""
in_batches_pat = re.compile('IN_BATCH(\d+)')
"""Input batches"""
out_batch_pat = re.compile('OUT_BATCH')
"""Output batch"""
def __init__(self, batch_cmd):
"""Constructor
:param batch_cmd: command string w/ (IN|OUT)_BATCH*.
"""
(self.sh_cmd, self.batch_to_file_s, self.batch_from_file) = BatchCommand._parse(batch_cmd)
def has_input_from_stdin(self):
"""Return if any IN_BATCH* is input from stdin to process"""
for b2f in self.batch_to_file_s:
if b2f.is_stdin():
return True
return False
@staticmethod
@staticmethod
def _parse_in_batches(cmd_array):
"""Find patterns that match to `in_batches_pat` and replace them into `STDIN` or `TMPFILE`.
:param cmd_array: `shlex.split`-ed command
:rtype: ([cmd_array], ( batch_to_file, batch_to_file, ... ) )
:returns: Modified `cmd_array` and tuple to show how each IN_BATCH is instantiated (TMPFILE or STDIN).
Returned `cmd_array` drops IN_BATCH related tokens.
:raises: `IndexError` if IN_BATCHes don't have sequential ID starting from 0
"""
res_cmd_array = cmd_array[:]
res_batch_to_file_s = []
in_batches_cmdidx = BatchCommand._in_batches_cmdidx(cmd_array)
for batch_id, cmdidx in enumerate(in_batches_cmdidx):
if cmdidx > 0 and cmd_array[cmdidx - 1] == '<': # e.g. `< IN_BATCH0`
res_batch_to_file_s.append(BatchToFile('STDIN'))
del res_cmd_array[cmdidx], res_cmd_array[cmdidx - 1]
else: # IN_BATCHx is TMPFILE
batch_to_file = BatchToFile('TMPFILE')
res_batch_to_file_s.append(batch_to_file)
res_cmd_array[cmdidx] = batch_to_file.tmpfile_path()
return (res_cmd_array, tuple(res_batch_to_file_s))
@staticmethod
def _parse_out_batch(cmd_array):
"""Find patterns that match to `out_batch_pat` and replace them into `STDOUT` or `TMPFILE`.
:param cmd_array: `shlex.split`-ed command
:rtype: ([cmd_array], batch_from_file)
:returns: Modified `cmd_array` and tuple to show how OUT_BATCH is instantiated (TMPFILE or STDOUT).
Returned `cmd_array` drops OUT_BATCH related tokens.
:raises: `IndexError` if multiple OUT_BATCH are found
"""
res_cmd_array = cmd_array[:]
res_batch_from_file = None
out_batch_cmdidx = BatchCommand._out_batch_cmdidx(cmd_array)
if out_batch_cmdidx is None:
return (res_cmd_array, res_batch_from_file)
if out_batch_cmdidx > 0 and cmd_array[out_batch_cmdidx - 1] == '>': # e.g. `> OUT_BATCH`
res_batch_from_file = BatchFromFile('STDOUT')
del res_cmd_array[out_batch_cmdidx], res_cmd_array[out_batch_cmdidx - 1]
else: # OUT_BATCH is TMPFILE
res_batch_from_file = BatchFromFile('TMPFILE')
res_cmd_array[out_batch_cmdidx] = res_batch_from_file.tmpfile_path()
return (res_cmd_array, res_batch_from_file)
@staticmethod
def _in_batches_cmdidx(cmd_array):
"""Raise `IndexError` if IN_BATCH0 - IN_BATCHx is not used sequentially in `cmd_array`
:returns: (IN_BATCH0's cmdidx, IN_BATCH1's cmdidx, ...)
$ cat a.txt IN_BATCH1 IN_BATCH0 b.txt c.txt IN_BATCH2 => (3, 2, 5)
"""
in_batches_cmdidx_dict = {}
for cmdidx, tok in enumerate(cmd_array):
mat = BatchCommand.in_batches_pat.match(tok)
if mat:
batch_idx = int(mat.group(1))
if batch_idx in in_batches_cmdidx_dict:
raise IndexError(
'IN_BATCH%d is used multiple times in command below, while IN_BATCH0 - IN_BATCH%d must be used:%s$ %s' %
(batch_idx, len(in_batches_cmdidx_dict) - 1, os.linesep, list2cmdline(cmd_array)))
in_batches_cmdidx_dict[batch_idx] = cmdidx
in_batches_cmdidx = []
for batch_idx in range(len(in_batches_cmdidx_dict)):
try:
cmdidx = in_batches_cmdidx_dict[batch_idx]
in_batches_cmdidx.append(cmdidx)
except KeyError:
raise IndexError('IN_BATCH%d is not found in command below, while IN_BATCH0 - IN_BATCH%d must be used:%s$ %s' %
(batch_idx, len(in_batches_cmdidx_dict) - 1, os.linesep, list2cmdline(cmd_array)))
return tuple(in_batches_cmdidx)
@staticmethod
def _out_batch_cmdidx(cmd_array):
"""Raise `IndexError` if OUT_BATCH is used multiple time
:returns: OUT_BATCH cmdidx (None if OUT_BATCH is not in `cmd_array`)
$ cat a.txt > OUT_BATCH => 3
"""
out_batch_cmdidx = None
for cmdidx, tok in enumerate(cmd_array):
mat = BatchCommand.out_batch_pat.match(tok)
if mat:
if out_batch_cmdidx:
raise IndexError(
'OUT_BATCH is used multiple times in command below:%s$ %s' %
(os.linesep, list2cmdline(cmd_array)))
out_batch_cmdidx = cmdidx
return out_batch_cmdidx
|
laysakura/relshell
|
relshell/batch_command.py
|
BatchCommand._parse_in_batches
|
python
|
def _parse_in_batches(cmd_array):
res_cmd_array = cmd_array[:]
res_batch_to_file_s = []
in_batches_cmdidx = BatchCommand._in_batches_cmdidx(cmd_array)
for batch_id, cmdidx in enumerate(in_batches_cmdidx):
if cmdidx > 0 and cmd_array[cmdidx - 1] == '<': # e.g. `< IN_BATCH0`
res_batch_to_file_s.append(BatchToFile('STDIN'))
del res_cmd_array[cmdidx], res_cmd_array[cmdidx - 1]
else: # IN_BATCHx is TMPFILE
batch_to_file = BatchToFile('TMPFILE')
res_batch_to_file_s.append(batch_to_file)
res_cmd_array[cmdidx] = batch_to_file.tmpfile_path()
return (res_cmd_array, tuple(res_batch_to_file_s))
|
Find patterns that match to `in_batches_pat` and replace them into `STDIN` or `TMPFILE`.
:param cmd_array: `shlex.split`-ed command
:rtype: ([cmd_array], ( batch_to_file, batch_to_file, ... ) )
:returns: Modified `cmd_array` and tuple to show how each IN_BATCH is instantiated (TMPFILE or STDIN).
Returned `cmd_array` drops IN_BATCH related tokens.
:raises: `IndexError` if IN_BATCHes don't have sequential ID starting from 0
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/batch_command.py#L60-L83
| null |
class BatchCommand(object):
"""BatchCommand"""
in_batches_pat = re.compile('IN_BATCH(\d+)')
"""Input batches"""
out_batch_pat = re.compile('OUT_BATCH')
"""Output batch"""
def __init__(self, batch_cmd):
"""Constructor
:param batch_cmd: command string w/ (IN|OUT)_BATCH*.
"""
(self.sh_cmd, self.batch_to_file_s, self.batch_from_file) = BatchCommand._parse(batch_cmd)
def has_input_from_stdin(self):
"""Return if any IN_BATCH* is input from stdin to process"""
for b2f in self.batch_to_file_s:
if b2f.is_stdin():
return True
return False
@staticmethod
def _parse(batch_cmd):
"""
:rtype: (sh_cmd, batch_to_file_s, batch_from_file)
:returns: parsed result like below:
.. code-block:: python
# when parsing 'diff IN_BATCH0 IN_BATCH1 > OUT_BATCH'
(
'diff /tmp/relshell-AbCDeF /tmp/relshell-uVwXyz',
( <instance of BatchToFile>, <instance of BatchToFile> ) # (IN_BATCH0, IN_BATCH1)
'STDOUT',
)
"""
cmd_array = shlex.split(batch_cmd)
(cmd_array, batch_to_file_s) = BatchCommand._parse_in_batches(cmd_array)
(cmd_array, batch_from_file) = BatchCommand._parse_out_batch(cmd_array)
return (list2cmdline(cmd_array), batch_to_file_s, batch_from_file)
@staticmethod
@staticmethod
def _parse_out_batch(cmd_array):
"""Find patterns that match to `out_batch_pat` and replace them into `STDOUT` or `TMPFILE`.
:param cmd_array: `shlex.split`-ed command
:rtype: ([cmd_array], batch_from_file)
:returns: Modified `cmd_array` and tuple to show how OUT_BATCH is instantiated (TMPFILE or STDOUT).
Returned `cmd_array` drops OUT_BATCH related tokens.
:raises: `IndexError` if multiple OUT_BATCH are found
"""
res_cmd_array = cmd_array[:]
res_batch_from_file = None
out_batch_cmdidx = BatchCommand._out_batch_cmdidx(cmd_array)
if out_batch_cmdidx is None:
return (res_cmd_array, res_batch_from_file)
if out_batch_cmdidx > 0 and cmd_array[out_batch_cmdidx - 1] == '>': # e.g. `> OUT_BATCH`
res_batch_from_file = BatchFromFile('STDOUT')
del res_cmd_array[out_batch_cmdidx], res_cmd_array[out_batch_cmdidx - 1]
else: # OUT_BATCH is TMPFILE
res_batch_from_file = BatchFromFile('TMPFILE')
res_cmd_array[out_batch_cmdidx] = res_batch_from_file.tmpfile_path()
return (res_cmd_array, res_batch_from_file)
@staticmethod
def _in_batches_cmdidx(cmd_array):
"""Raise `IndexError` if IN_BATCH0 - IN_BATCHx is not used sequentially in `cmd_array`
:returns: (IN_BATCH0's cmdidx, IN_BATCH1's cmdidx, ...)
$ cat a.txt IN_BATCH1 IN_BATCH0 b.txt c.txt IN_BATCH2 => (3, 2, 5)
"""
in_batches_cmdidx_dict = {}
for cmdidx, tok in enumerate(cmd_array):
mat = BatchCommand.in_batches_pat.match(tok)
if mat:
batch_idx = int(mat.group(1))
if batch_idx in in_batches_cmdidx_dict:
raise IndexError(
'IN_BATCH%d is used multiple times in command below, while IN_BATCH0 - IN_BATCH%d must be used:%s$ %s' %
(batch_idx, len(in_batches_cmdidx_dict) - 1, os.linesep, list2cmdline(cmd_array)))
in_batches_cmdidx_dict[batch_idx] = cmdidx
in_batches_cmdidx = []
for batch_idx in range(len(in_batches_cmdidx_dict)):
try:
cmdidx = in_batches_cmdidx_dict[batch_idx]
in_batches_cmdidx.append(cmdidx)
except KeyError:
raise IndexError('IN_BATCH%d is not found in command below, while IN_BATCH0 - IN_BATCH%d must be used:%s$ %s' %
(batch_idx, len(in_batches_cmdidx_dict) - 1, os.linesep, list2cmdline(cmd_array)))
return tuple(in_batches_cmdidx)
@staticmethod
def _out_batch_cmdidx(cmd_array):
"""Raise `IndexError` if OUT_BATCH is used multiple time
:returns: OUT_BATCH cmdidx (None if OUT_BATCH is not in `cmd_array`)
$ cat a.txt > OUT_BATCH => 3
"""
out_batch_cmdidx = None
for cmdidx, tok in enumerate(cmd_array):
mat = BatchCommand.out_batch_pat.match(tok)
if mat:
if out_batch_cmdidx:
raise IndexError(
'OUT_BATCH is used multiple times in command below:%s$ %s' %
(os.linesep, list2cmdline(cmd_array)))
out_batch_cmdidx = cmdidx
return out_batch_cmdidx
|
laysakura/relshell
|
relshell/batch_command.py
|
BatchCommand._parse_out_batch
|
python
|
def _parse_out_batch(cmd_array):
res_cmd_array = cmd_array[:]
res_batch_from_file = None
out_batch_cmdidx = BatchCommand._out_batch_cmdidx(cmd_array)
if out_batch_cmdidx is None:
return (res_cmd_array, res_batch_from_file)
if out_batch_cmdidx > 0 and cmd_array[out_batch_cmdidx - 1] == '>': # e.g. `> OUT_BATCH`
res_batch_from_file = BatchFromFile('STDOUT')
del res_cmd_array[out_batch_cmdidx], res_cmd_array[out_batch_cmdidx - 1]
else: # OUT_BATCH is TMPFILE
res_batch_from_file = BatchFromFile('TMPFILE')
res_cmd_array[out_batch_cmdidx] = res_batch_from_file.tmpfile_path()
return (res_cmd_array, res_batch_from_file)
|
Find patterns that match to `out_batch_pat` and replace them into `STDOUT` or `TMPFILE`.
:param cmd_array: `shlex.split`-ed command
:rtype: ([cmd_array], batch_from_file)
:returns: Modified `cmd_array` and tuple to show how OUT_BATCH is instantiated (TMPFILE or STDOUT).
Returned `cmd_array` drops OUT_BATCH related tokens.
:raises: `IndexError` if multiple OUT_BATCH are found
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/batch_command.py#L86-L110
| null |
class BatchCommand(object):
"""BatchCommand"""
in_batches_pat = re.compile('IN_BATCH(\d+)')
"""Input batches"""
out_batch_pat = re.compile('OUT_BATCH')
"""Output batch"""
def __init__(self, batch_cmd):
"""Constructor
:param batch_cmd: command string w/ (IN|OUT)_BATCH*.
"""
(self.sh_cmd, self.batch_to_file_s, self.batch_from_file) = BatchCommand._parse(batch_cmd)
def has_input_from_stdin(self):
"""Return if any IN_BATCH* is input from stdin to process"""
for b2f in self.batch_to_file_s:
if b2f.is_stdin():
return True
return False
@staticmethod
def _parse(batch_cmd):
"""
:rtype: (sh_cmd, batch_to_file_s, batch_from_file)
:returns: parsed result like below:
.. code-block:: python
# when parsing 'diff IN_BATCH0 IN_BATCH1 > OUT_BATCH'
(
'diff /tmp/relshell-AbCDeF /tmp/relshell-uVwXyz',
( <instance of BatchToFile>, <instance of BatchToFile> ) # (IN_BATCH0, IN_BATCH1)
'STDOUT',
)
"""
cmd_array = shlex.split(batch_cmd)
(cmd_array, batch_to_file_s) = BatchCommand._parse_in_batches(cmd_array)
(cmd_array, batch_from_file) = BatchCommand._parse_out_batch(cmd_array)
return (list2cmdline(cmd_array), batch_to_file_s, batch_from_file)
@staticmethod
def _parse_in_batches(cmd_array):
"""Find patterns that match to `in_batches_pat` and replace them into `STDIN` or `TMPFILE`.
:param cmd_array: `shlex.split`-ed command
:rtype: ([cmd_array], ( batch_to_file, batch_to_file, ... ) )
:returns: Modified `cmd_array` and tuple to show how each IN_BATCH is instantiated (TMPFILE or STDIN).
Returned `cmd_array` drops IN_BATCH related tokens.
:raises: `IndexError` if IN_BATCHes don't have sequential ID starting from 0
"""
res_cmd_array = cmd_array[:]
res_batch_to_file_s = []
in_batches_cmdidx = BatchCommand._in_batches_cmdidx(cmd_array)
for batch_id, cmdidx in enumerate(in_batches_cmdidx):
if cmdidx > 0 and cmd_array[cmdidx - 1] == '<': # e.g. `< IN_BATCH0`
res_batch_to_file_s.append(BatchToFile('STDIN'))
del res_cmd_array[cmdidx], res_cmd_array[cmdidx - 1]
else: # IN_BATCHx is TMPFILE
batch_to_file = BatchToFile('TMPFILE')
res_batch_to_file_s.append(batch_to_file)
res_cmd_array[cmdidx] = batch_to_file.tmpfile_path()
return (res_cmd_array, tuple(res_batch_to_file_s))
@staticmethod
@staticmethod
def _in_batches_cmdidx(cmd_array):
"""Raise `IndexError` if IN_BATCH0 - IN_BATCHx is not used sequentially in `cmd_array`
:returns: (IN_BATCH0's cmdidx, IN_BATCH1's cmdidx, ...)
$ cat a.txt IN_BATCH1 IN_BATCH0 b.txt c.txt IN_BATCH2 => (3, 2, 5)
"""
in_batches_cmdidx_dict = {}
for cmdidx, tok in enumerate(cmd_array):
mat = BatchCommand.in_batches_pat.match(tok)
if mat:
batch_idx = int(mat.group(1))
if batch_idx in in_batches_cmdidx_dict:
raise IndexError(
'IN_BATCH%d is used multiple times in command below, while IN_BATCH0 - IN_BATCH%d must be used:%s$ %s' %
(batch_idx, len(in_batches_cmdidx_dict) - 1, os.linesep, list2cmdline(cmd_array)))
in_batches_cmdidx_dict[batch_idx] = cmdidx
in_batches_cmdidx = []
for batch_idx in range(len(in_batches_cmdidx_dict)):
try:
cmdidx = in_batches_cmdidx_dict[batch_idx]
in_batches_cmdidx.append(cmdidx)
except KeyError:
raise IndexError('IN_BATCH%d is not found in command below, while IN_BATCH0 - IN_BATCH%d must be used:%s$ %s' %
(batch_idx, len(in_batches_cmdidx_dict) - 1, os.linesep, list2cmdline(cmd_array)))
return tuple(in_batches_cmdidx)
@staticmethod
def _out_batch_cmdidx(cmd_array):
"""Raise `IndexError` if OUT_BATCH is used multiple time
:returns: OUT_BATCH cmdidx (None if OUT_BATCH is not in `cmd_array`)
$ cat a.txt > OUT_BATCH => 3
"""
out_batch_cmdidx = None
for cmdidx, tok in enumerate(cmd_array):
mat = BatchCommand.out_batch_pat.match(tok)
if mat:
if out_batch_cmdidx:
raise IndexError(
'OUT_BATCH is used multiple times in command below:%s$ %s' %
(os.linesep, list2cmdline(cmd_array)))
out_batch_cmdidx = cmdidx
return out_batch_cmdidx
|
laysakura/relshell
|
relshell/batch_command.py
|
BatchCommand._in_batches_cmdidx
|
python
|
def _in_batches_cmdidx(cmd_array):
in_batches_cmdidx_dict = {}
for cmdidx, tok in enumerate(cmd_array):
mat = BatchCommand.in_batches_pat.match(tok)
if mat:
batch_idx = int(mat.group(1))
if batch_idx in in_batches_cmdidx_dict:
raise IndexError(
'IN_BATCH%d is used multiple times in command below, while IN_BATCH0 - IN_BATCH%d must be used:%s$ %s' %
(batch_idx, len(in_batches_cmdidx_dict) - 1, os.linesep, list2cmdline(cmd_array)))
in_batches_cmdidx_dict[batch_idx] = cmdidx
in_batches_cmdidx = []
for batch_idx in range(len(in_batches_cmdidx_dict)):
try:
cmdidx = in_batches_cmdidx_dict[batch_idx]
in_batches_cmdidx.append(cmdidx)
except KeyError:
raise IndexError('IN_BATCH%d is not found in command below, while IN_BATCH0 - IN_BATCH%d must be used:%s$ %s' %
(batch_idx, len(in_batches_cmdidx_dict) - 1, os.linesep, list2cmdline(cmd_array)))
return tuple(in_batches_cmdidx)
|
Raise `IndexError` if IN_BATCH0 - IN_BATCHx is not used sequentially in `cmd_array`
:returns: (IN_BATCH0's cmdidx, IN_BATCH1's cmdidx, ...)
$ cat a.txt IN_BATCH1 IN_BATCH0 b.txt c.txt IN_BATCH2 => (3, 2, 5)
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/batch_command.py#L113-L139
| null |
class BatchCommand(object):
"""BatchCommand"""
in_batches_pat = re.compile('IN_BATCH(\d+)')
"""Input batches"""
out_batch_pat = re.compile('OUT_BATCH')
"""Output batch"""
def __init__(self, batch_cmd):
"""Constructor
:param batch_cmd: command string w/ (IN|OUT)_BATCH*.
"""
(self.sh_cmd, self.batch_to_file_s, self.batch_from_file) = BatchCommand._parse(batch_cmd)
def has_input_from_stdin(self):
"""Return if any IN_BATCH* is input from stdin to process"""
for b2f in self.batch_to_file_s:
if b2f.is_stdin():
return True
return False
@staticmethod
def _parse(batch_cmd):
"""
:rtype: (sh_cmd, batch_to_file_s, batch_from_file)
:returns: parsed result like below:
.. code-block:: python
# when parsing 'diff IN_BATCH0 IN_BATCH1 > OUT_BATCH'
(
'diff /tmp/relshell-AbCDeF /tmp/relshell-uVwXyz',
( <instance of BatchToFile>, <instance of BatchToFile> ) # (IN_BATCH0, IN_BATCH1)
'STDOUT',
)
"""
cmd_array = shlex.split(batch_cmd)
(cmd_array, batch_to_file_s) = BatchCommand._parse_in_batches(cmd_array)
(cmd_array, batch_from_file) = BatchCommand._parse_out_batch(cmd_array)
return (list2cmdline(cmd_array), batch_to_file_s, batch_from_file)
@staticmethod
def _parse_in_batches(cmd_array):
"""Find patterns that match to `in_batches_pat` and replace them into `STDIN` or `TMPFILE`.
:param cmd_array: `shlex.split`-ed command
:rtype: ([cmd_array], ( batch_to_file, batch_to_file, ... ) )
:returns: Modified `cmd_array` and tuple to show how each IN_BATCH is instantiated (TMPFILE or STDIN).
Returned `cmd_array` drops IN_BATCH related tokens.
:raises: `IndexError` if IN_BATCHes don't have sequential ID starting from 0
"""
res_cmd_array = cmd_array[:]
res_batch_to_file_s = []
in_batches_cmdidx = BatchCommand._in_batches_cmdidx(cmd_array)
for batch_id, cmdidx in enumerate(in_batches_cmdidx):
if cmdidx > 0 and cmd_array[cmdidx - 1] == '<': # e.g. `< IN_BATCH0`
res_batch_to_file_s.append(BatchToFile('STDIN'))
del res_cmd_array[cmdidx], res_cmd_array[cmdidx - 1]
else: # IN_BATCHx is TMPFILE
batch_to_file = BatchToFile('TMPFILE')
res_batch_to_file_s.append(batch_to_file)
res_cmd_array[cmdidx] = batch_to_file.tmpfile_path()
return (res_cmd_array, tuple(res_batch_to_file_s))
@staticmethod
def _parse_out_batch(cmd_array):
"""Find patterns that match to `out_batch_pat` and replace them into `STDOUT` or `TMPFILE`.
:param cmd_array: `shlex.split`-ed command
:rtype: ([cmd_array], batch_from_file)
:returns: Modified `cmd_array` and tuple to show how OUT_BATCH is instantiated (TMPFILE or STDOUT).
Returned `cmd_array` drops OUT_BATCH related tokens.
:raises: `IndexError` if multiple OUT_BATCH are found
"""
res_cmd_array = cmd_array[:]
res_batch_from_file = None
out_batch_cmdidx = BatchCommand._out_batch_cmdidx(cmd_array)
if out_batch_cmdidx is None:
return (res_cmd_array, res_batch_from_file)
if out_batch_cmdidx > 0 and cmd_array[out_batch_cmdidx - 1] == '>': # e.g. `> OUT_BATCH`
res_batch_from_file = BatchFromFile('STDOUT')
del res_cmd_array[out_batch_cmdidx], res_cmd_array[out_batch_cmdidx - 1]
else: # OUT_BATCH is TMPFILE
res_batch_from_file = BatchFromFile('TMPFILE')
res_cmd_array[out_batch_cmdidx] = res_batch_from_file.tmpfile_path()
return (res_cmd_array, res_batch_from_file)
@staticmethod
@staticmethod
def _out_batch_cmdidx(cmd_array):
"""Raise `IndexError` if OUT_BATCH is used multiple time
:returns: OUT_BATCH cmdidx (None if OUT_BATCH is not in `cmd_array`)
$ cat a.txt > OUT_BATCH => 3
"""
out_batch_cmdidx = None
for cmdidx, tok in enumerate(cmd_array):
mat = BatchCommand.out_batch_pat.match(tok)
if mat:
if out_batch_cmdidx:
raise IndexError(
'OUT_BATCH is used multiple times in command below:%s$ %s' %
(os.linesep, list2cmdline(cmd_array)))
out_batch_cmdidx = cmdidx
return out_batch_cmdidx
|
laysakura/relshell
|
relshell/batch_command.py
|
BatchCommand._out_batch_cmdidx
|
python
|
def _out_batch_cmdidx(cmd_array):
out_batch_cmdidx = None
for cmdidx, tok in enumerate(cmd_array):
mat = BatchCommand.out_batch_pat.match(tok)
if mat:
if out_batch_cmdidx:
raise IndexError(
'OUT_BATCH is used multiple times in command below:%s$ %s' %
(os.linesep, list2cmdline(cmd_array)))
out_batch_cmdidx = cmdidx
return out_batch_cmdidx
|
Raise `IndexError` if OUT_BATCH is used multiple time
:returns: OUT_BATCH cmdidx (None if OUT_BATCH is not in `cmd_array`)
$ cat a.txt > OUT_BATCH => 3
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/batch_command.py#L142-L157
| null |
class BatchCommand(object):
"""BatchCommand"""
in_batches_pat = re.compile('IN_BATCH(\d+)')
"""Input batches"""
out_batch_pat = re.compile('OUT_BATCH')
"""Output batch"""
def __init__(self, batch_cmd):
"""Constructor
:param batch_cmd: command string w/ (IN|OUT)_BATCH*.
"""
(self.sh_cmd, self.batch_to_file_s, self.batch_from_file) = BatchCommand._parse(batch_cmd)
def has_input_from_stdin(self):
"""Return if any IN_BATCH* is input from stdin to process"""
for b2f in self.batch_to_file_s:
if b2f.is_stdin():
return True
return False
@staticmethod
def _parse(batch_cmd):
"""
:rtype: (sh_cmd, batch_to_file_s, batch_from_file)
:returns: parsed result like below:
.. code-block:: python
# when parsing 'diff IN_BATCH0 IN_BATCH1 > OUT_BATCH'
(
'diff /tmp/relshell-AbCDeF /tmp/relshell-uVwXyz',
( <instance of BatchToFile>, <instance of BatchToFile> ) # (IN_BATCH0, IN_BATCH1)
'STDOUT',
)
"""
cmd_array = shlex.split(batch_cmd)
(cmd_array, batch_to_file_s) = BatchCommand._parse_in_batches(cmd_array)
(cmd_array, batch_from_file) = BatchCommand._parse_out_batch(cmd_array)
return (list2cmdline(cmd_array), batch_to_file_s, batch_from_file)
@staticmethod
def _parse_in_batches(cmd_array):
"""Find patterns that match to `in_batches_pat` and replace them into `STDIN` or `TMPFILE`.
:param cmd_array: `shlex.split`-ed command
:rtype: ([cmd_array], ( batch_to_file, batch_to_file, ... ) )
:returns: Modified `cmd_array` and tuple to show how each IN_BATCH is instantiated (TMPFILE or STDIN).
Returned `cmd_array` drops IN_BATCH related tokens.
:raises: `IndexError` if IN_BATCHes don't have sequential ID starting from 0
"""
res_cmd_array = cmd_array[:]
res_batch_to_file_s = []
in_batches_cmdidx = BatchCommand._in_batches_cmdidx(cmd_array)
for batch_id, cmdidx in enumerate(in_batches_cmdidx):
if cmdidx > 0 and cmd_array[cmdidx - 1] == '<': # e.g. `< IN_BATCH0`
res_batch_to_file_s.append(BatchToFile('STDIN'))
del res_cmd_array[cmdidx], res_cmd_array[cmdidx - 1]
else: # IN_BATCHx is TMPFILE
batch_to_file = BatchToFile('TMPFILE')
res_batch_to_file_s.append(batch_to_file)
res_cmd_array[cmdidx] = batch_to_file.tmpfile_path()
return (res_cmd_array, tuple(res_batch_to_file_s))
@staticmethod
def _parse_out_batch(cmd_array):
"""Find patterns that match to `out_batch_pat` and replace them into `STDOUT` or `TMPFILE`.
:param cmd_array: `shlex.split`-ed command
:rtype: ([cmd_array], batch_from_file)
:returns: Modified `cmd_array` and tuple to show how OUT_BATCH is instantiated (TMPFILE or STDOUT).
Returned `cmd_array` drops OUT_BATCH related tokens.
:raises: `IndexError` if multiple OUT_BATCH are found
"""
res_cmd_array = cmd_array[:]
res_batch_from_file = None
out_batch_cmdidx = BatchCommand._out_batch_cmdidx(cmd_array)
if out_batch_cmdidx is None:
return (res_cmd_array, res_batch_from_file)
if out_batch_cmdidx > 0 and cmd_array[out_batch_cmdidx - 1] == '>': # e.g. `> OUT_BATCH`
res_batch_from_file = BatchFromFile('STDOUT')
del res_cmd_array[out_batch_cmdidx], res_cmd_array[out_batch_cmdidx - 1]
else: # OUT_BATCH is TMPFILE
res_batch_from_file = BatchFromFile('TMPFILE')
res_cmd_array[out_batch_cmdidx] = res_batch_from_file.tmpfile_path()
return (res_cmd_array, res_batch_from_file)
@staticmethod
def _in_batches_cmdidx(cmd_array):
"""Raise `IndexError` if IN_BATCH0 - IN_BATCHx is not used sequentially in `cmd_array`
:returns: (IN_BATCH0's cmdidx, IN_BATCH1's cmdidx, ...)
$ cat a.txt IN_BATCH1 IN_BATCH0 b.txt c.txt IN_BATCH2 => (3, 2, 5)
"""
in_batches_cmdidx_dict = {}
for cmdidx, tok in enumerate(cmd_array):
mat = BatchCommand.in_batches_pat.match(tok)
if mat:
batch_idx = int(mat.group(1))
if batch_idx in in_batches_cmdidx_dict:
raise IndexError(
'IN_BATCH%d is used multiple times in command below, while IN_BATCH0 - IN_BATCH%d must be used:%s$ %s' %
(batch_idx, len(in_batches_cmdidx_dict) - 1, os.linesep, list2cmdline(cmd_array)))
in_batches_cmdidx_dict[batch_idx] = cmdidx
in_batches_cmdidx = []
for batch_idx in range(len(in_batches_cmdidx_dict)):
try:
cmdidx = in_batches_cmdidx_dict[batch_idx]
in_batches_cmdidx.append(cmdidx)
except KeyError:
raise IndexError('IN_BATCH%d is not found in command below, while IN_BATCH0 - IN_BATCH%d must be used:%s$ %s' %
(batch_idx, len(in_batches_cmdidx_dict) - 1, os.linesep, list2cmdline(cmd_array)))
return tuple(in_batches_cmdidx)
@staticmethod
|
laysakura/relshell
|
relshell/batch.py
|
Batch.next
|
python
|
def next(self):
if self._records_iter >= len(self._records):
raise StopIteration
self._records_iter += 1
return self._records[self._records_iter - 1]
|
Return one of record in this batch in out-of-order.
:raises: `StopIteration` when no more record is in this batch
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/batch.py#L39-L47
| null |
class Batch(object):
"""Set of records"""
def __init__(self, record_def, records, check_datatype=True):
"""Create an *immutable* batch of records
:param record_def: instance of `RecordDef <#relshell.recorddef.RecordDef>`_
:param records: records. Leftmost element is oldest (has to be treated earlier).
:type records: instance of `tuple`
:raises: `TypeError` when any record has mismatched type with :param:`record_def`
"""
# check each record type
if check_datatype:
map(lambda r: Record._chk_type(record_def, r), records)
self._rdef = record_def
self._records = records
self._records_iter = 0 # column number to iterate over
def record_def(self):
"""Return instance of :class:`RecordDef`"""
return self._rdef
def __iter__(self):
return self
def __str__(self):
return self.formatted_str('json')
def formatted_str(self, format):
"""Return formatted str.
:param format: one of 'json', 'csv' are supported
"""
assert(format in ('json', 'csv'))
ret_str_list = []
for rec in self._records:
if format == 'json':
ret_str_list.append('{')
for i in xrange(len(rec)):
colname, colval = self._rdef[i].name, rec[i]
ret_str_list.append('"%s":"%s"' % (colname, str(colval).replace('"', r'\"')))
ret_str_list.append(',')
ret_str_list.pop() # drop last comma
ret_str_list.append('}%s' % (os.linesep))
elif format == 'csv':
for i in xrange(len(rec)):
colval = rec[i]
ret_str_list.append('"%s"' % (str(colval).replace('"', r'\"')))
ret_str_list.append(',')
ret_str_list.pop() # drop last comma
ret_str_list.append('%s' % (os.linesep))
else:
assert(False)
return ''.join(ret_str_list)
def __eq__(self, other):
if len(self._records) != len(other._records):
return False
for i in xrange(len(self._records)):
if self._records[i] != other._records[i]:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self._records)
|
laysakura/relshell
|
relshell/batch.py
|
Batch.formatted_str
|
python
|
def formatted_str(self, format):
assert(format in ('json', 'csv'))
ret_str_list = []
for rec in self._records:
if format == 'json':
ret_str_list.append('{')
for i in xrange(len(rec)):
colname, colval = self._rdef[i].name, rec[i]
ret_str_list.append('"%s":"%s"' % (colname, str(colval).replace('"', r'\"')))
ret_str_list.append(',')
ret_str_list.pop() # drop last comma
ret_str_list.append('}%s' % (os.linesep))
elif format == 'csv':
for i in xrange(len(rec)):
colval = rec[i]
ret_str_list.append('"%s"' % (str(colval).replace('"', r'\"')))
ret_str_list.append(',')
ret_str_list.pop() # drop last comma
ret_str_list.append('%s' % (os.linesep))
else:
assert(False)
return ''.join(ret_str_list)
|
Return formatted str.
:param format: one of 'json', 'csv' are supported
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/batch.py#L52-L77
| null |
class Batch(object):
"""Set of records"""
def __init__(self, record_def, records, check_datatype=True):
"""Create an *immutable* batch of records
:param record_def: instance of `RecordDef <#relshell.recorddef.RecordDef>`_
:param records: records. Leftmost element is oldest (has to be treated earlier).
:type records: instance of `tuple`
:raises: `TypeError` when any record has mismatched type with :param:`record_def`
"""
# check each record type
if check_datatype:
map(lambda r: Record._chk_type(record_def, r), records)
self._rdef = record_def
self._records = records
self._records_iter = 0 # column number to iterate over
def record_def(self):
"""Return instance of :class:`RecordDef`"""
return self._rdef
def __iter__(self):
return self
def next(self):
"""Return one of record in this batch in out-of-order.
:raises: `StopIteration` when no more record is in this batch
"""
if self._records_iter >= len(self._records):
raise StopIteration
self._records_iter += 1
return self._records[self._records_iter - 1]
def __str__(self):
return self.formatted_str('json')
def __eq__(self, other):
if len(self._records) != len(other._records):
return False
for i in xrange(len(self._records)):
if self._records[i] != other._records[i]:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __len__(self):
return len(self._records)
|
laysakura/relshell
|
relshell/record.py
|
Record.next
|
python
|
def next(self):
if self._cur_col >= len(self._rec):
self._cur_col = 0
raise StopIteration
col = self._rec[self._cur_col]
self._cur_col += 1
return col
|
Return a column one by one
:raises: StopIteration
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/record.py#L42-L52
| null |
class Record(object):
"""Record."""
# APIs
def __init__(self, *columns):
"""Creates a record with `record_def` constraints.
:param \*columns: contents of columns
"""
self._rec = Record._internal_repl(columns)
self._cur_col = 0 # Used for `next()`
def __str__(self):
"""Returns string representation of record"""
retstr_list = ['(']
for i in xrange(len(self._rec)):
retstr_list.append('"%s", ' % (self._rec[i]))
retstr_list.append(')')
return ''.join(retstr_list)
def __len__(self):
"""Returns number of columns in record"""
return len(self._rec)
def __getitem__(self, index):
"""Returns column data specified by `index`"""
return self._rec[index]
def __iter__(self):
return self
def __eq__(self, other):
return self._rec == other._rec
def __ne__(self, other):
return not self.__eq__(other)
# Private functions
@staticmethod
def _internal_repl(columns):
return tuple(columns)
@staticmethod
def _chk_type(recdef, rec):
"""Checks if type of `rec` matches `recdef`
:param recdef: instance of RecordDef
:param rec: instance of Record
:raises: `TypeError`
"""
if len(recdef) != len(rec):
raise TypeError("Number of columns (%d) is different from RecordDef (%d)" % (len(rec), len(recdef)))
for i in xrange(len(recdef)):
try:
def_type = recdef[i].type
col_type = Type.equivalent_relshell_type(rec[i])
if col_type != def_type:
raise TypeError("Column %d has mismatched type: Got '%s' [%s] ; Expected [%s]" %
(i, rec[i], col_type, def_type))
except AttributeError as e:
# recdef[i].type is not defined, then any relshell type is allowed
try:
Type.equivalent_relshell_type(rec[i])
except NotImplementedError as e:
raise TypeError("%s" % (e))
|
laysakura/relshell
|
relshell/record.py
|
Record._chk_type
|
python
|
def _chk_type(recdef, rec):
if len(recdef) != len(rec):
raise TypeError("Number of columns (%d) is different from RecordDef (%d)" % (len(rec), len(recdef)))
for i in xrange(len(recdef)):
try:
def_type = recdef[i].type
col_type = Type.equivalent_relshell_type(rec[i])
if col_type != def_type:
raise TypeError("Column %d has mismatched type: Got '%s' [%s] ; Expected [%s]" %
(i, rec[i], col_type, def_type))
except AttributeError as e:
# recdef[i].type is not defined, then any relshell type is allowed
try:
Type.equivalent_relshell_type(rec[i])
except NotImplementedError as e:
raise TypeError("%s" % (e))
|
Checks if type of `rec` matches `recdef`
:param recdef: instance of RecordDef
:param rec: instance of Record
:raises: `TypeError`
|
train
|
https://github.com/laysakura/relshell/blob/9ca5c03a34c11cb763a4a75595f18bf4383aa8cc/relshell/record.py#L66-L87
|
[
"def equivalent_relshell_type(val):\n \"\"\"Returns `val`'s relshell compatible type.\n\n :param val: value to check relshell equivalent type\n :raises: `NotImplementedError` if val's relshell compatible type is not implemented.\n \"\"\"\n builtin_type = type(val)\n if builtin_type not in Type._typemap:\n raise NotImplementedError(\"builtin type %s is not convertible to relshell type\" %\n (builtin_type))\n relshell_type_str = Type._typemap[builtin_type]\n return Type(relshell_type_str)\n"
] |
class Record(object):
"""Record."""
# APIs
def __init__(self, *columns):
"""Creates a record with `record_def` constraints.
:param \*columns: contents of columns
"""
self._rec = Record._internal_repl(columns)
self._cur_col = 0 # Used for `next()`
def __str__(self):
"""Returns string representation of record"""
retstr_list = ['(']
for i in xrange(len(self._rec)):
retstr_list.append('"%s", ' % (self._rec[i]))
retstr_list.append(')')
return ''.join(retstr_list)
def __len__(self):
"""Returns number of columns in record"""
return len(self._rec)
def __getitem__(self, index):
"""Returns column data specified by `index`"""
return self._rec[index]
def __iter__(self):
return self
def next(self):
"""Return a column one by one
:raises: StopIteration
"""
if self._cur_col >= len(self._rec):
self._cur_col = 0
raise StopIteration
col = self._rec[self._cur_col]
self._cur_col += 1
return col
def __eq__(self, other):
return self._rec == other._rec
def __ne__(self, other):
return not self.__eq__(other)
# Private functions
@staticmethod
def _internal_repl(columns):
return tuple(columns)
@staticmethod
|
tus/tus-py-client
|
tusclient/uploader.py
|
Uploader.headers
|
python
|
def headers(self):
client_headers = getattr(self.client, 'headers', {})
return dict(self.DEFAULT_HEADERS, **client_headers)
|
Return headers of the uploader instance. This would include the headers of the
client instance.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L139-L145
| null |
class Uploader(object):
"""
Object to control upload related functions.
:Attributes:
- file_path (str):
This is the path(absolute/relative) to the file that is intended for upload
to the tus server. On instantiation this attribute is required.
- file_stream (file):
As an alternative to the `file_path`, an instance of the file to be uploaded
can be passed to the constructor as `file_stream`. Do note that either the
`file_stream` or the `file_path` must be passed on instantiation.
- url (str):
If the upload url for the file is known, it can be passed to the constructor.
This may happen when you resume an upload.
- client (<tusclient.client.TusClient>):
An instance of `tusclient.client.TusClient`. This would tell the uploader instance
what client it is operating with. Although this argument is optional, it is only
optional if the 'url' argument is specified.
- chunk_size (int):
This tells the uploader what chunk size(in bytes) should be uploaded when the
method `upload_chunk` is called. This defaults to the maximum possible integer if not
specified.
- metadata (dict):
A dictionary containing the upload-metadata. This would be encoded internally
by the method `encode_metadata` to conform with the tus protocol.
- offset (int):
The offset value of the upload indicates the current position of the file upload.
- stop_at (int):
At what offset value the upload should stop.
- request (<tusclient.request.TusRequest>):
A http Request instance of the last chunk uploaded.
- retries (int):
The number of attempts the uploader should make in the case of a failed upload.
If not specified, it defaults to 0.
- retry_delay (int):
How long (in seconds) the uploader should wait before retrying a failed upload attempt.
If not specified, it defaults to 30.
- store_url (bool):
Determines whether or not url should be stored, and uploads should be resumed.
- url_storage (<tusclient.storage.interface.Storage>):
An implementation of <tusclient.storage.interface.Storage> which is an API for URL storage.
This value must be set if store_url is set to true. A ready to use implementation exists atbe used out of the box. But you can
implement your own custom storage API and pass an instace of it as value.
- fingerprinter (<tusclient.fingerprint.interface.Fingerprint>):
An implementation of <tusclient.fingerprint.interface.Fingerprint> which is an API to generate
a unique fingerprint for the uploaded file. This is used for url storage when resumability is enabled.
if store_url is set to true, the default fingerprint module (<tusclient.fingerprint.fingerprint.Fingerprint>)
would be used. But you can set your own custom fingerprint module by passing it to the constructor.
- log_func (<function>):
A logging function to be passed diagnostic messages during file uploads
- upload_checksum (bool):
Whether or not to supply the Upload-Checksum header along with each
chunk. Defaults to False.
:Constructor Args:
- file_path (str)
- file_stream (Optional[file])
- url (Optional[str])
- client (Optional [<tusclient.client.TusClient>])
- chunk_size (Optional[int])
- metadata (Optional[dict])
- retries (Optional[int])
- retry_delay (Optional[int])
- store_url (Optional[bool])
- url_storage (Optinal [<tusclient.storage.interface.Storage>])
- fingerprinter (Optional [<tusclient.fingerprint.interface.Fingerprint>])
- log_func (Optional [<function>])
- upload_checksum (Optional[bool])
"""
DEFAULT_HEADERS = {"Tus-Resumable": "1.0.0"}
DEFAULT_CHUNK_SIZE = MAXSIZE
CHECKSUM_ALGORITHM_PAIR = ("sha1", hashlib.sha1, )
def __init__(self, file_path=None, file_stream=None, url=None, client=None,
chunk_size=None, metadata=None, retries=0, retry_delay=30,
store_url=False, url_storage=None, fingerprinter=None,
log_func=None, upload_checksum=False):
if file_path is None and file_stream is None:
raise ValueError("Either 'file_path' or 'file_stream' cannot be None.")
if url is None and client is None:
raise ValueError("Either 'url' or 'client' cannot be None.")
if store_url and url_storage is None:
raise ValueError("Please specify a storage instance to enable resumablility.")
self.file_path = file_path
self.file_stream = file_stream
self.stop_at = self.file_size
self.client = client
self.metadata = metadata or {}
self.store_url = store_url
self.url_storage = url_storage
self.fingerprinter = fingerprinter or fingerprint.Fingerprint()
self.url = url or self.get_url()
self.offset = self.get_offset()
self.chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE
self.request = None
self.retries = retries
self._retried = 0
self.retry_delay = retry_delay
self.log_func = log_func
self.upload_checksum = upload_checksum
self.__checksum_algorithm_name, self.__checksum_algorithm = \
self.CHECKSUM_ALGORITHM_PAIR
# it is important to have this as a @property so it gets
# updated client headers.
@property
@property
def headers_as_list(self):
"""
Does the same as 'headers' except it is returned as a list.
"""
headers = self.headers
headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)]
return headers_list
@property
def checksum_algorithm(self):
"""The checksum algorithm to be used for the Upload-Checksum extension.
"""
return self.__checksum_algorithm
@property
def checksum_algorithm_name(self):
"""The name of the checksum algorithm to be used for the Upload-Checksum
extension.
"""
return self.__checksum_algorithm_name
@_catch_requests_error
def get_offset(self):
"""
Return offset from tus server.
This is different from the instance attribute 'offset' because this makes an
http request to the tus server to retrieve the offset.
"""
resp = requests.head(self.url, headers=self.headers)
offset = resp.headers.get('upload-offset')
if offset is None:
msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return int(offset)
def encode_metadata(self):
"""
Return list of encoded metadata as defined by the Tus protocol.
"""
encoded_list = []
for key, value in iteritems(self.metadata):
key_str = str(key) # dict keys may be of any object type.
# confirm that the key does not contain unwanted characters.
if re.search(r'^$|[\s,]+', key_str):
msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
raise ValueError(msg.format(key_str))
value_bytes = b(value) # python 3 only encodes bytes
encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))
return encoded_list
def get_url(self):
"""
Return the tus upload url.
If resumability is enabled, this would try to get the url from storage if available,
otherwise it would request a new upload url from the tus server.
"""
if self.store_url and self.url_storage:
key = self.fingerprinter.get_fingerprint(self.get_file_stream())
url = self.url_storage.get_item(key)
if not url:
url = self.create_url()
self.url_storage.set_item(key, url)
return url
else:
return self.create_url()
@_catch_requests_error
def create_url(self):
"""
Return upload url.
Makes request to tus server to create a new upload url for the required file upload.
"""
headers = self.headers
headers['upload-length'] = str(self.file_size)
headers['upload-metadata'] = ','.join(self.encode_metadata())
resp = requests.post(self.client.url, headers=headers)
url = resp.headers.get("location")
if url is None:
msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return urljoin(self.client.url, url)
@property
def request_length(self):
"""
Return length of next chunk upload.
"""
remainder = self.stop_at - self.offset
return self.chunk_size if remainder > self.chunk_size else remainder
def verify_upload(self):
"""
Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful.
"""
if self.request.status_code == 204:
return True
else:
raise TusUploadFailed('', self.request.status_code, self.request.response_content)
def get_file_stream(self):
"""
Return a file stream instance of the upload.
"""
if self.file_stream:
self.file_stream.seek(0)
return self.file_stream
elif os.path.isfile(self.file_path):
return open(self.file_path, 'rb')
else:
raise ValueError("invalid file {}".format(self.file_path))
@property
def file_size(self):
"""
Return size of the file.
"""
stream = self.get_file_stream()
stream.seek(0, os.SEEK_END)
return stream.tell()
def upload(self, stop_at=None):
"""
Perform file upload.
Performs continous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
"""
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
def upload_chunk(self):
"""
Upload chunk of file.
"""
self._retried = 0
self._do_request()
self.offset = int(self.request.response_headers.get('upload-offset'))
if self.log_func:
msg = '{} bytes uploaded ...'.format(self.offset)
self.log_func(msg)
def _do_request(self):
# TODO: Maybe the request should not be re-created everytime.
# The request handle could be left open until upload is done instead.
self.request = TusRequest(self)
try:
self.request.perform()
self.verify_upload()
except TusUploadFailed as error:
self.request.close()
self._retry_or_cry(error)
finally:
self.request.close()
def _retry_or_cry(self, error):
if self.retries > self._retried:
time.sleep(self.retry_delay)
self._retried += 1
try:
self.offset = self.get_offset()
except TusCommunicationError as e:
self._retry_or_cry(e)
else:
self._do_request()
else:
raise error
|
tus/tus-py-client
|
tusclient/uploader.py
|
Uploader.headers_as_list
|
python
|
def headers_as_list(self):
headers = self.headers
headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)]
return headers_list
|
Does the same as 'headers' except it is returned as a list.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L148-L154
| null |
class Uploader(object):
"""
Object to control upload related functions.
:Attributes:
- file_path (str):
This is the path(absolute/relative) to the file that is intended for upload
to the tus server. On instantiation this attribute is required.
- file_stream (file):
As an alternative to the `file_path`, an instance of the file to be uploaded
can be passed to the constructor as `file_stream`. Do note that either the
`file_stream` or the `file_path` must be passed on instantiation.
- url (str):
If the upload url for the file is known, it can be passed to the constructor.
This may happen when you resume an upload.
- client (<tusclient.client.TusClient>):
An instance of `tusclient.client.TusClient`. This would tell the uploader instance
what client it is operating with. Although this argument is optional, it is only
optional if the 'url' argument is specified.
- chunk_size (int):
This tells the uploader what chunk size(in bytes) should be uploaded when the
method `upload_chunk` is called. This defaults to the maximum possible integer if not
specified.
- metadata (dict):
A dictionary containing the upload-metadata. This would be encoded internally
by the method `encode_metadata` to conform with the tus protocol.
- offset (int):
The offset value of the upload indicates the current position of the file upload.
- stop_at (int):
At what offset value the upload should stop.
- request (<tusclient.request.TusRequest>):
A http Request instance of the last chunk uploaded.
- retries (int):
The number of attempts the uploader should make in the case of a failed upload.
If not specified, it defaults to 0.
- retry_delay (int):
How long (in seconds) the uploader should wait before retrying a failed upload attempt.
If not specified, it defaults to 30.
- store_url (bool):
Determines whether or not url should be stored, and uploads should be resumed.
- url_storage (<tusclient.storage.interface.Storage>):
An implementation of <tusclient.storage.interface.Storage> which is an API for URL storage.
This value must be set if store_url is set to true. A ready to use implementation exists atbe used out of the box. But you can
implement your own custom storage API and pass an instace of it as value.
- fingerprinter (<tusclient.fingerprint.interface.Fingerprint>):
An implementation of <tusclient.fingerprint.interface.Fingerprint> which is an API to generate
a unique fingerprint for the uploaded file. This is used for url storage when resumability is enabled.
if store_url is set to true, the default fingerprint module (<tusclient.fingerprint.fingerprint.Fingerprint>)
would be used. But you can set your own custom fingerprint module by passing it to the constructor.
- log_func (<function>):
A logging function to be passed diagnostic messages during file uploads
- upload_checksum (bool):
Whether or not to supply the Upload-Checksum header along with each
chunk. Defaults to False.
:Constructor Args:
- file_path (str)
- file_stream (Optional[file])
- url (Optional[str])
- client (Optional [<tusclient.client.TusClient>])
- chunk_size (Optional[int])
- metadata (Optional[dict])
- retries (Optional[int])
- retry_delay (Optional[int])
- store_url (Optional[bool])
- url_storage (Optinal [<tusclient.storage.interface.Storage>])
- fingerprinter (Optional [<tusclient.fingerprint.interface.Fingerprint>])
- log_func (Optional [<function>])
- upload_checksum (Optional[bool])
"""
DEFAULT_HEADERS = {"Tus-Resumable": "1.0.0"}
DEFAULT_CHUNK_SIZE = MAXSIZE
CHECKSUM_ALGORITHM_PAIR = ("sha1", hashlib.sha1, )
def __init__(self, file_path=None, file_stream=None, url=None, client=None,
chunk_size=None, metadata=None, retries=0, retry_delay=30,
store_url=False, url_storage=None, fingerprinter=None,
log_func=None, upload_checksum=False):
if file_path is None and file_stream is None:
raise ValueError("Either 'file_path' or 'file_stream' cannot be None.")
if url is None and client is None:
raise ValueError("Either 'url' or 'client' cannot be None.")
if store_url and url_storage is None:
raise ValueError("Please specify a storage instance to enable resumablility.")
self.file_path = file_path
self.file_stream = file_stream
self.stop_at = self.file_size
self.client = client
self.metadata = metadata or {}
self.store_url = store_url
self.url_storage = url_storage
self.fingerprinter = fingerprinter or fingerprint.Fingerprint()
self.url = url or self.get_url()
self.offset = self.get_offset()
self.chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE
self.request = None
self.retries = retries
self._retried = 0
self.retry_delay = retry_delay
self.log_func = log_func
self.upload_checksum = upload_checksum
self.__checksum_algorithm_name, self.__checksum_algorithm = \
self.CHECKSUM_ALGORITHM_PAIR
# it is important to have this as a @property so it gets
# updated client headers.
@property
def headers(self):
"""
Return headers of the uploader instance. This would include the headers of the
client instance.
"""
client_headers = getattr(self.client, 'headers', {})
return dict(self.DEFAULT_HEADERS, **client_headers)
@property
@property
def checksum_algorithm(self):
"""The checksum algorithm to be used for the Upload-Checksum extension.
"""
return self.__checksum_algorithm
@property
def checksum_algorithm_name(self):
"""The name of the checksum algorithm to be used for the Upload-Checksum
extension.
"""
return self.__checksum_algorithm_name
@_catch_requests_error
def get_offset(self):
"""
Return offset from tus server.
This is different from the instance attribute 'offset' because this makes an
http request to the tus server to retrieve the offset.
"""
resp = requests.head(self.url, headers=self.headers)
offset = resp.headers.get('upload-offset')
if offset is None:
msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return int(offset)
def encode_metadata(self):
"""
Return list of encoded metadata as defined by the Tus protocol.
"""
encoded_list = []
for key, value in iteritems(self.metadata):
key_str = str(key) # dict keys may be of any object type.
# confirm that the key does not contain unwanted characters.
if re.search(r'^$|[\s,]+', key_str):
msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
raise ValueError(msg.format(key_str))
value_bytes = b(value) # python 3 only encodes bytes
encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))
return encoded_list
def get_url(self):
"""
Return the tus upload url.
If resumability is enabled, this would try to get the url from storage if available,
otherwise it would request a new upload url from the tus server.
"""
if self.store_url and self.url_storage:
key = self.fingerprinter.get_fingerprint(self.get_file_stream())
url = self.url_storage.get_item(key)
if not url:
url = self.create_url()
self.url_storage.set_item(key, url)
return url
else:
return self.create_url()
@_catch_requests_error
def create_url(self):
"""
Return upload url.
Makes request to tus server to create a new upload url for the required file upload.
"""
headers = self.headers
headers['upload-length'] = str(self.file_size)
headers['upload-metadata'] = ','.join(self.encode_metadata())
resp = requests.post(self.client.url, headers=headers)
url = resp.headers.get("location")
if url is None:
msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return urljoin(self.client.url, url)
@property
def request_length(self):
"""
Return length of next chunk upload.
"""
remainder = self.stop_at - self.offset
return self.chunk_size if remainder > self.chunk_size else remainder
def verify_upload(self):
"""
Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful.
"""
if self.request.status_code == 204:
return True
else:
raise TusUploadFailed('', self.request.status_code, self.request.response_content)
def get_file_stream(self):
"""
Return a file stream instance of the upload.
"""
if self.file_stream:
self.file_stream.seek(0)
return self.file_stream
elif os.path.isfile(self.file_path):
return open(self.file_path, 'rb')
else:
raise ValueError("invalid file {}".format(self.file_path))
@property
def file_size(self):
"""
Return size of the file.
"""
stream = self.get_file_stream()
stream.seek(0, os.SEEK_END)
return stream.tell()
def upload(self, stop_at=None):
"""
Perform file upload.
Performs continous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
"""
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
def upload_chunk(self):
"""
Upload chunk of file.
"""
self._retried = 0
self._do_request()
self.offset = int(self.request.response_headers.get('upload-offset'))
if self.log_func:
msg = '{} bytes uploaded ...'.format(self.offset)
self.log_func(msg)
def _do_request(self):
# TODO: Maybe the request should not be re-created everytime.
# The request handle could be left open until upload is done instead.
self.request = TusRequest(self)
try:
self.request.perform()
self.verify_upload()
except TusUploadFailed as error:
self.request.close()
self._retry_or_cry(error)
finally:
self.request.close()
def _retry_or_cry(self, error):
if self.retries > self._retried:
time.sleep(self.retry_delay)
self._retried += 1
try:
self.offset = self.get_offset()
except TusCommunicationError as e:
self._retry_or_cry(e)
else:
self._do_request()
else:
raise error
|
tus/tus-py-client
|
tusclient/uploader.py
|
Uploader.get_offset
|
python
|
def get_offset(self):
resp = requests.head(self.url, headers=self.headers)
offset = resp.headers.get('upload-offset')
if offset is None:
msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return int(offset)
|
Return offset from tus server.
This is different from the instance attribute 'offset' because this makes an
http request to the tus server to retrieve the offset.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L170-L182
| null |
class Uploader(object):
"""
Object to control upload related functions.
:Attributes:
- file_path (str):
This is the path(absolute/relative) to the file that is intended for upload
to the tus server. On instantiation this attribute is required.
- file_stream (file):
As an alternative to the `file_path`, an instance of the file to be uploaded
can be passed to the constructor as `file_stream`. Do note that either the
`file_stream` or the `file_path` must be passed on instantiation.
- url (str):
If the upload url for the file is known, it can be passed to the constructor.
This may happen when you resume an upload.
- client (<tusclient.client.TusClient>):
An instance of `tusclient.client.TusClient`. This would tell the uploader instance
what client it is operating with. Although this argument is optional, it is only
optional if the 'url' argument is specified.
- chunk_size (int):
This tells the uploader what chunk size(in bytes) should be uploaded when the
method `upload_chunk` is called. This defaults to the maximum possible integer if not
specified.
- metadata (dict):
A dictionary containing the upload-metadata. This would be encoded internally
by the method `encode_metadata` to conform with the tus protocol.
- offset (int):
The offset value of the upload indicates the current position of the file upload.
- stop_at (int):
At what offset value the upload should stop.
- request (<tusclient.request.TusRequest>):
A http Request instance of the last chunk uploaded.
- retries (int):
The number of attempts the uploader should make in the case of a failed upload.
If not specified, it defaults to 0.
- retry_delay (int):
How long (in seconds) the uploader should wait before retrying a failed upload attempt.
If not specified, it defaults to 30.
- store_url (bool):
Determines whether or not url should be stored, and uploads should be resumed.
- url_storage (<tusclient.storage.interface.Storage>):
An implementation of <tusclient.storage.interface.Storage> which is an API for URL storage.
This value must be set if store_url is set to true. A ready to use implementation exists atbe used out of the box. But you can
implement your own custom storage API and pass an instace of it as value.
- fingerprinter (<tusclient.fingerprint.interface.Fingerprint>):
An implementation of <tusclient.fingerprint.interface.Fingerprint> which is an API to generate
a unique fingerprint for the uploaded file. This is used for url storage when resumability is enabled.
if store_url is set to true, the default fingerprint module (<tusclient.fingerprint.fingerprint.Fingerprint>)
would be used. But you can set your own custom fingerprint module by passing it to the constructor.
- log_func (<function>):
A logging function to be passed diagnostic messages during file uploads
- upload_checksum (bool):
Whether or not to supply the Upload-Checksum header along with each
chunk. Defaults to False.
:Constructor Args:
- file_path (str)
- file_stream (Optional[file])
- url (Optional[str])
- client (Optional [<tusclient.client.TusClient>])
- chunk_size (Optional[int])
- metadata (Optional[dict])
- retries (Optional[int])
- retry_delay (Optional[int])
- store_url (Optional[bool])
- url_storage (Optinal [<tusclient.storage.interface.Storage>])
- fingerprinter (Optional [<tusclient.fingerprint.interface.Fingerprint>])
- log_func (Optional [<function>])
- upload_checksum (Optional[bool])
"""
DEFAULT_HEADERS = {"Tus-Resumable": "1.0.0"}
DEFAULT_CHUNK_SIZE = MAXSIZE
CHECKSUM_ALGORITHM_PAIR = ("sha1", hashlib.sha1, )
def __init__(self, file_path=None, file_stream=None, url=None, client=None,
chunk_size=None, metadata=None, retries=0, retry_delay=30,
store_url=False, url_storage=None, fingerprinter=None,
log_func=None, upload_checksum=False):
if file_path is None and file_stream is None:
raise ValueError("Either 'file_path' or 'file_stream' cannot be None.")
if url is None and client is None:
raise ValueError("Either 'url' or 'client' cannot be None.")
if store_url and url_storage is None:
raise ValueError("Please specify a storage instance to enable resumablility.")
self.file_path = file_path
self.file_stream = file_stream
self.stop_at = self.file_size
self.client = client
self.metadata = metadata or {}
self.store_url = store_url
self.url_storage = url_storage
self.fingerprinter = fingerprinter or fingerprint.Fingerprint()
self.url = url or self.get_url()
self.offset = self.get_offset()
self.chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE
self.request = None
self.retries = retries
self._retried = 0
self.retry_delay = retry_delay
self.log_func = log_func
self.upload_checksum = upload_checksum
self.__checksum_algorithm_name, self.__checksum_algorithm = \
self.CHECKSUM_ALGORITHM_PAIR
# it is important to have this as a @property so it gets
# updated client headers.
@property
def headers(self):
"""
Return headers of the uploader instance. This would include the headers of the
client instance.
"""
client_headers = getattr(self.client, 'headers', {})
return dict(self.DEFAULT_HEADERS, **client_headers)
@property
def headers_as_list(self):
"""
Does the same as 'headers' except it is returned as a list.
"""
headers = self.headers
headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)]
return headers_list
@property
def checksum_algorithm(self):
"""The checksum algorithm to be used for the Upload-Checksum extension.
"""
return self.__checksum_algorithm
@property
def checksum_algorithm_name(self):
"""The name of the checksum algorithm to be used for the Upload-Checksum
extension.
"""
return self.__checksum_algorithm_name
@_catch_requests_error
def encode_metadata(self):
"""
Return list of encoded metadata as defined by the Tus protocol.
"""
encoded_list = []
for key, value in iteritems(self.metadata):
key_str = str(key) # dict keys may be of any object type.
# confirm that the key does not contain unwanted characters.
if re.search(r'^$|[\s,]+', key_str):
msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
raise ValueError(msg.format(key_str))
value_bytes = b(value) # python 3 only encodes bytes
encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))
return encoded_list
def get_url(self):
"""
Return the tus upload url.
If resumability is enabled, this would try to get the url from storage if available,
otherwise it would request a new upload url from the tus server.
"""
if self.store_url and self.url_storage:
key = self.fingerprinter.get_fingerprint(self.get_file_stream())
url = self.url_storage.get_item(key)
if not url:
url = self.create_url()
self.url_storage.set_item(key, url)
return url
else:
return self.create_url()
@_catch_requests_error
def create_url(self):
"""
Return upload url.
Makes request to tus server to create a new upload url for the required file upload.
"""
headers = self.headers
headers['upload-length'] = str(self.file_size)
headers['upload-metadata'] = ','.join(self.encode_metadata())
resp = requests.post(self.client.url, headers=headers)
url = resp.headers.get("location")
if url is None:
msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return urljoin(self.client.url, url)
@property
def request_length(self):
"""
Return length of next chunk upload.
"""
remainder = self.stop_at - self.offset
return self.chunk_size if remainder > self.chunk_size else remainder
def verify_upload(self):
"""
Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful.
"""
if self.request.status_code == 204:
return True
else:
raise TusUploadFailed('', self.request.status_code, self.request.response_content)
def get_file_stream(self):
"""
Return a file stream instance of the upload.
"""
if self.file_stream:
self.file_stream.seek(0)
return self.file_stream
elif os.path.isfile(self.file_path):
return open(self.file_path, 'rb')
else:
raise ValueError("invalid file {}".format(self.file_path))
@property
def file_size(self):
"""
Return size of the file.
"""
stream = self.get_file_stream()
stream.seek(0, os.SEEK_END)
return stream.tell()
def upload(self, stop_at=None):
"""
Perform file upload.
Performs continous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
"""
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
def upload_chunk(self):
"""
Upload chunk of file.
"""
self._retried = 0
self._do_request()
self.offset = int(self.request.response_headers.get('upload-offset'))
if self.log_func:
msg = '{} bytes uploaded ...'.format(self.offset)
self.log_func(msg)
def _do_request(self):
# TODO: Maybe the request should not be re-created everytime.
# The request handle could be left open until upload is done instead.
self.request = TusRequest(self)
try:
self.request.perform()
self.verify_upload()
except TusUploadFailed as error:
self.request.close()
self._retry_or_cry(error)
finally:
self.request.close()
def _retry_or_cry(self, error):
if self.retries > self._retried:
time.sleep(self.retry_delay)
self._retried += 1
try:
self.offset = self.get_offset()
except TusCommunicationError as e:
self._retry_or_cry(e)
else:
self._do_request()
else:
raise error
|
tus/tus-py-client
|
tusclient/uploader.py
|
Uploader.encode_metadata
|
python
|
def encode_metadata(self):
encoded_list = []
for key, value in iteritems(self.metadata):
key_str = str(key) # dict keys may be of any object type.
# confirm that the key does not contain unwanted characters.
if re.search(r'^$|[\s,]+', key_str):
msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
raise ValueError(msg.format(key_str))
value_bytes = b(value) # python 3 only encodes bytes
encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))
return encoded_list
|
Return list of encoded metadata as defined by the Tus protocol.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L184-L199
| null |
class Uploader(object):
"""
Object to control upload related functions.
:Attributes:
- file_path (str):
This is the path(absolute/relative) to the file that is intended for upload
to the tus server. On instantiation this attribute is required.
- file_stream (file):
As an alternative to the `file_path`, an instance of the file to be uploaded
can be passed to the constructor as `file_stream`. Do note that either the
`file_stream` or the `file_path` must be passed on instantiation.
- url (str):
If the upload url for the file is known, it can be passed to the constructor.
This may happen when you resume an upload.
- client (<tusclient.client.TusClient>):
An instance of `tusclient.client.TusClient`. This would tell the uploader instance
what client it is operating with. Although this argument is optional, it is only
optional if the 'url' argument is specified.
- chunk_size (int):
This tells the uploader what chunk size(in bytes) should be uploaded when the
method `upload_chunk` is called. This defaults to the maximum possible integer if not
specified.
- metadata (dict):
A dictionary containing the upload-metadata. This would be encoded internally
by the method `encode_metadata` to conform with the tus protocol.
- offset (int):
The offset value of the upload indicates the current position of the file upload.
- stop_at (int):
At what offset value the upload should stop.
- request (<tusclient.request.TusRequest>):
A http Request instance of the last chunk uploaded.
- retries (int):
The number of attempts the uploader should make in the case of a failed upload.
If not specified, it defaults to 0.
- retry_delay (int):
How long (in seconds) the uploader should wait before retrying a failed upload attempt.
If not specified, it defaults to 30.
- store_url (bool):
Determines whether or not url should be stored, and uploads should be resumed.
- url_storage (<tusclient.storage.interface.Storage>):
An implementation of <tusclient.storage.interface.Storage> which is an API for URL storage.
This value must be set if store_url is set to true. A ready to use implementation exists atbe used out of the box. But you can
implement your own custom storage API and pass an instace of it as value.
- fingerprinter (<tusclient.fingerprint.interface.Fingerprint>):
An implementation of <tusclient.fingerprint.interface.Fingerprint> which is an API to generate
a unique fingerprint for the uploaded file. This is used for url storage when resumability is enabled.
if store_url is set to true, the default fingerprint module (<tusclient.fingerprint.fingerprint.Fingerprint>)
would be used. But you can set your own custom fingerprint module by passing it to the constructor.
- log_func (<function>):
A logging function to be passed diagnostic messages during file uploads
- upload_checksum (bool):
Whether or not to supply the Upload-Checksum header along with each
chunk. Defaults to False.
:Constructor Args:
- file_path (str)
- file_stream (Optional[file])
- url (Optional[str])
- client (Optional [<tusclient.client.TusClient>])
- chunk_size (Optional[int])
- metadata (Optional[dict])
- retries (Optional[int])
- retry_delay (Optional[int])
- store_url (Optional[bool])
- url_storage (Optinal [<tusclient.storage.interface.Storage>])
- fingerprinter (Optional [<tusclient.fingerprint.interface.Fingerprint>])
- log_func (Optional [<function>])
- upload_checksum (Optional[bool])
"""
DEFAULT_HEADERS = {"Tus-Resumable": "1.0.0"}
DEFAULT_CHUNK_SIZE = MAXSIZE
CHECKSUM_ALGORITHM_PAIR = ("sha1", hashlib.sha1, )
def __init__(self, file_path=None, file_stream=None, url=None, client=None,
chunk_size=None, metadata=None, retries=0, retry_delay=30,
store_url=False, url_storage=None, fingerprinter=None,
log_func=None, upload_checksum=False):
if file_path is None and file_stream is None:
raise ValueError("Either 'file_path' or 'file_stream' cannot be None.")
if url is None and client is None:
raise ValueError("Either 'url' or 'client' cannot be None.")
if store_url and url_storage is None:
raise ValueError("Please specify a storage instance to enable resumablility.")
self.file_path = file_path
self.file_stream = file_stream
self.stop_at = self.file_size
self.client = client
self.metadata = metadata or {}
self.store_url = store_url
self.url_storage = url_storage
self.fingerprinter = fingerprinter or fingerprint.Fingerprint()
self.url = url or self.get_url()
self.offset = self.get_offset()
self.chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE
self.request = None
self.retries = retries
self._retried = 0
self.retry_delay = retry_delay
self.log_func = log_func
self.upload_checksum = upload_checksum
self.__checksum_algorithm_name, self.__checksum_algorithm = \
self.CHECKSUM_ALGORITHM_PAIR
# it is important to have this as a @property so it gets
# updated client headers.
@property
def headers(self):
"""
Return headers of the uploader instance. This would include the headers of the
client instance.
"""
client_headers = getattr(self.client, 'headers', {})
return dict(self.DEFAULT_HEADERS, **client_headers)
@property
def headers_as_list(self):
"""
Does the same as 'headers' except it is returned as a list.
"""
headers = self.headers
headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)]
return headers_list
@property
def checksum_algorithm(self):
"""The checksum algorithm to be used for the Upload-Checksum extension.
"""
return self.__checksum_algorithm
@property
def checksum_algorithm_name(self):
"""The name of the checksum algorithm to be used for the Upload-Checksum
extension.
"""
return self.__checksum_algorithm_name
@_catch_requests_error
def get_offset(self):
"""
Return offset from tus server.
This is different from the instance attribute 'offset' because this makes an
http request to the tus server to retrieve the offset.
"""
resp = requests.head(self.url, headers=self.headers)
offset = resp.headers.get('upload-offset')
if offset is None:
msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return int(offset)
def get_url(self):
"""
Return the tus upload url.
If resumability is enabled, this would try to get the url from storage if available,
otherwise it would request a new upload url from the tus server.
"""
if self.store_url and self.url_storage:
key = self.fingerprinter.get_fingerprint(self.get_file_stream())
url = self.url_storage.get_item(key)
if not url:
url = self.create_url()
self.url_storage.set_item(key, url)
return url
else:
return self.create_url()
@_catch_requests_error
def create_url(self):
"""
Return upload url.
Makes request to tus server to create a new upload url for the required file upload.
"""
headers = self.headers
headers['upload-length'] = str(self.file_size)
headers['upload-metadata'] = ','.join(self.encode_metadata())
resp = requests.post(self.client.url, headers=headers)
url = resp.headers.get("location")
if url is None:
msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return urljoin(self.client.url, url)
@property
def request_length(self):
"""
Return length of next chunk upload.
"""
remainder = self.stop_at - self.offset
return self.chunk_size if remainder > self.chunk_size else remainder
def verify_upload(self):
"""
Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful.
"""
if self.request.status_code == 204:
return True
else:
raise TusUploadFailed('', self.request.status_code, self.request.response_content)
def get_file_stream(self):
"""
Return a file stream instance of the upload.
"""
if self.file_stream:
self.file_stream.seek(0)
return self.file_stream
elif os.path.isfile(self.file_path):
return open(self.file_path, 'rb')
else:
raise ValueError("invalid file {}".format(self.file_path))
@property
def file_size(self):
"""
Return size of the file.
"""
stream = self.get_file_stream()
stream.seek(0, os.SEEK_END)
return stream.tell()
def upload(self, stop_at=None):
"""
Perform file upload.
Performs continous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
"""
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
def upload_chunk(self):
"""
Upload chunk of file.
"""
self._retried = 0
self._do_request()
self.offset = int(self.request.response_headers.get('upload-offset'))
if self.log_func:
msg = '{} bytes uploaded ...'.format(self.offset)
self.log_func(msg)
def _do_request(self):
# TODO: Maybe the request should not be re-created everytime.
# The request handle could be left open until upload is done instead.
self.request = TusRequest(self)
try:
self.request.perform()
self.verify_upload()
except TusUploadFailed as error:
self.request.close()
self._retry_or_cry(error)
finally:
self.request.close()
def _retry_or_cry(self, error):
if self.retries > self._retried:
time.sleep(self.retry_delay)
self._retried += 1
try:
self.offset = self.get_offset()
except TusCommunicationError as e:
self._retry_or_cry(e)
else:
self._do_request()
else:
raise error
|
tus/tus-py-client
|
tusclient/uploader.py
|
Uploader.get_url
|
python
|
def get_url(self):
if self.store_url and self.url_storage:
key = self.fingerprinter.get_fingerprint(self.get_file_stream())
url = self.url_storage.get_item(key)
if not url:
url = self.create_url()
self.url_storage.set_item(key, url)
return url
else:
return self.create_url()
|
Return the tus upload url.
If resumability is enabled, this would try to get the url from storage if available,
otherwise it would request a new upload url from the tus server.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L201-L216
|
[
"def get_file_stream(self):\n \"\"\"\n Return a file stream instance of the upload.\n \"\"\"\n if self.file_stream:\n self.file_stream.seek(0)\n return self.file_stream\n elif os.path.isfile(self.file_path):\n return open(self.file_path, 'rb')\n else:\n raise ValueError(\"invalid file {}\".format(self.file_path))\n"
] |
class Uploader(object):
"""
Object to control upload related functions.
:Attributes:
- file_path (str):
This is the path(absolute/relative) to the file that is intended for upload
to the tus server. On instantiation this attribute is required.
- file_stream (file):
As an alternative to the `file_path`, an instance of the file to be uploaded
can be passed to the constructor as `file_stream`. Do note that either the
`file_stream` or the `file_path` must be passed on instantiation.
- url (str):
If the upload url for the file is known, it can be passed to the constructor.
This may happen when you resume an upload.
- client (<tusclient.client.TusClient>):
An instance of `tusclient.client.TusClient`. This would tell the uploader instance
what client it is operating with. Although this argument is optional, it is only
optional if the 'url' argument is specified.
- chunk_size (int):
This tells the uploader what chunk size(in bytes) should be uploaded when the
method `upload_chunk` is called. This defaults to the maximum possible integer if not
specified.
- metadata (dict):
A dictionary containing the upload-metadata. This would be encoded internally
by the method `encode_metadata` to conform with the tus protocol.
- offset (int):
The offset value of the upload indicates the current position of the file upload.
- stop_at (int):
At what offset value the upload should stop.
- request (<tusclient.request.TusRequest>):
A http Request instance of the last chunk uploaded.
- retries (int):
The number of attempts the uploader should make in the case of a failed upload.
If not specified, it defaults to 0.
- retry_delay (int):
How long (in seconds) the uploader should wait before retrying a failed upload attempt.
If not specified, it defaults to 30.
- store_url (bool):
Determines whether or not url should be stored, and uploads should be resumed.
- url_storage (<tusclient.storage.interface.Storage>):
An implementation of <tusclient.storage.interface.Storage> which is an API for URL storage.
This value must be set if store_url is set to true. A ready to use implementation exists atbe used out of the box. But you can
implement your own custom storage API and pass an instace of it as value.
- fingerprinter (<tusclient.fingerprint.interface.Fingerprint>):
An implementation of <tusclient.fingerprint.interface.Fingerprint> which is an API to generate
a unique fingerprint for the uploaded file. This is used for url storage when resumability is enabled.
if store_url is set to true, the default fingerprint module (<tusclient.fingerprint.fingerprint.Fingerprint>)
would be used. But you can set your own custom fingerprint module by passing it to the constructor.
- log_func (<function>):
A logging function to be passed diagnostic messages during file uploads
- upload_checksum (bool):
Whether or not to supply the Upload-Checksum header along with each
chunk. Defaults to False.
:Constructor Args:
- file_path (str)
- file_stream (Optional[file])
- url (Optional[str])
- client (Optional [<tusclient.client.TusClient>])
- chunk_size (Optional[int])
- metadata (Optional[dict])
- retries (Optional[int])
- retry_delay (Optional[int])
- store_url (Optional[bool])
- url_storage (Optinal [<tusclient.storage.interface.Storage>])
- fingerprinter (Optional [<tusclient.fingerprint.interface.Fingerprint>])
- log_func (Optional [<function>])
- upload_checksum (Optional[bool])
"""
DEFAULT_HEADERS = {"Tus-Resumable": "1.0.0"}
DEFAULT_CHUNK_SIZE = MAXSIZE
CHECKSUM_ALGORITHM_PAIR = ("sha1", hashlib.sha1, )
def __init__(self, file_path=None, file_stream=None, url=None, client=None,
chunk_size=None, metadata=None, retries=0, retry_delay=30,
store_url=False, url_storage=None, fingerprinter=None,
log_func=None, upload_checksum=False):
if file_path is None and file_stream is None:
raise ValueError("Either 'file_path' or 'file_stream' cannot be None.")
if url is None and client is None:
raise ValueError("Either 'url' or 'client' cannot be None.")
if store_url and url_storage is None:
raise ValueError("Please specify a storage instance to enable resumablility.")
self.file_path = file_path
self.file_stream = file_stream
self.stop_at = self.file_size
self.client = client
self.metadata = metadata or {}
self.store_url = store_url
self.url_storage = url_storage
self.fingerprinter = fingerprinter or fingerprint.Fingerprint()
self.url = url or self.get_url()
self.offset = self.get_offset()
self.chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE
self.request = None
self.retries = retries
self._retried = 0
self.retry_delay = retry_delay
self.log_func = log_func
self.upload_checksum = upload_checksum
self.__checksum_algorithm_name, self.__checksum_algorithm = \
self.CHECKSUM_ALGORITHM_PAIR
# it is important to have this as a @property so it gets
# updated client headers.
@property
def headers(self):
"""
Return headers of the uploader instance. This would include the headers of the
client instance.
"""
client_headers = getattr(self.client, 'headers', {})
return dict(self.DEFAULT_HEADERS, **client_headers)
@property
def headers_as_list(self):
"""
Does the same as 'headers' except it is returned as a list.
"""
headers = self.headers
headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)]
return headers_list
@property
def checksum_algorithm(self):
"""The checksum algorithm to be used for the Upload-Checksum extension.
"""
return self.__checksum_algorithm
@property
def checksum_algorithm_name(self):
"""The name of the checksum algorithm to be used for the Upload-Checksum
extension.
"""
return self.__checksum_algorithm_name
@_catch_requests_error
def get_offset(self):
"""
Return offset from tus server.
This is different from the instance attribute 'offset' because this makes an
http request to the tus server to retrieve the offset.
"""
resp = requests.head(self.url, headers=self.headers)
offset = resp.headers.get('upload-offset')
if offset is None:
msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return int(offset)
def encode_metadata(self):
"""
Return list of encoded metadata as defined by the Tus protocol.
"""
encoded_list = []
for key, value in iteritems(self.metadata):
key_str = str(key) # dict keys may be of any object type.
# confirm that the key does not contain unwanted characters.
if re.search(r'^$|[\s,]+', key_str):
msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
raise ValueError(msg.format(key_str))
value_bytes = b(value) # python 3 only encodes bytes
encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))
return encoded_list
@_catch_requests_error
def create_url(self):
"""
Return upload url.
Makes request to tus server to create a new upload url for the required file upload.
"""
headers = self.headers
headers['upload-length'] = str(self.file_size)
headers['upload-metadata'] = ','.join(self.encode_metadata())
resp = requests.post(self.client.url, headers=headers)
url = resp.headers.get("location")
if url is None:
msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return urljoin(self.client.url, url)
@property
def request_length(self):
"""
Return length of next chunk upload.
"""
remainder = self.stop_at - self.offset
return self.chunk_size if remainder > self.chunk_size else remainder
def verify_upload(self):
"""
Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful.
"""
if self.request.status_code == 204:
return True
else:
raise TusUploadFailed('', self.request.status_code, self.request.response_content)
def get_file_stream(self):
"""
Return a file stream instance of the upload.
"""
if self.file_stream:
self.file_stream.seek(0)
return self.file_stream
elif os.path.isfile(self.file_path):
return open(self.file_path, 'rb')
else:
raise ValueError("invalid file {}".format(self.file_path))
@property
def file_size(self):
"""
Return size of the file.
"""
stream = self.get_file_stream()
stream.seek(0, os.SEEK_END)
return stream.tell()
def upload(self, stop_at=None):
"""
Perform file upload.
Performs continous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
"""
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
def upload_chunk(self):
"""
Upload chunk of file.
"""
self._retried = 0
self._do_request()
self.offset = int(self.request.response_headers.get('upload-offset'))
if self.log_func:
msg = '{} bytes uploaded ...'.format(self.offset)
self.log_func(msg)
def _do_request(self):
# TODO: Maybe the request should not be re-created everytime.
# The request handle could be left open until upload is done instead.
self.request = TusRequest(self)
try:
self.request.perform()
self.verify_upload()
except TusUploadFailed as error:
self.request.close()
self._retry_or_cry(error)
finally:
self.request.close()
def _retry_or_cry(self, error):
if self.retries > self._retried:
time.sleep(self.retry_delay)
self._retried += 1
try:
self.offset = self.get_offset()
except TusCommunicationError as e:
self._retry_or_cry(e)
else:
self._do_request()
else:
raise error
|
tus/tus-py-client
|
tusclient/uploader.py
|
Uploader.create_url
|
python
|
def create_url(self):
headers = self.headers
headers['upload-length'] = str(self.file_size)
headers['upload-metadata'] = ','.join(self.encode_metadata())
resp = requests.post(self.client.url, headers=headers)
url = resp.headers.get("location")
if url is None:
msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return urljoin(self.client.url, url)
|
Return upload url.
Makes request to tus server to create a new upload url for the required file upload.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L219-L233
|
[
"def encode_metadata(self):\n \"\"\"\n Return list of encoded metadata as defined by the Tus protocol.\n \"\"\"\n encoded_list = []\n for key, value in iteritems(self.metadata):\n key_str = str(key) # dict keys may be of any object type.\n\n # confirm that the key does not contain unwanted characters.\n if re.search(r'^$|[\\s,]+', key_str):\n msg = 'Upload-metadata key \"{}\" cannot be empty nor contain spaces or commas.'\n raise ValueError(msg.format(key_str))\n\n value_bytes = b(value) # python 3 only encodes bytes\n encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))\n return encoded_list\n"
] |
class Uploader(object):
"""
Object to control upload related functions.
:Attributes:
- file_path (str):
This is the path(absolute/relative) to the file that is intended for upload
to the tus server. On instantiation this attribute is required.
- file_stream (file):
As an alternative to the `file_path`, an instance of the file to be uploaded
can be passed to the constructor as `file_stream`. Do note that either the
`file_stream` or the `file_path` must be passed on instantiation.
- url (str):
If the upload url for the file is known, it can be passed to the constructor.
This may happen when you resume an upload.
- client (<tusclient.client.TusClient>):
An instance of `tusclient.client.TusClient`. This would tell the uploader instance
what client it is operating with. Although this argument is optional, it is only
optional if the 'url' argument is specified.
- chunk_size (int):
This tells the uploader what chunk size(in bytes) should be uploaded when the
method `upload_chunk` is called. This defaults to the maximum possible integer if not
specified.
- metadata (dict):
A dictionary containing the upload-metadata. This would be encoded internally
by the method `encode_metadata` to conform with the tus protocol.
- offset (int):
The offset value of the upload indicates the current position of the file upload.
- stop_at (int):
At what offset value the upload should stop.
- request (<tusclient.request.TusRequest>):
A http Request instance of the last chunk uploaded.
- retries (int):
The number of attempts the uploader should make in the case of a failed upload.
If not specified, it defaults to 0.
- retry_delay (int):
How long (in seconds) the uploader should wait before retrying a failed upload attempt.
If not specified, it defaults to 30.
- store_url (bool):
Determines whether or not url should be stored, and uploads should be resumed.
- url_storage (<tusclient.storage.interface.Storage>):
An implementation of <tusclient.storage.interface.Storage> which is an API for URL storage.
This value must be set if store_url is set to true. A ready to use implementation exists atbe used out of the box. But you can
implement your own custom storage API and pass an instace of it as value.
- fingerprinter (<tusclient.fingerprint.interface.Fingerprint>):
An implementation of <tusclient.fingerprint.interface.Fingerprint> which is an API to generate
a unique fingerprint for the uploaded file. This is used for url storage when resumability is enabled.
if store_url is set to true, the default fingerprint module (<tusclient.fingerprint.fingerprint.Fingerprint>)
would be used. But you can set your own custom fingerprint module by passing it to the constructor.
- log_func (<function>):
A logging function to be passed diagnostic messages during file uploads
- upload_checksum (bool):
Whether or not to supply the Upload-Checksum header along with each
chunk. Defaults to False.
:Constructor Args:
- file_path (str)
- file_stream (Optional[file])
- url (Optional[str])
- client (Optional [<tusclient.client.TusClient>])
- chunk_size (Optional[int])
- metadata (Optional[dict])
- retries (Optional[int])
- retry_delay (Optional[int])
- store_url (Optional[bool])
- url_storage (Optinal [<tusclient.storage.interface.Storage>])
- fingerprinter (Optional [<tusclient.fingerprint.interface.Fingerprint>])
- log_func (Optional [<function>])
- upload_checksum (Optional[bool])
"""
DEFAULT_HEADERS = {"Tus-Resumable": "1.0.0"}
DEFAULT_CHUNK_SIZE = MAXSIZE
CHECKSUM_ALGORITHM_PAIR = ("sha1", hashlib.sha1, )
def __init__(self, file_path=None, file_stream=None, url=None, client=None,
chunk_size=None, metadata=None, retries=0, retry_delay=30,
store_url=False, url_storage=None, fingerprinter=None,
log_func=None, upload_checksum=False):
if file_path is None and file_stream is None:
raise ValueError("Either 'file_path' or 'file_stream' cannot be None.")
if url is None and client is None:
raise ValueError("Either 'url' or 'client' cannot be None.")
if store_url and url_storage is None:
raise ValueError("Please specify a storage instance to enable resumablility.")
self.file_path = file_path
self.file_stream = file_stream
self.stop_at = self.file_size
self.client = client
self.metadata = metadata or {}
self.store_url = store_url
self.url_storage = url_storage
self.fingerprinter = fingerprinter or fingerprint.Fingerprint()
self.url = url or self.get_url()
self.offset = self.get_offset()
self.chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE
self.request = None
self.retries = retries
self._retried = 0
self.retry_delay = retry_delay
self.log_func = log_func
self.upload_checksum = upload_checksum
self.__checksum_algorithm_name, self.__checksum_algorithm = \
self.CHECKSUM_ALGORITHM_PAIR
# it is important to have this as a @property so it gets
# updated client headers.
@property
def headers(self):
"""
Return headers of the uploader instance. This would include the headers of the
client instance.
"""
client_headers = getattr(self.client, 'headers', {})
return dict(self.DEFAULT_HEADERS, **client_headers)
@property
def headers_as_list(self):
"""
Does the same as 'headers' except it is returned as a list.
"""
headers = self.headers
headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)]
return headers_list
@property
def checksum_algorithm(self):
"""The checksum algorithm to be used for the Upload-Checksum extension.
"""
return self.__checksum_algorithm
@property
def checksum_algorithm_name(self):
"""The name of the checksum algorithm to be used for the Upload-Checksum
extension.
"""
return self.__checksum_algorithm_name
@_catch_requests_error
def get_offset(self):
"""
Return offset from tus server.
This is different from the instance attribute 'offset' because this makes an
http request to the tus server to retrieve the offset.
"""
resp = requests.head(self.url, headers=self.headers)
offset = resp.headers.get('upload-offset')
if offset is None:
msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return int(offset)
def encode_metadata(self):
"""
Return list of encoded metadata as defined by the Tus protocol.
"""
encoded_list = []
for key, value in iteritems(self.metadata):
key_str = str(key) # dict keys may be of any object type.
# confirm that the key does not contain unwanted characters.
if re.search(r'^$|[\s,]+', key_str):
msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
raise ValueError(msg.format(key_str))
value_bytes = b(value) # python 3 only encodes bytes
encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))
return encoded_list
def get_url(self):
"""
Return the tus upload url.
If resumability is enabled, this would try to get the url from storage if available,
otherwise it would request a new upload url from the tus server.
"""
if self.store_url and self.url_storage:
key = self.fingerprinter.get_fingerprint(self.get_file_stream())
url = self.url_storage.get_item(key)
if not url:
url = self.create_url()
self.url_storage.set_item(key, url)
return url
else:
return self.create_url()
@_catch_requests_error
@property
def request_length(self):
"""
Return length of next chunk upload.
"""
remainder = self.stop_at - self.offset
return self.chunk_size if remainder > self.chunk_size else remainder
def verify_upload(self):
"""
Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful.
"""
if self.request.status_code == 204:
return True
else:
raise TusUploadFailed('', self.request.status_code, self.request.response_content)
def get_file_stream(self):
"""
Return a file stream instance of the upload.
"""
if self.file_stream:
self.file_stream.seek(0)
return self.file_stream
elif os.path.isfile(self.file_path):
return open(self.file_path, 'rb')
else:
raise ValueError("invalid file {}".format(self.file_path))
@property
def file_size(self):
"""
Return size of the file.
"""
stream = self.get_file_stream()
stream.seek(0, os.SEEK_END)
return stream.tell()
def upload(self, stop_at=None):
"""
Perform file upload.
Performs continous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
"""
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
def upload_chunk(self):
"""
Upload chunk of file.
"""
self._retried = 0
self._do_request()
self.offset = int(self.request.response_headers.get('upload-offset'))
if self.log_func:
msg = '{} bytes uploaded ...'.format(self.offset)
self.log_func(msg)
def _do_request(self):
# TODO: Maybe the request should not be re-created everytime.
# The request handle could be left open until upload is done instead.
self.request = TusRequest(self)
try:
self.request.perform()
self.verify_upload()
except TusUploadFailed as error:
self.request.close()
self._retry_or_cry(error)
finally:
self.request.close()
def _retry_or_cry(self, error):
if self.retries > self._retried:
time.sleep(self.retry_delay)
self._retried += 1
try:
self.offset = self.get_offset()
except TusCommunicationError as e:
self._retry_or_cry(e)
else:
self._do_request()
else:
raise error
|
tus/tus-py-client
|
tusclient/uploader.py
|
Uploader.request_length
|
python
|
def request_length(self):
remainder = self.stop_at - self.offset
return self.chunk_size if remainder > self.chunk_size else remainder
|
Return length of next chunk upload.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L236-L241
| null |
class Uploader(object):
"""
Object to control upload related functions.
:Attributes:
- file_path (str):
This is the path(absolute/relative) to the file that is intended for upload
to the tus server. On instantiation this attribute is required.
- file_stream (file):
As an alternative to the `file_path`, an instance of the file to be uploaded
can be passed to the constructor as `file_stream`. Do note that either the
`file_stream` or the `file_path` must be passed on instantiation.
- url (str):
If the upload url for the file is known, it can be passed to the constructor.
This may happen when you resume an upload.
- client (<tusclient.client.TusClient>):
An instance of `tusclient.client.TusClient`. This would tell the uploader instance
what client it is operating with. Although this argument is optional, it is only
optional if the 'url' argument is specified.
- chunk_size (int):
This tells the uploader what chunk size(in bytes) should be uploaded when the
method `upload_chunk` is called. This defaults to the maximum possible integer if not
specified.
- metadata (dict):
A dictionary containing the upload-metadata. This would be encoded internally
by the method `encode_metadata` to conform with the tus protocol.
- offset (int):
The offset value of the upload indicates the current position of the file upload.
- stop_at (int):
At what offset value the upload should stop.
- request (<tusclient.request.TusRequest>):
A http Request instance of the last chunk uploaded.
- retries (int):
The number of attempts the uploader should make in the case of a failed upload.
If not specified, it defaults to 0.
- retry_delay (int):
How long (in seconds) the uploader should wait before retrying a failed upload attempt.
If not specified, it defaults to 30.
- store_url (bool):
Determines whether or not url should be stored, and uploads should be resumed.
- url_storage (<tusclient.storage.interface.Storage>):
An implementation of <tusclient.storage.interface.Storage> which is an API for URL storage.
This value must be set if store_url is set to true. A ready to use implementation exists atbe used out of the box. But you can
implement your own custom storage API and pass an instace of it as value.
- fingerprinter (<tusclient.fingerprint.interface.Fingerprint>):
An implementation of <tusclient.fingerprint.interface.Fingerprint> which is an API to generate
a unique fingerprint for the uploaded file. This is used for url storage when resumability is enabled.
if store_url is set to true, the default fingerprint module (<tusclient.fingerprint.fingerprint.Fingerprint>)
would be used. But you can set your own custom fingerprint module by passing it to the constructor.
- log_func (<function>):
A logging function to be passed diagnostic messages during file uploads
- upload_checksum (bool):
Whether or not to supply the Upload-Checksum header along with each
chunk. Defaults to False.
:Constructor Args:
- file_path (str)
- file_stream (Optional[file])
- url (Optional[str])
- client (Optional [<tusclient.client.TusClient>])
- chunk_size (Optional[int])
- metadata (Optional[dict])
- retries (Optional[int])
- retry_delay (Optional[int])
- store_url (Optional[bool])
- url_storage (Optinal [<tusclient.storage.interface.Storage>])
- fingerprinter (Optional [<tusclient.fingerprint.interface.Fingerprint>])
- log_func (Optional [<function>])
- upload_checksum (Optional[bool])
"""
DEFAULT_HEADERS = {"Tus-Resumable": "1.0.0"}
DEFAULT_CHUNK_SIZE = MAXSIZE
CHECKSUM_ALGORITHM_PAIR = ("sha1", hashlib.sha1, )
def __init__(self, file_path=None, file_stream=None, url=None, client=None,
chunk_size=None, metadata=None, retries=0, retry_delay=30,
store_url=False, url_storage=None, fingerprinter=None,
log_func=None, upload_checksum=False):
if file_path is None and file_stream is None:
raise ValueError("Either 'file_path' or 'file_stream' cannot be None.")
if url is None and client is None:
raise ValueError("Either 'url' or 'client' cannot be None.")
if store_url and url_storage is None:
raise ValueError("Please specify a storage instance to enable resumablility.")
self.file_path = file_path
self.file_stream = file_stream
self.stop_at = self.file_size
self.client = client
self.metadata = metadata or {}
self.store_url = store_url
self.url_storage = url_storage
self.fingerprinter = fingerprinter or fingerprint.Fingerprint()
self.url = url or self.get_url()
self.offset = self.get_offset()
self.chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE
self.request = None
self.retries = retries
self._retried = 0
self.retry_delay = retry_delay
self.log_func = log_func
self.upload_checksum = upload_checksum
self.__checksum_algorithm_name, self.__checksum_algorithm = \
self.CHECKSUM_ALGORITHM_PAIR
# it is important to have this as a @property so it gets
# updated client headers.
@property
def headers(self):
"""
Return headers of the uploader instance. This would include the headers of the
client instance.
"""
client_headers = getattr(self.client, 'headers', {})
return dict(self.DEFAULT_HEADERS, **client_headers)
@property
def headers_as_list(self):
"""
Does the same as 'headers' except it is returned as a list.
"""
headers = self.headers
headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)]
return headers_list
@property
def checksum_algorithm(self):
"""The checksum algorithm to be used for the Upload-Checksum extension.
"""
return self.__checksum_algorithm
@property
def checksum_algorithm_name(self):
"""The name of the checksum algorithm to be used for the Upload-Checksum
extension.
"""
return self.__checksum_algorithm_name
@_catch_requests_error
def get_offset(self):
"""
Return offset from tus server.
This is different from the instance attribute 'offset' because this makes an
http request to the tus server to retrieve the offset.
"""
resp = requests.head(self.url, headers=self.headers)
offset = resp.headers.get('upload-offset')
if offset is None:
msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return int(offset)
def encode_metadata(self):
"""
Return list of encoded metadata as defined by the Tus protocol.
"""
encoded_list = []
for key, value in iteritems(self.metadata):
key_str = str(key) # dict keys may be of any object type.
# confirm that the key does not contain unwanted characters.
if re.search(r'^$|[\s,]+', key_str):
msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
raise ValueError(msg.format(key_str))
value_bytes = b(value) # python 3 only encodes bytes
encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))
return encoded_list
def get_url(self):
"""
Return the tus upload url.
If resumability is enabled, this would try to get the url from storage if available,
otherwise it would request a new upload url from the tus server.
"""
if self.store_url and self.url_storage:
key = self.fingerprinter.get_fingerprint(self.get_file_stream())
url = self.url_storage.get_item(key)
if not url:
url = self.create_url()
self.url_storage.set_item(key, url)
return url
else:
return self.create_url()
@_catch_requests_error
def create_url(self):
"""
Return upload url.
Makes request to tus server to create a new upload url for the required file upload.
"""
headers = self.headers
headers['upload-length'] = str(self.file_size)
headers['upload-metadata'] = ','.join(self.encode_metadata())
resp = requests.post(self.client.url, headers=headers)
url = resp.headers.get("location")
if url is None:
msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return urljoin(self.client.url, url)
@property
def verify_upload(self):
"""
Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful.
"""
if self.request.status_code == 204:
return True
else:
raise TusUploadFailed('', self.request.status_code, self.request.response_content)
def get_file_stream(self):
"""
Return a file stream instance of the upload.
"""
if self.file_stream:
self.file_stream.seek(0)
return self.file_stream
elif os.path.isfile(self.file_path):
return open(self.file_path, 'rb')
else:
raise ValueError("invalid file {}".format(self.file_path))
@property
def file_size(self):
"""
Return size of the file.
"""
stream = self.get_file_stream()
stream.seek(0, os.SEEK_END)
return stream.tell()
def upload(self, stop_at=None):
"""
Perform file upload.
Performs continous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
"""
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
def upload_chunk(self):
"""
Upload chunk of file.
"""
self._retried = 0
self._do_request()
self.offset = int(self.request.response_headers.get('upload-offset'))
if self.log_func:
msg = '{} bytes uploaded ...'.format(self.offset)
self.log_func(msg)
def _do_request(self):
# TODO: Maybe the request should not be re-created everytime.
# The request handle could be left open until upload is done instead.
self.request = TusRequest(self)
try:
self.request.perform()
self.verify_upload()
except TusUploadFailed as error:
self.request.close()
self._retry_or_cry(error)
finally:
self.request.close()
def _retry_or_cry(self, error):
if self.retries > self._retried:
time.sleep(self.retry_delay)
self._retried += 1
try:
self.offset = self.get_offset()
except TusCommunicationError as e:
self._retry_or_cry(e)
else:
self._do_request()
else:
raise error
|
tus/tus-py-client
|
tusclient/uploader.py
|
Uploader.verify_upload
|
python
|
def verify_upload(self):
if self.request.status_code == 204:
return True
else:
raise TusUploadFailed('', self.request.status_code, self.request.response_content)
|
Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L243-L251
| null |
class Uploader(object):
"""
Object to control upload related functions.
:Attributes:
- file_path (str):
This is the path(absolute/relative) to the file that is intended for upload
to the tus server. On instantiation this attribute is required.
- file_stream (file):
As an alternative to the `file_path`, an instance of the file to be uploaded
can be passed to the constructor as `file_stream`. Do note that either the
`file_stream` or the `file_path` must be passed on instantiation.
- url (str):
If the upload url for the file is known, it can be passed to the constructor.
This may happen when you resume an upload.
- client (<tusclient.client.TusClient>):
An instance of `tusclient.client.TusClient`. This would tell the uploader instance
what client it is operating with. Although this argument is optional, it is only
optional if the 'url' argument is specified.
- chunk_size (int):
This tells the uploader what chunk size(in bytes) should be uploaded when the
method `upload_chunk` is called. This defaults to the maximum possible integer if not
specified.
- metadata (dict):
A dictionary containing the upload-metadata. This would be encoded internally
by the method `encode_metadata` to conform with the tus protocol.
- offset (int):
The offset value of the upload indicates the current position of the file upload.
- stop_at (int):
At what offset value the upload should stop.
- request (<tusclient.request.TusRequest>):
A http Request instance of the last chunk uploaded.
- retries (int):
The number of attempts the uploader should make in the case of a failed upload.
If not specified, it defaults to 0.
- retry_delay (int):
How long (in seconds) the uploader should wait before retrying a failed upload attempt.
If not specified, it defaults to 30.
- store_url (bool):
Determines whether or not url should be stored, and uploads should be resumed.
- url_storage (<tusclient.storage.interface.Storage>):
An implementation of <tusclient.storage.interface.Storage> which is an API for URL storage.
This value must be set if store_url is set to true. A ready to use implementation exists atbe used out of the box. But you can
implement your own custom storage API and pass an instace of it as value.
- fingerprinter (<tusclient.fingerprint.interface.Fingerprint>):
An implementation of <tusclient.fingerprint.interface.Fingerprint> which is an API to generate
a unique fingerprint for the uploaded file. This is used for url storage when resumability is enabled.
if store_url is set to true, the default fingerprint module (<tusclient.fingerprint.fingerprint.Fingerprint>)
would be used. But you can set your own custom fingerprint module by passing it to the constructor.
- log_func (<function>):
A logging function to be passed diagnostic messages during file uploads
- upload_checksum (bool):
Whether or not to supply the Upload-Checksum header along with each
chunk. Defaults to False.
:Constructor Args:
- file_path (str)
- file_stream (Optional[file])
- url (Optional[str])
- client (Optional [<tusclient.client.TusClient>])
- chunk_size (Optional[int])
- metadata (Optional[dict])
- retries (Optional[int])
- retry_delay (Optional[int])
- store_url (Optional[bool])
- url_storage (Optinal [<tusclient.storage.interface.Storage>])
- fingerprinter (Optional [<tusclient.fingerprint.interface.Fingerprint>])
- log_func (Optional [<function>])
- upload_checksum (Optional[bool])
"""
DEFAULT_HEADERS = {"Tus-Resumable": "1.0.0"}
DEFAULT_CHUNK_SIZE = MAXSIZE
CHECKSUM_ALGORITHM_PAIR = ("sha1", hashlib.sha1, )
def __init__(self, file_path=None, file_stream=None, url=None, client=None,
chunk_size=None, metadata=None, retries=0, retry_delay=30,
store_url=False, url_storage=None, fingerprinter=None,
log_func=None, upload_checksum=False):
if file_path is None and file_stream is None:
raise ValueError("Either 'file_path' or 'file_stream' cannot be None.")
if url is None and client is None:
raise ValueError("Either 'url' or 'client' cannot be None.")
if store_url and url_storage is None:
raise ValueError("Please specify a storage instance to enable resumablility.")
self.file_path = file_path
self.file_stream = file_stream
self.stop_at = self.file_size
self.client = client
self.metadata = metadata or {}
self.store_url = store_url
self.url_storage = url_storage
self.fingerprinter = fingerprinter or fingerprint.Fingerprint()
self.url = url or self.get_url()
self.offset = self.get_offset()
self.chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE
self.request = None
self.retries = retries
self._retried = 0
self.retry_delay = retry_delay
self.log_func = log_func
self.upload_checksum = upload_checksum
self.__checksum_algorithm_name, self.__checksum_algorithm = \
self.CHECKSUM_ALGORITHM_PAIR
# it is important to have this as a @property so it gets
# updated client headers.
@property
def headers(self):
"""
Return headers of the uploader instance. This would include the headers of the
client instance.
"""
client_headers = getattr(self.client, 'headers', {})
return dict(self.DEFAULT_HEADERS, **client_headers)
@property
def headers_as_list(self):
"""
Does the same as 'headers' except it is returned as a list.
"""
headers = self.headers
headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)]
return headers_list
@property
def checksum_algorithm(self):
"""The checksum algorithm to be used for the Upload-Checksum extension.
"""
return self.__checksum_algorithm
@property
def checksum_algorithm_name(self):
"""The name of the checksum algorithm to be used for the Upload-Checksum
extension.
"""
return self.__checksum_algorithm_name
@_catch_requests_error
def get_offset(self):
    """Query the tus server for the current upload offset.

    Unlike the instance attribute ``offset``, this issues a HEAD
    request against the upload url and returns the server-side value.

    :raises TusCommunicationError: if the response carries no
        ``upload-offset`` header.
    """
    response = requests.head(self.url, headers=self.headers)
    server_offset = response.headers.get('upload-offset')
    if server_offset is None:
        raise TusCommunicationError(
            'Attempt to retrieve offset fails with status {}'.format(response.status_code),
            response.status_code, response.content)
    return int(server_offset)
def encode_metadata(self):
    """Return the upload metadata encoded per the Tus protocol.

    Each entry becomes ``"<key> <base64(value)>"``; keys must be
    non-empty and contain no whitespace or commas.

    :raises ValueError: for an empty key or one containing spaces or commas.
    """
    encoded = []
    for raw_key, raw_value in iteritems(self.metadata):
        key = str(raw_key)  # dict keys may be of any object type.
        # Reject keys that would break the comma-separated header format.
        if re.search(r'^$|[\s,]+', key):
            raise ValueError(
                'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'.format(key))
        value_bytes = b(raw_value)  # python 3 only encodes bytes
        encoded.append('{} {}'.format(key, b64encode(value_bytes).decode('ascii')))
    return encoded
def get_url(self):
"""
Return the tus upload url.
If resumability is enabled, this would try to get the url from storage if available,
otherwise it would request a new upload url from the tus server.
"""
if self.store_url and self.url_storage:
key = self.fingerprinter.get_fingerprint(self.get_file_stream())
url = self.url_storage.get_item(key)
if not url:
url = self.create_url()
self.url_storage.set_item(key, url)
return url
else:
return self.create_url()
@_catch_requests_error
def create_url(self):
"""
Return upload url.
Makes request to tus server to create a new upload url for the required file upload.
"""
headers = self.headers
headers['upload-length'] = str(self.file_size)
headers['upload-metadata'] = ','.join(self.encode_metadata())
resp = requests.post(self.client.url, headers=headers)
url = resp.headers.get("location")
if url is None:
msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return urljoin(self.client.url, url)
@property
def request_length(self):
    """Number of bytes the next chunk upload should carry.

    The final chunk may be smaller than ``chunk_size`` when fewer
    bytes remain before ``stop_at``.
    """
    remaining = self.stop_at - self.offset
    return min(self.chunk_size, remaining)
def get_file_stream(self):
    """Return a readable stream for the upload, rewound to the start.

    Prefers an explicitly supplied ``file_stream``; otherwise opens
    ``file_path`` for binary reading.

    :raises ValueError: if no stream was supplied and ``file_path``
        is not an existing file.
    """
    if self.file_stream:
        self.file_stream.seek(0)
        return self.file_stream
    if os.path.isfile(self.file_path):
        return open(self.file_path, 'rb')
    raise ValueError("invalid file {}".format(self.file_path))
@property
def file_size(self):
    """Total size in bytes of the file being uploaded."""
    source = self.get_file_stream()
    # Seek to the end so tell() reports the full length.
    source.seek(0, os.SEEK_END)
    return source.tell()
def upload(self, stop_at=None):
    """Upload the file in chunks of ``chunk_size`` bytes.

    :Args:
        - stop_at (Optional[int]):
            Offset at which uploading should stop. Defaults to the
            full file size.
    """
    self.stop_at = stop_at or self.file_size
    # Keep pushing chunks until the stored offset reaches the target.
    while self.offset < self.stop_at:
        self.upload_chunk()
    # The original while/else body has no break, so this ran
    # unconditionally at loop exit; same here.
    if self.log_func:
        self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
def upload_chunk(self):
"""
Upload chunk of file.
"""
self._retried = 0
self._do_request()
self.offset = int(self.request.response_headers.get('upload-offset'))
if self.log_func:
msg = '{} bytes uploaded ...'.format(self.offset)
self.log_func(msg)
def _do_request(self):
# TODO: Maybe the request should not be re-created everytime.
# The request handle could be left open until upload is done instead.
self.request = TusRequest(self)
try:
self.request.perform()
self.verify_upload()
except TusUploadFailed as error:
self.request.close()
self._retry_or_cry(error)
finally:
self.request.close()
def _retry_or_cry(self, error):
    """Retry the failed chunk upload, or re-raise *error* once the
    retry budget (``retries``) is exhausted.

    Sleeps ``retry_delay`` seconds before each attempt.
    """
    if self.retries > self._retried:
        time.sleep(self.retry_delay)
        self._retried += 1
        try:
            # Re-sync the offset with the server before retrying, in
            # case part of the failed chunk actually arrived.
            self.offset = self.get_offset()
        except TusCommunicationError as e:
            # The offset query itself failed; recurse, which consumes
            # another retry from the budget.
            self._retry_or_cry(e)
        else:
            self._do_request()
    else:
        raise error
|
tus/tus-py-client
|
tusclient/uploader.py
|
Uploader.get_file_stream
|
python
|
def get_file_stream(self):
if self.file_stream:
self.file_stream.seek(0)
return self.file_stream
elif os.path.isfile(self.file_path):
return open(self.file_path, 'rb')
else:
raise ValueError("invalid file {}".format(self.file_path))
|
Return a file stream instance of the upload.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L253-L263
| null |
class Uploader(object):
"""
Object to control upload related functions.
:Attributes:
- file_path (str):
This is the path(absolute/relative) to the file that is intended for upload
to the tus server. On instantiation this attribute is required.
- file_stream (file):
As an alternative to the `file_path`, an instance of the file to be uploaded
can be passed to the constructor as `file_stream`. Do note that either the
`file_stream` or the `file_path` must be passed on instantiation.
- url (str):
If the upload url for the file is known, it can be passed to the constructor.
This may happen when you resume an upload.
- client (<tusclient.client.TusClient>):
An instance of `tusclient.client.TusClient`. This would tell the uploader instance
what client it is operating with. Although this argument is optional, it is only
optional if the 'url' argument is specified.
- chunk_size (int):
This tells the uploader what chunk size(in bytes) should be uploaded when the
method `upload_chunk` is called. This defaults to the maximum possible integer if not
specified.
- metadata (dict):
A dictionary containing the upload-metadata. This would be encoded internally
by the method `encode_metadata` to conform with the tus protocol.
- offset (int):
The offset value of the upload indicates the current position of the file upload.
- stop_at (int):
At what offset value the upload should stop.
- request (<tusclient.request.TusRequest>):
A http Request instance of the last chunk uploaded.
- retries (int):
The number of attempts the uploader should make in the case of a failed upload.
If not specified, it defaults to 0.
- retry_delay (int):
How long (in seconds) the uploader should wait before retrying a failed upload attempt.
If not specified, it defaults to 30.
- store_url (bool):
Determines whether or not url should be stored, and uploads should be resumed.
- url_storage (<tusclient.storage.interface.Storage>):
An implementation of <tusclient.storage.interface.Storage> which is an API for URL storage.
This value must be set if store_url is set to true. A ready to use implementation exists atbe used out of the box. But you can
implement your own custom storage API and pass an instace of it as value.
- fingerprinter (<tusclient.fingerprint.interface.Fingerprint>):
An implementation of <tusclient.fingerprint.interface.Fingerprint> which is an API to generate
a unique fingerprint for the uploaded file. This is used for url storage when resumability is enabled.
if store_url is set to true, the default fingerprint module (<tusclient.fingerprint.fingerprint.Fingerprint>)
would be used. But you can set your own custom fingerprint module by passing it to the constructor.
- log_func (<function>):
A logging function to be passed diagnostic messages during file uploads
- upload_checksum (bool):
Whether or not to supply the Upload-Checksum header along with each
chunk. Defaults to False.
:Constructor Args:
- file_path (str)
- file_stream (Optional[file])
- url (Optional[str])
- client (Optional [<tusclient.client.TusClient>])
- chunk_size (Optional[int])
- metadata (Optional[dict])
- retries (Optional[int])
- retry_delay (Optional[int])
- store_url (Optional[bool])
- url_storage (Optinal [<tusclient.storage.interface.Storage>])
- fingerprinter (Optional [<tusclient.fingerprint.interface.Fingerprint>])
- log_func (Optional [<function>])
- upload_checksum (Optional[bool])
"""
DEFAULT_HEADERS = {"Tus-Resumable": "1.0.0"}
DEFAULT_CHUNK_SIZE = MAXSIZE
CHECKSUM_ALGORITHM_PAIR = ("sha1", hashlib.sha1, )
def __init__(self, file_path=None, file_stream=None, url=None, client=None,
chunk_size=None, metadata=None, retries=0, retry_delay=30,
store_url=False, url_storage=None, fingerprinter=None,
log_func=None, upload_checksum=False):
if file_path is None and file_stream is None:
raise ValueError("Either 'file_path' or 'file_stream' cannot be None.")
if url is None and client is None:
raise ValueError("Either 'url' or 'client' cannot be None.")
if store_url and url_storage is None:
raise ValueError("Please specify a storage instance to enable resumablility.")
self.file_path = file_path
self.file_stream = file_stream
self.stop_at = self.file_size
self.client = client
self.metadata = metadata or {}
self.store_url = store_url
self.url_storage = url_storage
self.fingerprinter = fingerprinter or fingerprint.Fingerprint()
self.url = url or self.get_url()
self.offset = self.get_offset()
self.chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE
self.request = None
self.retries = retries
self._retried = 0
self.retry_delay = retry_delay
self.log_func = log_func
self.upload_checksum = upload_checksum
self.__checksum_algorithm_name, self.__checksum_algorithm = \
self.CHECKSUM_ALGORITHM_PAIR
# it is important to have this as a @property so it gets
# updated client headers.
@property
def headers(self):
"""
Return headers of the uploader instance. This would include the headers of the
client instance.
"""
client_headers = getattr(self.client, 'headers', {})
return dict(self.DEFAULT_HEADERS, **client_headers)
@property
def headers_as_list(self):
"""
Does the same as 'headers' except it is returned as a list.
"""
headers = self.headers
headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)]
return headers_list
@property
def checksum_algorithm(self):
"""The checksum algorithm to be used for the Upload-Checksum extension.
"""
return self.__checksum_algorithm
@property
def checksum_algorithm_name(self):
"""The name of the checksum algorithm to be used for the Upload-Checksum
extension.
"""
return self.__checksum_algorithm_name
@_catch_requests_error
def get_offset(self):
"""
Return offset from tus server.
This is different from the instance attribute 'offset' because this makes an
http request to the tus server to retrieve the offset.
"""
resp = requests.head(self.url, headers=self.headers)
offset = resp.headers.get('upload-offset')
if offset is None:
msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return int(offset)
def encode_metadata(self):
"""
Return list of encoded metadata as defined by the Tus protocol.
"""
encoded_list = []
for key, value in iteritems(self.metadata):
key_str = str(key) # dict keys may be of any object type.
# confirm that the key does not contain unwanted characters.
if re.search(r'^$|[\s,]+', key_str):
msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
raise ValueError(msg.format(key_str))
value_bytes = b(value) # python 3 only encodes bytes
encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))
return encoded_list
def get_url(self):
"""
Return the tus upload url.
If resumability is enabled, this would try to get the url from storage if available,
otherwise it would request a new upload url from the tus server.
"""
if self.store_url and self.url_storage:
key = self.fingerprinter.get_fingerprint(self.get_file_stream())
url = self.url_storage.get_item(key)
if not url:
url = self.create_url()
self.url_storage.set_item(key, url)
return url
else:
return self.create_url()
@_catch_requests_error
def create_url(self):
"""
Return upload url.
Makes request to tus server to create a new upload url for the required file upload.
"""
headers = self.headers
headers['upload-length'] = str(self.file_size)
headers['upload-metadata'] = ','.join(self.encode_metadata())
resp = requests.post(self.client.url, headers=headers)
url = resp.headers.get("location")
if url is None:
msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return urljoin(self.client.url, url)
@property
def request_length(self):
"""
Return length of next chunk upload.
"""
remainder = self.stop_at - self.offset
return self.chunk_size if remainder > self.chunk_size else remainder
def verify_upload(self):
    """Confirm that the last chunk upload succeeded.

    :raises TusUploadFailed: if the last response status is not 204.
    """
    if self.request.status_code != 204:
        raise TusUploadFailed('', self.request.status_code, self.request.response_content)
    return True
@property
def file_size(self):
"""
Return size of the file.
"""
stream = self.get_file_stream()
stream.seek(0, os.SEEK_END)
return stream.tell()
def upload(self, stop_at=None):
"""
Perform file upload.
Performs continous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
"""
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
def upload_chunk(self):
    """Upload a single chunk and advance the stored offset."""
    self._retried = 0  # fresh retry budget for every chunk
    self._do_request()
    # The server reports the new offset after a successful PATCH.
    self.offset = int(self.request.response_headers.get('upload-offset'))
    if self.log_func:
        self.log_func('{} bytes uploaded ...'.format(self.offset))
def _do_request(self):
# TODO: Maybe the request should not be re-created everytime.
# The request handle could be left open until upload is done instead.
self.request = TusRequest(self)
try:
self.request.perform()
self.verify_upload()
except TusUploadFailed as error:
self.request.close()
self._retry_or_cry(error)
finally:
self.request.close()
def _retry_or_cry(self, error):
if self.retries > self._retried:
time.sleep(self.retry_delay)
self._retried += 1
try:
self.offset = self.get_offset()
except TusCommunicationError as e:
self._retry_or_cry(e)
else:
self._do_request()
else:
raise error
|
tus/tus-py-client
|
tusclient/uploader.py
|
Uploader.file_size
|
python
|
def file_size(self):
stream = self.get_file_stream()
stream.seek(0, os.SEEK_END)
return stream.tell()
|
Return size of the file.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L266-L272
|
[
"def get_file_stream(self):\n \"\"\"\n Return a file stream instance of the upload.\n \"\"\"\n if self.file_stream:\n self.file_stream.seek(0)\n return self.file_stream\n elif os.path.isfile(self.file_path):\n return open(self.file_path, 'rb')\n else:\n raise ValueError(\"invalid file {}\".format(self.file_path))\n"
] |
class Uploader(object):
"""
Object to control upload related functions.
:Attributes:
- file_path (str):
This is the path(absolute/relative) to the file that is intended for upload
to the tus server. On instantiation this attribute is required.
- file_stream (file):
As an alternative to the `file_path`, an instance of the file to be uploaded
can be passed to the constructor as `file_stream`. Do note that either the
`file_stream` or the `file_path` must be passed on instantiation.
- url (str):
If the upload url for the file is known, it can be passed to the constructor.
This may happen when you resume an upload.
- client (<tusclient.client.TusClient>):
An instance of `tusclient.client.TusClient`. This would tell the uploader instance
what client it is operating with. Although this argument is optional, it is only
optional if the 'url' argument is specified.
- chunk_size (int):
This tells the uploader what chunk size(in bytes) should be uploaded when the
method `upload_chunk` is called. This defaults to the maximum possible integer if not
specified.
- metadata (dict):
A dictionary containing the upload-metadata. This would be encoded internally
by the method `encode_metadata` to conform with the tus protocol.
- offset (int):
The offset value of the upload indicates the current position of the file upload.
- stop_at (int):
At what offset value the upload should stop.
- request (<tusclient.request.TusRequest>):
A http Request instance of the last chunk uploaded.
- retries (int):
The number of attempts the uploader should make in the case of a failed upload.
If not specified, it defaults to 0.
- retry_delay (int):
How long (in seconds) the uploader should wait before retrying a failed upload attempt.
If not specified, it defaults to 30.
- store_url (bool):
Determines whether or not url should be stored, and uploads should be resumed.
- url_storage (<tusclient.storage.interface.Storage>):
An implementation of <tusclient.storage.interface.Storage> which is an API for URL storage.
This value must be set if store_url is set to true. A ready to use implementation exists atbe used out of the box. But you can
implement your own custom storage API and pass an instace of it as value.
- fingerprinter (<tusclient.fingerprint.interface.Fingerprint>):
An implementation of <tusclient.fingerprint.interface.Fingerprint> which is an API to generate
a unique fingerprint for the uploaded file. This is used for url storage when resumability is enabled.
if store_url is set to true, the default fingerprint module (<tusclient.fingerprint.fingerprint.Fingerprint>)
would be used. But you can set your own custom fingerprint module by passing it to the constructor.
- log_func (<function>):
A logging function to be passed diagnostic messages during file uploads
- upload_checksum (bool):
Whether or not to supply the Upload-Checksum header along with each
chunk. Defaults to False.
:Constructor Args:
- file_path (str)
- file_stream (Optional[file])
- url (Optional[str])
- client (Optional [<tusclient.client.TusClient>])
- chunk_size (Optional[int])
- metadata (Optional[dict])
- retries (Optional[int])
- retry_delay (Optional[int])
- store_url (Optional[bool])
- url_storage (Optinal [<tusclient.storage.interface.Storage>])
- fingerprinter (Optional [<tusclient.fingerprint.interface.Fingerprint>])
- log_func (Optional [<function>])
- upload_checksum (Optional[bool])
"""
DEFAULT_HEADERS = {"Tus-Resumable": "1.0.0"}
DEFAULT_CHUNK_SIZE = MAXSIZE
CHECKSUM_ALGORITHM_PAIR = ("sha1", hashlib.sha1, )
def __init__(self, file_path=None, file_stream=None, url=None, client=None,
chunk_size=None, metadata=None, retries=0, retry_delay=30,
store_url=False, url_storage=None, fingerprinter=None,
log_func=None, upload_checksum=False):
if file_path is None and file_stream is None:
raise ValueError("Either 'file_path' or 'file_stream' cannot be None.")
if url is None and client is None:
raise ValueError("Either 'url' or 'client' cannot be None.")
if store_url and url_storage is None:
raise ValueError("Please specify a storage instance to enable resumablility.")
self.file_path = file_path
self.file_stream = file_stream
self.stop_at = self.file_size
self.client = client
self.metadata = metadata or {}
self.store_url = store_url
self.url_storage = url_storage
self.fingerprinter = fingerprinter or fingerprint.Fingerprint()
self.url = url or self.get_url()
self.offset = self.get_offset()
self.chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE
self.request = None
self.retries = retries
self._retried = 0
self.retry_delay = retry_delay
self.log_func = log_func
self.upload_checksum = upload_checksum
self.__checksum_algorithm_name, self.__checksum_algorithm = \
self.CHECKSUM_ALGORITHM_PAIR
# it is important to have this as a @property so it gets
# updated client headers.
@property
def headers(self):
    """Request headers for this uploader: the protocol defaults
    merged with any headers set on the attached client."""
    from_client = getattr(self.client, 'headers', {})
    return dict(self.DEFAULT_HEADERS, **from_client)
@property
def headers_as_list(self):
"""
Does the same as 'headers' except it is returned as a list.
"""
headers = self.headers
headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)]
return headers_list
@property
def checksum_algorithm(self):
"""The checksum algorithm to be used for the Upload-Checksum extension.
"""
return self.__checksum_algorithm
@property
def checksum_algorithm_name(self):
"""The name of the checksum algorithm to be used for the Upload-Checksum
extension.
"""
return self.__checksum_algorithm_name
@_catch_requests_error
def get_offset(self):
"""
Return offset from tus server.
This is different from the instance attribute 'offset' because this makes an
http request to the tus server to retrieve the offset.
"""
resp = requests.head(self.url, headers=self.headers)
offset = resp.headers.get('upload-offset')
if offset is None:
msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return int(offset)
def encode_metadata(self):
"""
Return list of encoded metadata as defined by the Tus protocol.
"""
encoded_list = []
for key, value in iteritems(self.metadata):
key_str = str(key) # dict keys may be of any object type.
# confirm that the key does not contain unwanted characters.
if re.search(r'^$|[\s,]+', key_str):
msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
raise ValueError(msg.format(key_str))
value_bytes = b(value) # python 3 only encodes bytes
encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))
return encoded_list
def get_url(self):
"""
Return the tus upload url.
If resumability is enabled, this would try to get the url from storage if available,
otherwise it would request a new upload url from the tus server.
"""
if self.store_url and self.url_storage:
key = self.fingerprinter.get_fingerprint(self.get_file_stream())
url = self.url_storage.get_item(key)
if not url:
url = self.create_url()
self.url_storage.set_item(key, url)
return url
else:
return self.create_url()
@_catch_requests_error
def create_url(self):
"""
Return upload url.
Makes request to tus server to create a new upload url for the required file upload.
"""
headers = self.headers
headers['upload-length'] = str(self.file_size)
headers['upload-metadata'] = ','.join(self.encode_metadata())
resp = requests.post(self.client.url, headers=headers)
url = resp.headers.get("location")
if url is None:
msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return urljoin(self.client.url, url)
@property
def request_length(self):
"""
Return length of next chunk upload.
"""
remainder = self.stop_at - self.offset
return self.chunk_size if remainder > self.chunk_size else remainder
def verify_upload(self):
"""
Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful.
"""
if self.request.status_code == 204:
return True
else:
raise TusUploadFailed('', self.request.status_code, self.request.response_content)
def get_file_stream(self):
"""
Return a file stream instance of the upload.
"""
if self.file_stream:
self.file_stream.seek(0)
return self.file_stream
elif os.path.isfile(self.file_path):
return open(self.file_path, 'rb')
else:
raise ValueError("invalid file {}".format(self.file_path))
@property
def upload(self, stop_at=None):
"""
Perform file upload.
Performs continous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
"""
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
def upload_chunk(self):
"""
Upload chunk of file.
"""
self._retried = 0
self._do_request()
self.offset = int(self.request.response_headers.get('upload-offset'))
if self.log_func:
msg = '{} bytes uploaded ...'.format(self.offset)
self.log_func(msg)
def _do_request(self):
# TODO: Maybe the request should not be re-created everytime.
# The request handle could be left open until upload is done instead.
self.request = TusRequest(self)
try:
self.request.perform()
self.verify_upload()
except TusUploadFailed as error:
self.request.close()
self._retry_or_cry(error)
finally:
self.request.close()
def _retry_or_cry(self, error):
if self.retries > self._retried:
time.sleep(self.retry_delay)
self._retried += 1
try:
self.offset = self.get_offset()
except TusCommunicationError as e:
self._retry_or_cry(e)
else:
self._do_request()
else:
raise error
|
tus/tus-py-client
|
tusclient/uploader.py
|
Uploader.upload
|
python
|
def upload(self, stop_at=None):
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
|
Perform file upload.
Performs continous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L274-L292
|
[
"def upload_chunk(self):\n \"\"\"\n Upload chunk of file.\n \"\"\"\n self._retried = 0\n self._do_request()\n self.offset = int(self.request.response_headers.get('upload-offset'))\n if self.log_func:\n msg = '{} bytes uploaded ...'.format(self.offset)\n self.log_func(msg)\n"
] |
class Uploader(object):
"""
Object to control upload related functions.
:Attributes:
- file_path (str):
This is the path(absolute/relative) to the file that is intended for upload
to the tus server. On instantiation this attribute is required.
- file_stream (file):
As an alternative to the `file_path`, an instance of the file to be uploaded
can be passed to the constructor as `file_stream`. Do note that either the
`file_stream` or the `file_path` must be passed on instantiation.
- url (str):
If the upload url for the file is known, it can be passed to the constructor.
This may happen when you resume an upload.
- client (<tusclient.client.TusClient>):
An instance of `tusclient.client.TusClient`. This would tell the uploader instance
what client it is operating with. Although this argument is optional, it is only
optional if the 'url' argument is specified.
- chunk_size (int):
This tells the uploader what chunk size(in bytes) should be uploaded when the
method `upload_chunk` is called. This defaults to the maximum possible integer if not
specified.
- metadata (dict):
A dictionary containing the upload-metadata. This would be encoded internally
by the method `encode_metadata` to conform with the tus protocol.
- offset (int):
The offset value of the upload indicates the current position of the file upload.
- stop_at (int):
At what offset value the upload should stop.
- request (<tusclient.request.TusRequest>):
A http Request instance of the last chunk uploaded.
- retries (int):
The number of attempts the uploader should make in the case of a failed upload.
If not specified, it defaults to 0.
- retry_delay (int):
How long (in seconds) the uploader should wait before retrying a failed upload attempt.
If not specified, it defaults to 30.
- store_url (bool):
Determines whether or not url should be stored, and uploads should be resumed.
- url_storage (<tusclient.storage.interface.Storage>):
An implementation of <tusclient.storage.interface.Storage> which is an API for URL storage.
This value must be set if store_url is set to true. A ready to use implementation exists atbe used out of the box. But you can
implement your own custom storage API and pass an instace of it as value.
- fingerprinter (<tusclient.fingerprint.interface.Fingerprint>):
An implementation of <tusclient.fingerprint.interface.Fingerprint> which is an API to generate
a unique fingerprint for the uploaded file. This is used for url storage when resumability is enabled.
if store_url is set to true, the default fingerprint module (<tusclient.fingerprint.fingerprint.Fingerprint>)
would be used. But you can set your own custom fingerprint module by passing it to the constructor.
- log_func (<function>):
A logging function to be passed diagnostic messages during file uploads
- upload_checksum (bool):
Whether or not to supply the Upload-Checksum header along with each
chunk. Defaults to False.
:Constructor Args:
- file_path (str)
- file_stream (Optional[file])
- url (Optional[str])
- client (Optional [<tusclient.client.TusClient>])
- chunk_size (Optional[int])
- metadata (Optional[dict])
- retries (Optional[int])
- retry_delay (Optional[int])
- store_url (Optional[bool])
- url_storage (Optinal [<tusclient.storage.interface.Storage>])
- fingerprinter (Optional [<tusclient.fingerprint.interface.Fingerprint>])
- log_func (Optional [<function>])
- upload_checksum (Optional[bool])
"""
DEFAULT_HEADERS = {"Tus-Resumable": "1.0.0"}
DEFAULT_CHUNK_SIZE = MAXSIZE
CHECKSUM_ALGORITHM_PAIR = ("sha1", hashlib.sha1, )
def __init__(self, file_path=None, file_stream=None, url=None, client=None,
chunk_size=None, metadata=None, retries=0, retry_delay=30,
store_url=False, url_storage=None, fingerprinter=None,
log_func=None, upload_checksum=False):
if file_path is None and file_stream is None:
raise ValueError("Either 'file_path' or 'file_stream' cannot be None.")
if url is None and client is None:
raise ValueError("Either 'url' or 'client' cannot be None.")
if store_url and url_storage is None:
raise ValueError("Please specify a storage instance to enable resumablility.")
self.file_path = file_path
self.file_stream = file_stream
self.stop_at = self.file_size
self.client = client
self.metadata = metadata or {}
self.store_url = store_url
self.url_storage = url_storage
self.fingerprinter = fingerprinter or fingerprint.Fingerprint()
self.url = url or self.get_url()
self.offset = self.get_offset()
self.chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE
self.request = None
self.retries = retries
self._retried = 0
self.retry_delay = retry_delay
self.log_func = log_func
self.upload_checksum = upload_checksum
self.__checksum_algorithm_name, self.__checksum_algorithm = \
self.CHECKSUM_ALGORITHM_PAIR
# it is important to have this as a @property so it gets
# updated client headers.
@property
def headers(self):
"""
Return headers of the uploader instance. This would include the headers of the
client instance.
"""
client_headers = getattr(self.client, 'headers', {})
return dict(self.DEFAULT_HEADERS, **client_headers)
@property
def headers_as_list(self):
"""
Does the same as 'headers' except it is returned as a list.
"""
headers = self.headers
headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)]
return headers_list
@property
def checksum_algorithm(self):
"""The checksum algorithm to be used for the Upload-Checksum extension.
"""
return self.__checksum_algorithm
@property
def checksum_algorithm_name(self):
"""The name of the checksum algorithm to be used for the Upload-Checksum
extension.
"""
return self.__checksum_algorithm_name
@_catch_requests_error
def get_offset(self):
"""
Return offset from tus server.
This is different from the instance attribute 'offset' because this makes an
http request to the tus server to retrieve the offset.
"""
resp = requests.head(self.url, headers=self.headers)
offset = resp.headers.get('upload-offset')
if offset is None:
msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return int(offset)
def encode_metadata(self):
"""
Return list of encoded metadata as defined by the Tus protocol.
"""
encoded_list = []
for key, value in iteritems(self.metadata):
key_str = str(key) # dict keys may be of any object type.
# confirm that the key does not contain unwanted characters.
if re.search(r'^$|[\s,]+', key_str):
msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
raise ValueError(msg.format(key_str))
value_bytes = b(value) # python 3 only encodes bytes
encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))
return encoded_list
def get_url(self):
"""
Return the tus upload url.
If resumability is enabled, this would try to get the url from storage if available,
otherwise it would request a new upload url from the tus server.
"""
if self.store_url and self.url_storage:
key = self.fingerprinter.get_fingerprint(self.get_file_stream())
url = self.url_storage.get_item(key)
if not url:
url = self.create_url()
self.url_storage.set_item(key, url)
return url
else:
return self.create_url()
@_catch_requests_error
def create_url(self):
"""
Return upload url.
Makes request to tus server to create a new upload url for the required file upload.
"""
headers = self.headers
headers['upload-length'] = str(self.file_size)
headers['upload-metadata'] = ','.join(self.encode_metadata())
resp = requests.post(self.client.url, headers=headers)
url = resp.headers.get("location")
if url is None:
msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return urljoin(self.client.url, url)
@property
def request_length(self):
"""
Return length of next chunk upload.
"""
remainder = self.stop_at - self.offset
return self.chunk_size if remainder > self.chunk_size else remainder
def verify_upload(self):
"""
Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful.
"""
if self.request.status_code == 204:
return True
else:
raise TusUploadFailed('', self.request.status_code, self.request.response_content)
def get_file_stream(self):
"""
Return a file stream instance of the upload.
"""
if self.file_stream:
self.file_stream.seek(0)
return self.file_stream
elif os.path.isfile(self.file_path):
return open(self.file_path, 'rb')
else:
raise ValueError("invalid file {}".format(self.file_path))
@property
def file_size(self):
"""
Return size of the file.
"""
stream = self.get_file_stream()
stream.seek(0, os.SEEK_END)
return stream.tell()
def upload_chunk(self):
"""
Upload chunk of file.
"""
self._retried = 0
self._do_request()
self.offset = int(self.request.response_headers.get('upload-offset'))
if self.log_func:
msg = '{} bytes uploaded ...'.format(self.offset)
self.log_func(msg)
def _do_request(self):
# TODO: Maybe the request should not be re-created everytime.
# The request handle could be left open until upload is done instead.
self.request = TusRequest(self)
try:
self.request.perform()
self.verify_upload()
except TusUploadFailed as error:
self.request.close()
self._retry_or_cry(error)
finally:
self.request.close()
def _retry_or_cry(self, error):
if self.retries > self._retried:
time.sleep(self.retry_delay)
self._retried += 1
try:
self.offset = self.get_offset()
except TusCommunicationError as e:
self._retry_or_cry(e)
else:
self._do_request()
else:
raise error
|
tus/tus-py-client
|
tusclient/uploader.py
|
Uploader.upload_chunk
|
python
|
def upload_chunk(self):
self._retried = 0
self._do_request()
self.offset = int(self.request.response_headers.get('upload-offset'))
if self.log_func:
msg = '{} bytes uploaded ...'.format(self.offset)
self.log_func(msg)
|
Upload chunk of file.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/uploader.py#L294-L303
|
[
"def _do_request(self):\n # TODO: Maybe the request should not be re-created everytime.\n # The request handle could be left open until upload is done instead.\n self.request = TusRequest(self)\n try:\n self.request.perform()\n self.verify_upload()\n except TusUploadFailed as error:\n self.request.close()\n self._retry_or_cry(error)\n finally:\n self.request.close()\n"
] |
class Uploader(object):
"""
Object to control upload related functions.
:Attributes:
- file_path (str):
This is the path(absolute/relative) to the file that is intended for upload
to the tus server. On instantiation this attribute is required.
- file_stream (file):
As an alternative to the `file_path`, an instance of the file to be uploaded
can be passed to the constructor as `file_stream`. Do note that either the
`file_stream` or the `file_path` must be passed on instantiation.
- url (str):
If the upload url for the file is known, it can be passed to the constructor.
This may happen when you resume an upload.
- client (<tusclient.client.TusClient>):
An instance of `tusclient.client.TusClient`. This would tell the uploader instance
what client it is operating with. Although this argument is optional, it is only
optional if the 'url' argument is specified.
- chunk_size (int):
This tells the uploader what chunk size(in bytes) should be uploaded when the
method `upload_chunk` is called. This defaults to the maximum possible integer if not
specified.
- metadata (dict):
A dictionary containing the upload-metadata. This would be encoded internally
by the method `encode_metadata` to conform with the tus protocol.
- offset (int):
The offset value of the upload indicates the current position of the file upload.
- stop_at (int):
At what offset value the upload should stop.
- request (<tusclient.request.TusRequest>):
A http Request instance of the last chunk uploaded.
- retries (int):
The number of attempts the uploader should make in the case of a failed upload.
If not specified, it defaults to 0.
- retry_delay (int):
How long (in seconds) the uploader should wait before retrying a failed upload attempt.
If not specified, it defaults to 30.
- store_url (bool):
Determines whether or not url should be stored, and uploads should be resumed.
- url_storage (<tusclient.storage.interface.Storage>):
An implementation of <tusclient.storage.interface.Storage> which is an API for URL storage.
This value must be set if store_url is set to true. A ready to use implementation exists atbe used out of the box. But you can
implement your own custom storage API and pass an instace of it as value.
- fingerprinter (<tusclient.fingerprint.interface.Fingerprint>):
An implementation of <tusclient.fingerprint.interface.Fingerprint> which is an API to generate
a unique fingerprint for the uploaded file. This is used for url storage when resumability is enabled.
if store_url is set to true, the default fingerprint module (<tusclient.fingerprint.fingerprint.Fingerprint>)
would be used. But you can set your own custom fingerprint module by passing it to the constructor.
- log_func (<function>):
A logging function to be passed diagnostic messages during file uploads
- upload_checksum (bool):
Whether or not to supply the Upload-Checksum header along with each
chunk. Defaults to False.
:Constructor Args:
- file_path (str)
- file_stream (Optional[file])
- url (Optional[str])
- client (Optional [<tusclient.client.TusClient>])
- chunk_size (Optional[int])
- metadata (Optional[dict])
- retries (Optional[int])
- retry_delay (Optional[int])
- store_url (Optional[bool])
- url_storage (Optinal [<tusclient.storage.interface.Storage>])
- fingerprinter (Optional [<tusclient.fingerprint.interface.Fingerprint>])
- log_func (Optional [<function>])
- upload_checksum (Optional[bool])
"""
DEFAULT_HEADERS = {"Tus-Resumable": "1.0.0"}
DEFAULT_CHUNK_SIZE = MAXSIZE
CHECKSUM_ALGORITHM_PAIR = ("sha1", hashlib.sha1, )
def __init__(self, file_path=None, file_stream=None, url=None, client=None,
chunk_size=None, metadata=None, retries=0, retry_delay=30,
store_url=False, url_storage=None, fingerprinter=None,
log_func=None, upload_checksum=False):
if file_path is None and file_stream is None:
raise ValueError("Either 'file_path' or 'file_stream' cannot be None.")
if url is None and client is None:
raise ValueError("Either 'url' or 'client' cannot be None.")
if store_url and url_storage is None:
raise ValueError("Please specify a storage instance to enable resumablility.")
self.file_path = file_path
self.file_stream = file_stream
self.stop_at = self.file_size
self.client = client
self.metadata = metadata or {}
self.store_url = store_url
self.url_storage = url_storage
self.fingerprinter = fingerprinter or fingerprint.Fingerprint()
self.url = url or self.get_url()
self.offset = self.get_offset()
self.chunk_size = chunk_size or self.DEFAULT_CHUNK_SIZE
self.request = None
self.retries = retries
self._retried = 0
self.retry_delay = retry_delay
self.log_func = log_func
self.upload_checksum = upload_checksum
self.__checksum_algorithm_name, self.__checksum_algorithm = \
self.CHECKSUM_ALGORITHM_PAIR
# it is important to have this as a @property so it gets
# updated client headers.
@property
def headers(self):
"""
Return headers of the uploader instance. This would include the headers of the
client instance.
"""
client_headers = getattr(self.client, 'headers', {})
return dict(self.DEFAULT_HEADERS, **client_headers)
@property
def headers_as_list(self):
"""
Does the same as 'headers' except it is returned as a list.
"""
headers = self.headers
headers_list = ['{}: {}'.format(key, value) for key, value in iteritems(headers)]
return headers_list
@property
def checksum_algorithm(self):
"""The checksum algorithm to be used for the Upload-Checksum extension.
"""
return self.__checksum_algorithm
@property
def checksum_algorithm_name(self):
"""The name of the checksum algorithm to be used for the Upload-Checksum
extension.
"""
return self.__checksum_algorithm_name
@_catch_requests_error
def get_offset(self):
"""
Return offset from tus server.
This is different from the instance attribute 'offset' because this makes an
http request to the tus server to retrieve the offset.
"""
resp = requests.head(self.url, headers=self.headers)
offset = resp.headers.get('upload-offset')
if offset is None:
msg = 'Attempt to retrieve offset fails with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return int(offset)
def encode_metadata(self):
"""
Return list of encoded metadata as defined by the Tus protocol.
"""
encoded_list = []
for key, value in iteritems(self.metadata):
key_str = str(key) # dict keys may be of any object type.
# confirm that the key does not contain unwanted characters.
if re.search(r'^$|[\s,]+', key_str):
msg = 'Upload-metadata key "{}" cannot be empty nor contain spaces or commas.'
raise ValueError(msg.format(key_str))
value_bytes = b(value) # python 3 only encodes bytes
encoded_list.append('{} {}'.format(key_str, b64encode(value_bytes).decode('ascii')))
return encoded_list
def get_url(self):
"""
Return the tus upload url.
If resumability is enabled, this would try to get the url from storage if available,
otherwise it would request a new upload url from the tus server.
"""
if self.store_url and self.url_storage:
key = self.fingerprinter.get_fingerprint(self.get_file_stream())
url = self.url_storage.get_item(key)
if not url:
url = self.create_url()
self.url_storage.set_item(key, url)
return url
else:
return self.create_url()
@_catch_requests_error
def create_url(self):
"""
Return upload url.
Makes request to tus server to create a new upload url for the required file upload.
"""
headers = self.headers
headers['upload-length'] = str(self.file_size)
headers['upload-metadata'] = ','.join(self.encode_metadata())
resp = requests.post(self.client.url, headers=headers)
url = resp.headers.get("location")
if url is None:
msg = 'Attempt to retrieve create file url with status {}'.format(resp.status_code)
raise TusCommunicationError(msg, resp.status_code, resp.content)
return urljoin(self.client.url, url)
@property
def request_length(self):
"""
Return length of next chunk upload.
"""
remainder = self.stop_at - self.offset
return self.chunk_size if remainder > self.chunk_size else remainder
def verify_upload(self):
"""
Confirm that the last upload was sucessful.
Raises TusUploadFailed exception if the upload was not sucessful.
"""
if self.request.status_code == 204:
return True
else:
raise TusUploadFailed('', self.request.status_code, self.request.response_content)
def get_file_stream(self):
"""
Return a file stream instance of the upload.
"""
if self.file_stream:
self.file_stream.seek(0)
return self.file_stream
elif os.path.isfile(self.file_path):
return open(self.file_path, 'rb')
else:
raise ValueError("invalid file {}".format(self.file_path))
@property
def file_size(self):
"""
Return size of the file.
"""
stream = self.get_file_stream()
stream.seek(0, os.SEEK_END)
return stream.tell()
def upload(self, stop_at=None):
"""
Perform file upload.
Performs continous upload of chunks of the file. The size uploaded at each cycle is
the value of the attribute 'chunk_size'.
:Args:
- stop_at (Optional[int]):
Determines at what offset value the upload should stop. If not specified this
defaults to the file size.
"""
self.stop_at = stop_at or self.file_size
while self.offset < self.stop_at:
self.upload_chunk()
else:
if self.log_func:
self.log_func("maximum upload specified({} bytes) has been reached".format(self.stop_at))
def _do_request(self):
# TODO: Maybe the request should not be re-created everytime.
# The request handle could be left open until upload is done instead.
self.request = TusRequest(self)
try:
self.request.perform()
self.verify_upload()
except TusUploadFailed as error:
self.request.close()
self._retry_or_cry(error)
finally:
self.request.close()
def _retry_or_cry(self, error):
if self.retries > self._retried:
time.sleep(self.retry_delay)
self._retried += 1
try:
self.offset = self.get_offset()
except TusCommunicationError as e:
self._retry_or_cry(e)
else:
self._do_request()
else:
raise error
|
tus/tus-py-client
|
tusclient/storage/filestorage.py
|
FileStorage.get_item
|
python
|
def get_item(self, key):
result = self._db.search(self._urls.key == key)
return result[0].get('url') if result else None
|
Return the tus url of a file, identified by the key specified.
:Args:
- key[str]: The unique id for the stored item (in this case, url)
:Returns: url[str]
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/storage/filestorage.py#L14-L23
| null |
class FileStorage(interface.Storage):
def __init__(self, fp):
self._db = TinyDB(fp)
self._urls = Query()
def set_item(self, key, url):
"""
Store the url value under the unique key.
:Args:
- key[str]: The unique id to which the item (in this case, url) would be stored.
- value[str]: The actual url value to be stored.
"""
if self._db.search(self._urls.key == key):
self._db.update({'url': url}, self._urls.key == key)
else:
self._db.insert({'key': key, 'url': url})
def remove_item(self, key):
"""
Remove/Delete the url value under the unique key from storage.
"""
self._db.remove(self._urls.key==key)
|
tus/tus-py-client
|
tusclient/storage/filestorage.py
|
FileStorage.set_item
|
python
|
def set_item(self, key, url):
if self._db.search(self._urls.key == key):
self._db.update({'url': url}, self._urls.key == key)
else:
self._db.insert({'key': key, 'url': url})
|
Store the url value under the unique key.
:Args:
- key[str]: The unique id to which the item (in this case, url) would be stored.
- value[str]: The actual url value to be stored.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/storage/filestorage.py#L25-L36
| null |
class FileStorage(interface.Storage):
def __init__(self, fp):
self._db = TinyDB(fp)
self._urls = Query()
def get_item(self, key):
"""
Return the tus url of a file, identified by the key specified.
:Args:
- key[str]: The unique id for the stored item (in this case, url)
:Returns: url[str]
"""
result = self._db.search(self._urls.key == key)
return result[0].get('url') if result else None
def remove_item(self, key):
"""
Remove/Delete the url value under the unique key from storage.
"""
self._db.remove(self._urls.key==key)
|
tus/tus-py-client
|
tusclient/storage/filestorage.py
|
FileStorage.remove_item
|
python
|
def remove_item(self, key):
self._db.remove(self._urls.key==key)
|
Remove/Delete the url value under the unique key from storage.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/storage/filestorage.py#L38-L42
| null |
class FileStorage(interface.Storage):
def __init__(self, fp):
self._db = TinyDB(fp)
self._urls = Query()
def get_item(self, key):
"""
Return the tus url of a file, identified by the key specified.
:Args:
- key[str]: The unique id for the stored item (in this case, url)
:Returns: url[str]
"""
result = self._db.search(self._urls.key == key)
return result[0].get('url') if result else None
def set_item(self, key, url):
"""
Store the url value under the unique key.
:Args:
- key[str]: The unique id to which the item (in this case, url) would be stored.
- value[str]: The actual url value to be stored.
"""
if self._db.search(self._urls.key == key):
self._db.update({'url': url}, self._urls.key == key)
else:
self._db.insert({'key': key, 'url': url})
|
tus/tus-py-client
|
tusclient/request.py
|
TusRequest.perform
|
python
|
def perform(self):
try:
host = '{}://{}'.format(self._url.scheme, self._url.netloc)
path = self._url.geturl().replace(host, '', 1)
chunk = self.file.read(self._content_length)
if self._upload_checksum:
self._request_headers["upload-checksum"] = \
" ".join((
self._checksum_algorithm_name,
base64.b64encode(
self._checksum_algorithm(chunk).digest()
).decode("ascii"),
))
self.handle.request("PATCH", path, chunk, self._request_headers)
self._response = self.handle.getresponse()
self.status_code = self._response.status
self.response_headers = {k.lower(): v for k, v in self._response.getheaders()}
except http.client.HTTPException as e:
raise TusUploadFailed(e)
# wrap connection related errors not raised by the http.client.HTTP(S)Connection
# as TusUploadFailed exceptions to enable retries
except OSError as e:
if e.errno in (errno.EPIPE, errno.ESHUTDOWN, errno.ECONNABORTED, errno.ECONNREFUSED, errno.ECONNRESET):
raise TusUploadFailed(e)
raise e
|
Perform actual request.
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/request.py#L56-L84
| null |
class TusRequest(object):
"""
Http Request Abstraction.
Sets up tus custom http request on instantiation.
requires argument 'uploader' an instance of tusclient.uploader.Uploader
on instantiation.
:Attributes:
- handle (<http.client.HTTPConnection>)
- response_headers (dict)
- file (file):
The file that is being uploaded.
"""
def __init__(self, uploader):
url = urlparse(uploader.url)
if url.scheme == 'https':
self.handle = http.client.HTTPSConnection(url.hostname, url.port)
else:
self.handle = http.client.HTTPConnection(url.hostname, url.port)
self._url = url
self.response_headers = {}
self.status_code = None
self.file = uploader.get_file_stream()
self.file.seek(uploader.offset)
self._request_headers = {
'upload-offset': uploader.offset,
'Content-Type': 'application/offset+octet-stream'
}
self._request_headers.update(uploader.headers)
self._content_length = uploader.request_length
self._upload_checksum = uploader.upload_checksum
self._checksum_algorithm = uploader.checksum_algorithm
self._checksum_algorithm_name = uploader.checksum_algorithm_name
self._response = None
@property
def response_content(self):
"""
Return response data
"""
return self._response.read()
def close(self):
"""
close request handle and end request session
"""
self.handle.close()
|
tus/tus-py-client
|
tusclient/fingerprint/fingerprint.py
|
Fingerprint.get_fingerprint
|
python
|
def get_fingerprint(self, fs):
hasher = hashlib.md5()
# we encode the content to avoid python 3 uncicode errors
buf = self._encode_data(fs.read(self.BLOCK_SIZE))
while len(buf) > 0:
hasher.update(buf)
buf = fs.read(self.BLOCK_SIZE)
return 'md5:' + hasher.hexdigest()
|
Return a unique fingerprint string value based on the file stream recevied
:Args:
- fs[file]: The file stream instance of the file for which a fingerprint would be generated.
:Returns: fingerprint[str]
|
train
|
https://github.com/tus/tus-py-client/blob/0e5856efcfae6fc281171359ce38488a70468993/tusclient/fingerprint/fingerprint.py#L15-L29
|
[
"def _encode_data(self, data):\n try:\n return b(data)\n except AttributeError:\n # in case the content is already binary, this failure would happen.\n return data\n"
] |
class Fingerprint(interface.Fingerprint):
BLOCK_SIZE = 65536
def _encode_data(self, data):
try:
return b(data)
except AttributeError:
# in case the content is already binary, this failure would happen.
return data
|
crate/crash
|
src/crate/crash/tabulate.py
|
_padleft
|
python
|
def _padleft(width, s, has_invisible=True):
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:>%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
|
Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/tabulate.py#L392-L406
| null |
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2014 Sergey Astanin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Pretty-print tabular data."""
import re
import io
from collections import namedtuple
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
def float_format(val):
return str(val)
def _is_file(f):
return isinstance(f, io.IOBase)
try:
import wcwidth # optional wide-character (CJK) support
except ImportError:
wcwidth = None
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.5"
MIN_PADDING = 0
# if True, enable wide-character (CJK) support
WIDE_CHARS_MODE = wcwidth is not None
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is suppposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebewteenrows ---
# ... (more datarows) ...
# --- linebewteenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| '}
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator * 2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"'}
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"}
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\hline"])
# Characters that are special in LaTeX, mapped to their escaped forms.
# Applied character-by-character by _latex_row.
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
                      r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
                      r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
                      r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns):
    """Render one LaTeX table row, escaping LaTeX special characters."""
    escaped_values = ["".join(LATEX_ESCAPE_RULES.get(ch, ch) for ch in cell)
                      for cell in cell_values]
    return _build_simple_row(escaped_values, DataRow("", "&", "\\\\"))
# Registry of built-in output formats, keyed by the `tablefmt` name
# accepted by tabulate().
# NOTE(review): runs of spaces inside these separator/padding literals may
# have been collapsed when this file was transcribed -- verify against the
# project history before relying on exact column spacing.
_table_formats = {"simple":
                  TableFormat(lineabove=Line("", "-", " ", ""),
                              linebelowheader=Line("", "-", " ", ""),
                              linebetweenrows=None,
                              linebelow=Line("", "-", " ", ""),
                              headerrow=DataRow("", " ", ""),
                              datarow=DataRow("", " ", ""),
                              padding=0,
                              with_header_hide=["lineabove", "linebelow"]),
                  "plain":
                  TableFormat(lineabove=None, linebelowheader=None,
                              linebetweenrows=None, linebelow=None,
                              headerrow=DataRow("", " ", ""),
                              datarow=DataRow("", " ", ""),
                              padding=0, with_header_hide=None),
                  "grid":
                  TableFormat(lineabove=Line("+", "-", "+", "+"),
                              linebelowheader=Line("+", "=", "+", "+"),
                              linebetweenrows=Line("+", "-", "+", "+"),
                              linebelow=Line("+", "-", "+", "+"),
                              headerrow=DataRow("|", "|", "|"),
                              datarow=DataRow("|", "|", "|"),
                              padding=1, with_header_hide=None),
                  "fancy_grid":
                  TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
                              linebelowheader=Line("╞", "═", "╪", "╡"),
                              linebetweenrows=Line("├", "─", "┼", "┤"),
                              linebelow=Line("╘", "═", "╧", "╛"),
                              headerrow=DataRow("│", "│", "│"),
                              datarow=DataRow("│", "│", "│"),
                              padding=1, with_header_hide=None),
                  "pipe":
                  TableFormat(lineabove=_pipe_line_with_colons,
                              linebelowheader=_pipe_line_with_colons,
                              linebetweenrows=None,
                              linebelow=None,
                              headerrow=DataRow("|", "|", "|"),
                              datarow=DataRow("|", "|", "|"),
                              padding=1,
                              with_header_hide=["lineabove"]),
                  "orgtbl":
                  TableFormat(lineabove=None,
                              linebelowheader=Line("|", "-", "+", "|"),
                              linebetweenrows=None,
                              linebelow=None,
                              headerrow=DataRow("|", "|", "|"),
                              datarow=DataRow("|", "|", "|"),
                              padding=1, with_header_hide=None),
                  "psql":
                  TableFormat(lineabove=Line("+", "-", "+", "+"),
                              linebelowheader=Line("|", "-", "+", "|"),
                              linebetweenrows=None,
                              linebelow=Line("+", "-", "+", "+"),
                              headerrow=DataRow("|", "|", "|"),
                              datarow=DataRow("|", "|", "|"),
                              padding=1, with_header_hide=None),
                  "rst":
                  TableFormat(lineabove=Line("", "=", " ", ""),
                              linebelowheader=Line("", "=", " ", ""),
                              linebetweenrows=None,
                              linebelow=Line("", "=", " ", ""),
                              headerrow=DataRow("", " ", ""),
                              datarow=DataRow("", " ", ""),
                              padding=0, with_header_hide=None),
                  "mediawiki":
                  TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
                                             "", "", "\n|+ <!-- caption -->\n|-"),
                              linebelowheader=Line("|-", "", "", ""),
                              linebetweenrows=Line("|-", "", "", ""),
                              linebelow=Line("|}", "", "", ""),
                              headerrow=partial(_mediawiki_row_with_attrs, "!"),
                              datarow=partial(_mediawiki_row_with_attrs, "|"),
                              padding=0, with_header_hide=None),
                  "html":
                  TableFormat(lineabove=Line("<table>", "", "", ""),
                              linebelowheader=None,
                              linebetweenrows=None,
                              linebelow=Line("</table>", "", "", ""),
                              headerrow=partial(_html_row_with_attrs, "th"),
                              datarow=partial(_html_row_with_attrs, "td"),
                              padding=0, with_header_hide=None),
                  "latex":
                  TableFormat(lineabove=_latex_line_begin_tabular,
                              linebelowheader=Line("\\hline", "", "", ""),
                              linebetweenrows=None,
                              linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
                              headerrow=_latex_row,
                              datarow=_latex_row,
                              padding=1, with_header_hide=None),
                  "latex_booktabs":
                  TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
                              linebelowheader=Line("\\midrule", "", "", ""),
                              linebetweenrows=None,
                              linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
                              headerrow=_latex_row,
                              datarow=_latex_row,
                              padding=1, with_header_hide=None),
                  "tsv":
                  TableFormat(lineabove=None, linebelowheader=None,
                              linebetweenrows=None, linebelow=None,
                              headerrow=DataRow("", "\t", ""),
                              datarow=DataRow("", "\t", ""),
                              padding=0, with_header_hide=None)}
# Sorted list of supported `tablefmt` names (part of the public API).
tabulate_formats = list(sorted(_table_formats.keys()))
# Newline variants, used to detect multi-line cell content.
_multiline_codes = re.compile(r"\r|\n|\r\n")
_multiline_codes_bytes = re.compile(b"\r|\n|\r\n")
_invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m")  # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m")  # ANSI color codes
def simple_separated_format(separator):
    """Construct a simple TableFormat with columns separated by a separator.

    >>> tsv = simple_separated_format("\\t") ; \
        tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
    True
    """
    row_format = DataRow('', separator, '')
    return TableFormat(None, None, None, None,
                       headerrow=row_format,
                       datarow=row_format,
                       padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
    """True if *string* can be parsed as a float.

    >>> _isnumber("123.45")
    True
    >>> _isnumber("123")
    True
    >>> _isnumber("spam")
    False
    """
    # float() accepts ints, floats and numeric strings alike
    return _isconvertible(float, string)
def _isint(string, inttype=int):
    """True if *string* is already an *inttype*, or a str/bytes value
    parseable as one.

    >>> _isint("123")
    True
    >>> _isint("123.45")
    False
    """
    if type(string) is inttype:
        return True
    is_stringlike = isinstance(string, _binary_type) or isinstance(string, _text_type)
    return is_stringlike and _isconvertible(inttype, string)
def _type(string, has_invisible=True):
    """The least generic type (type(None), int, float, str, unicode).
    >>> _type(None) is type(None)
    True
    >>> _type("foo") is type("")
    True
    >>> _type("1") is type(1)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    """
    # Strip ANSI color codes first so colored numbers are classified by
    # their numeric content.
    if has_invisible and \
            (isinstance(string, _text_type) or isinstance(string, _binary_type)):
        string = _strip_invisible(string)
    if string is None:
        return _none_type
    elif hasattr(string, "isoformat"):  # datetime.datetime, date, and time
        return _text_type
    elif _isint(string):
        return int
    elif _isint(string, _long_type):
        # on Python 3 _long_type is int, so this branch mirrors the one above
        return _long_type
    elif _isnumber(string):
        return float
    elif isinstance(string, _binary_type):
        return _binary_type
    else:
        return _text_type
def _afterpoint(string):
    """Symbols after a decimal point, -1 if the string lacks the decimal point.
    >>> _afterpoint("123.45")
    2
    >>> _afterpoint("1001")
    -1
    >>> _afterpoint("eggs")
    -1
    >>> _afterpoint("123e45")
    2
    """
    if _isnumber(string):
        if _isint(string):
            return -1
        else:
            pos = string.rfind(".")
            # fall back to the exponent marker for numbers like "123e45"
            pos = string.lower().rfind("e") if pos < 0 else pos
            if pos >= 0:
                return len(string) - pos - 1
            else:
                return -1  # no point
    else:
        return -1  # not a number
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:<%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:^%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padnone(ignore_width, s):
    """No-op padding: return *s* unchanged (used when alignment is disabled)."""
    return s
def _strip_invisible(s):
    """Remove invisible ANSI color codes from a str or bytes value."""
    if isinstance(s, _text_type):
        return _invisible_codes.sub("", s)
    # a bytestring
    return _invisible_codes_bytes.sub("", s)
def _max_line_width(s):
"""
Visible width of a potentially multinie content.
>>> _max_line_width('this\\nis\\na\\nmultiline\\ntext')
9
"""
if not s:
return 0
return max(map(len, s.splitlines()))
def _visible_width(s):
    """Visible width of a printed string. ANSI color codes are removed.

    >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
    (5, 5)
    """
    if not (isinstance(s, _text_type) or isinstance(s, _binary_type)):
        # non-string values are measured by their str() representation
        return _max_line_width(_text_type(s))
    return _max_line_width(_strip_invisible(s))
def _is_multiline(s):
    """True if *s* (str or bytes) contains any newline characters."""
    pattern = _multiline_codes if isinstance(s, _text_type) else _multiline_codes_bytes
    return bool(pattern.search(s))
def _multiline_width(multiline_s, line_width_fn=len):
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
    """Return a function to calculate visible cell width."""
    # ANSI-aware measurement takes precedence over wide-character support.
    if has_invisible:
        line_width_fn = _visible_width
    elif enable_widechars:  # optional wide-character support if available
        line_width_fn = wcwidth.wcswidth
    else:
        line_width_fn = len
    if is_multiline:
        # measure the widest physical line of a multi-line cell
        width_fn = lambda s: _multiline_width(s, line_width_fn)
    else:
        width_fn = line_width_fn
    return width_fn
def _align_column_choose_padfn(strings, alignment, has_invisible):
    # Pick the padding function for *alignment* and pre-process *strings*:
    # simple alignments strip whitespace; "decimal" right-pads each value so
    # the decimal points line up before left-padding is applied.
    if alignment == "right":
        strings = [s.strip() for s in strings]
        padfn = _padleft
    elif alignment == "center":
        strings = [s.strip() for s in strings]
        padfn = _padboth
    elif alignment == "decimal":
        if has_invisible:
            decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
        else:
            decimals = [_afterpoint(s) for s in strings]
        maxdecimals = max(decimals)
        strings = [s + (maxdecimals - decs) * " "
                   for s, decs in zip(strings, decimals)]
        padfn = _padleft
    elif not alignment:
        # alignment disabled: leave values untouched
        padfn = _padnone
    else:
        # default / "left"
        strings = [s.strip() for s in strings]
        padfn = _padright
    return strings, padfn
def _align_column(strings, alignment, minwidth=0,
                  has_invisible=True, enable_widechars=False, is_multiline=False):
    """[string] -> [padded_string]
    >>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
    [' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
    >>> list(map(str,_align_column(['123.4', '56.7890'], None)))
    ['123.4', '56.7890']
    """
    strings, padfn = _align_column_choose_padfn(strings, alignment, has_invisible)
    width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
    s_widths = list(map(width_fn, strings))
    maxwidth = max(max(s_widths), minwidth)
    # TODO: refactor column alignment in single-line and multiline modes
    if is_multiline:
        if not enable_widechars and not has_invisible:
            # simple case: pad every physical line of every cell to maxwidth
            padded_strings = [
                "\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
                for ms in strings]
        else:
            # enable wide-character width corrections
            s_lens = [max((len(s) for s in re.split("[\r\n]", ms))) for ms in strings]
            visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
            # wcswidth and _visible_width don't count invisible characters;
            # padfn doesn't need to apply another correction
            if strings[0] == '':
                strings[0] = ' '
            padded_strings = ["\n".join([padfn(w, s) for s in (ms.splitlines() or ms)])
                              for ms, w in zip(strings, visible_widths)]
    else:  # single-line cell values
        if not enable_widechars and not has_invisible:
            padded_strings = [padfn(maxwidth, s) for s in strings]
        else:
            # enable wide-character width corrections
            s_lens = list(map(len, strings))
            visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
            # wcswidth and _visible_width don't count invisible characters;
            # padfn doesn't need to apply another correction
            padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)]
    return padded_strings
def _more_generic(type1, type2):
    """Return the more generic of two cell types.

    Generality order: NoneType < int < float < bytes < str;
    unknown types rank as str (most generic).
    """
    ranks = {_none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4}
    by_rank = {0: _none_type, 1: int, 2: float, 3: _binary_type, 4: _text_type}
    return by_rank[max(ranks.get(type1, 4), ranks.get(type2, 4))]
def _column_type(values, has_invisible=True):
    """The least generic type all column values are convertible to.
    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    >>> import datetime as dt
    >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
    True
    """
    # NOTE(review): *has_invisible* is accepted but unused here; the Python
    # types of the values are folded directly (int is the most specific seed).
    return reduce(_more_generic, [type(v) for v in values], int)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
    """Format a value according to its type.
    Unicode is supported:
    >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
        tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
        good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
        tabulate(tbl, headers=hrow) == good_result
    True
    """
    if val is None:
        return missingval
    if valtype in [int, _long_type, _text_type]:
        return "{0}".format(val)
    elif valtype is _binary_type:
        try:
            return _text_type(val, "ascii")
        except TypeError:
            return _text_type(val)
    elif valtype is float:
        # A colored number must be reformatted without destroying its codes.
        is_a_colored_number = has_invisible and isinstance(val, (_text_type, _binary_type))
        if is_a_colored_number:
            raw_val = _strip_invisible(val)
            formatted_val = format(float(raw_val), floatfmt)
            return val.replace(raw_val, formatted_val)
        elif not floatfmt:
            # empty floatfmt delegates to the module-level float_format (plain str)
            return float_format(val)
        else:
            return format(float(val), floatfmt)
    else:
        return "{0}".format(val)
def _align_header(header, alignment, width, visible_width, enable_widechars=False, is_multiline=False):
    """Pad *header* to *width* columns according to *alignment*.

    *visible_width* is the header's width with invisible (ANSI) codes
    removed; the difference widens the padding target so invisible
    characters do not count against the column width.
    """
    if is_multiline:
        header_lines = re.split(_multiline_codes, header)
        padded_lines = [_align_header(h, alignment, width, visible_width) for h in header_lines]
        return "\n".join(padded_lines)
    # else: not multiline
    ninvisible = max(0, len(header) - visible_width)
    width += ninvisible
    if alignment == "left":
        return _padright(width, header)
    elif alignment == "center":
        return _padboth(width, header)
    elif not alignment:
        return "{0}".format(header)
    else:
        # "right", "decimal", or any other value: flush right
        return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
    """Transform a supported data type to a list of lists, and a list of headers.
    Supported tabular data types:
    * list-of-lists or another iterable of iterables
    * list of named tuples (usually used with headers="keys")
    * list of dicts (usually used with headers="keys")
    * list of OrderedDicts (usually used with headers="keys")
    * 2D NumPy arrays
    * NumPy record arrays (usually used with headers="keys")
    * dict of iterables (usually used with headers="keys")
    * pandas.DataFrame (usually used with headers="keys")
    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".
    """
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict
            keys = tabular_data.keys()
            rows = list(izip_longest(*tabular_data.values()))  # columns have to be transposed
        elif hasattr(tabular_data, "index"):
            # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
            keys = tabular_data.keys()
            vals = tabular_data.values  # values matrix doesn't need to be transposed
            names = tabular_data.index
            # prepend the index value to every row
            rows = [[v] + list(row) for v, row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
        if headers == "keys":
            headers = list(map(_text_type, keys))  # headers should be strings
    else:  # it's a usual an iterable of iterables, or a NumPy array
        rows = list(tabular_data)
        if (headers == "keys" and
                hasattr(tabular_data, "dtype") and
                getattr(tabular_data.dtype, "names")):
            # numpy record array
            headers = tabular_data.dtype.names
        elif (headers == "keys"
              and len(rows) > 0
              and isinstance(rows[0], tuple)
              and hasattr(rows[0], "_fields")):
            # namedtuple
            headers = list(map(_text_type, rows[0]._fields))
        elif (len(rows) > 0
              and isinstance(rows[0], dict)):
            # dict or OrderedDict
            uniq_keys = set()  # implements hashed lookup
            keys = []  # storage for set
            if headers == "firstrow":
                firstdict = rows[0] if len(rows) > 0 else {}
                keys.extend(firstdict.keys())
                uniq_keys.update(keys)
                rows = rows[1:]
            for row in rows:
                for k in row.keys():
                    # Save unique items in input order
                    if k not in uniq_keys:
                        keys.append(k)
                        uniq_keys.add(k)
            if headers == 'keys':
                headers = keys
            elif isinstance(headers, dict):
                # a dict of headers for a list of dicts
                headers = [headers.get(k, k) for k in keys]
                headers = list(map(_text_type, headers))
            elif headers == "firstrow":
                # firstdict was captured above under the same condition
                if len(rows) > 0:
                    headers = [firstdict.get(k, k) for k in keys]
                    headers = list(map(_text_type, headers))
                else:
                    headers = []
            elif headers:
                raise ValueError('headers for a list of dicts is not a dict or a keyword')
            # missing keys become None cells
            rows = [[row.get(k) for k in keys] for row in rows]
        elif headers == "keys" and len(rows) > 0:
            # keys are column indices
            headers = list(map(_text_type, range(len(rows[0]))))
    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0]))  # headers should be strings
        rows = rows[1:]
    headers = list(map(_text_type, headers))
    rows = list(map(list, rows))
    # pad with empty headers for initial columns if necessary
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            headers = [""] * (ncols - nhs) + headers
    return rows, headers
def tabulate(tabular_data, headers=(), tablefmt="simple",
             floatfmt="g", numalign="decimal", stralign="left",
             missingval=""):
    """Format a fixed width table for pretty printing.

    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    --- ---------
    1 2.34
    -56 8.999
    2 10001
    --- ---------

    The first required argument (`tabular_data`) can be a
    list-of-lists (or another iterable of iterables), a list of named
    tuples, a dictionary of iterables, an iterable of dictionaries,
    a two-dimensional NumPy array, NumPy record array, or a Pandas'
    dataframe.

    Table headers
    -------------
    To print nice column headers, supply the second argument (`headers`):
    - `headers` can be an explicit list of column headers
    - if `headers="firstrow"`, then the first row of data is used
    - if `headers="keys"`, then dictionary keys or column indices are used
    Otherwise a headerless table is produced.
    If the number of headers is less than the number of columns, they
    are supposed to be names of the last columns. This is consistent
    with the plain-text format of R and Pandas' dataframes.

    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ...       headers="firstrow"))
    sex age
    ----- ----- -----
    Alice F 24
    Bob M 19

    Column alignment
    ----------------
    `tabulate` tries to detect column types automatically, and aligns
    the values properly. By default it aligns decimal points of the
    numbers (or flushes integer numbers to the right), and flushes
    everything else to the left. Possible column alignments
    (`numalign`, `stralign`) are: "right", "center", "left", "decimal"
    (only for `numalign`), and None (to disable alignment).

    Table formats
    -------------
    `floatfmt` is a format specification used for columns which
    contain numeric data with a decimal point.
    `None` values are replaced with a `missingval` string:

    >>> print(tabulate([["spam", 1, None],
    ...                 ["eggs", 42, 3.14],
    ...                 ["other", None, 2.7]], missingval="?"))
    ----- -- ----
    spam 1 ?
    eggs 42 3.14
    other ? 2.7
    ----- -- ----

    Various plain-text table formats (`tablefmt`) are supported:
    'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
    'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
    currently supported formats.
    "plain" format doesn't use any pseudographics to draw tables,
    it separates columns with a double space:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "plain"))
    strings numbers
    spam 41.9999
    eggs 451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
    spam 41.9999
    eggs 451

    "simple" format is like Pandoc simple_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "simple"))
    strings numbers
    --------- ---------
    spam 41.9999
    eggs 451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
    ---- --------
    spam 41.9999
    eggs 451
    ---- --------

    "grid" is similar to tables produced by Emacs table.el package or
    Pandoc grid_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "grid"))
    +-----------+-----------+
    | strings | numbers |
    +===========+===========+
    | spam | 41.9999 |
    +-----------+-----------+
    | eggs | 451 |
    +-----------+-----------+

    >>> print(tabulate([["this\\nis\\na multiline\\ntext", "41.9999", "foo\\nbar"], ["NULL", "451.0", ""]],
    ...                ["text", "numbers", "other"], "grid"))
    +-------------+----------+-------+
    | text | numbers | other |
    +=============+==========+=======+
    | this | 41.9999 | foo |
    | is | | bar |
    | a multiline | | |
    | text | | |
    +-------------+----------+-------+
    | NULL | 451 | |
    +-------------+----------+-------+

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
    +------+----------+
    | spam | 41.9999 |
    +------+----------+
    | eggs | 451 |
    +------+----------+

    "fancy_grid" draws a grid using box-drawing characters:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "fancy_grid"))
    ╒═══════════╤═══════════╕
    │ strings │ numbers │
    ╞═══════════╪═══════════╡
    │ spam │ 41.9999 │
    ├───────────┼───────────┤
    │ eggs │ 451 │
    ╘═══════════╧═══════════╛

    "pipe" is like tables in PHP Markdown Extra extension or Pandoc
    pipe_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "pipe"))
    | strings | numbers |
    |:----------|----------:|
    | spam | 41.9999 |
    | eggs | 451 |

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
    |:-----|---------:|
    | spam | 41.9999 |
    | eggs | 451 |

    "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
    are slightly different from "pipe" format by not using colons to
    define column alignment, and using a "+" sign to indicate line
    intersections:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "orgtbl"))
    | strings | numbers |
    |-----------+-----------|
    | spam | 41.9999 |
    | eggs | 451 |

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
    | spam | 41.9999 |
    | eggs | 451 |

    "rst" is like a simple table format from reStructuredText; please
    note that reStructuredText accepts also "grid" tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "rst"))
    ========= =========
    strings numbers
    ========= =========
    spam 41.9999
    eggs 451
    ========= =========

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
    ==== ========
    spam 41.9999
    eggs 451
    ==== ========

    "mediawiki" produces a table markup used in Wikipedia and on other
    MediaWiki-based sites:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="mediawiki"))
    {| class="wikitable" style="text-align: left;"
    |+ <!-- caption -->
    |-
    ! strings !! align="right"| numbers
    |-
    | spam || align="right"| 41.9999
    |-
    | eggs || align="right"| 451
    |}

    "html" produces HTML markup:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="html"))
    <table>
    <tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
    <tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
    <tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
    </table>

    "latex" produces a tabular environment of LaTeX document markup:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
    \\begin{tabular}{lr}
    \\hline
    spam & 41.9999 \\\\
    eggs & 451 \\\\
    \\hline
    \\end{tabular}

    "latex_booktabs" produces a tabular environment of LaTeX document markup
    using the booktabs.sty package:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
    \\begin{tabular}{lr}
    \\toprule
    spam & 41.9999 \\\\
    eggs & 451 \\\\
    \\bottomrule
    \\end{tabular}
    """
    if tabular_data is None:
        tabular_data = []
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
                           ['\t'.join(map(_text_type, row)) for row in list_of_lists])
    has_invisible = re.search(_invisible_codes, plain_text)
    enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
    is_multiline = _is_multiline(plain_text)
    width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval, has_invisible) for v in c]
            for c, ct in zip(cols, coltypes)]
    # align columns
    aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
    minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0] * len(cols)
    cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline)
            for c, a, minw in zip(cols, aligns, minwidths)]
    if headers:
        # align headers and add headers
        t_cols = cols or [['']] * len(headers)
        t_aligns = aligns or [stralign] * len(headers)
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
        headers = [_align_header(h, a, minw, width_fn(h), enable_widechars, is_multiline)
                   for h, a, minw in zip(headers, t_aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))
    if not isinstance(tablefmt, TableFormat):
        # unknown format names silently fall back to "simple"
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
    return _format_table(tablefmt, headers, rows, minwidths, aligns, is_multiline)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
    """Return a string for one row of data cells, or None if *rowfmt* is unset.

    *rowfmt* may be a DataRow tuple or a callable rendering the row itself.
    """
    if not rowfmt:
        return None
    if hasattr(rowfmt, "__call__"):
        return rowfmt(padded_cells, colwidths, colaligns)
    return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
    """Return a horizontal rule string, or None if *linefmt* is unset.

    *linefmt* may be a Line tuple or a callable rendering the rule itself.
    """
    if not linefmt:
        return None
    if hasattr(linefmt, "__call__"):
        return linefmt(colwidths, colaligns)
    begin, fill, sep, end = linefmt
    segments = [fill * w for w in colwidths]
    return _build_simple_row(segments, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " " * padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):
    # Render one single-line row and append it to *lines*; returns *lines*.
    lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt))
    return lines
def _append_multiline_row(lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad):
    # Render a logical row whose cells may span several physical lines;
    # appends one output line per physical line and returns *lines*.
    colwidths = [w - 2 * pad for w in padded_widths]
    cells_lines = [c.splitlines() for c in padded_multiline_cells]
    nlines = max(map(len, cells_lines))  # number of lines in the row
    # vertically pad cells where some lines are missing
    cells_lines = [(cl + [' ' * w] * (nlines - len(cl))) for cl, w in zip(cells_lines, colwidths)]
    lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)]
    for ln in lines_cells:
        # NOTE(review): pads with 1 regardless of *pad* -- verify this is
        # intentional for formats with padding != 1
        padded_ln = _pad_row(ln, 1)
        _append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt)
    return lines
def _append_line(lines, colwidths, colaligns, linefmt):
    # Render a horizontal rule and append it to *lines*; returns *lines*.
    lines.append(_build_line(colwidths, colaligns, linefmt))
    return lines
def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):
    """Produce a plain-text representation of the table."""
    lines = []
    # elements suppressed when the table has headers (e.g. "simple" format)
    hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
    pad = fmt.padding
    headerrow = fmt.headerrow
    padded_widths = [(w + 2 * pad) for w in colwidths]
    if is_multiline:
        pad_row = lambda row, _: row  # do it later, in _append_multiline_row
        append_row = partial(_append_multiline_row, pad=pad)
    else:
        pad_row = _pad_row
        append_row = _append_basic_row
    padded_headers = pad_row(headers, pad)
    padded_rows = [pad_row(row, pad) for row in rows]
    if fmt.lineabove and "lineabove" not in hidden:
        _append_line(lines, padded_widths, colaligns, fmt.lineabove)
    if padded_headers:
        append_row(lines, padded_headers, padded_widths, colaligns, headerrow)
        if fmt.linebelowheader and "linebelowheader" not in hidden:
            _append_line(lines, padded_widths, colaligns, fmt.linebelowheader)
    if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
        # initial rows with a line below
        for row in padded_rows[:-1]:
            append_row(lines, row, padded_widths, colaligns, fmt.datarow)
            _append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)
        # the last row without a line below
        append_row(lines, padded_rows[-1], padded_widths, colaligns, fmt.datarow)
    else:
        for row in padded_rows:
            append_row(lines, row, padded_widths, colaligns, fmt.datarow)
    if fmt.linebelow and "linebelow" not in hidden:
        _append_line(lines, padded_widths, colaligns, fmt.linebelow)
    return "\n".join(lines)
|
crate/crash
|
src/crate/crash/tabulate.py
|
_visible_width
|
python
|
def _visible_width(s):
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return _max_line_width(_strip_invisible(s))
else:
return _max_line_width(_text_type(s))
|
Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/tabulate.py#L468-L478
| null |
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2014 Sergey Astanin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Pretty-print tabular data."""
import re
import io
from collections import namedtuple
from itertools import zip_longest as izip_longest
from functools import reduce, partial
# Python 3 type aliases, kept from the original Python 2/3 dual-support
# code so the rest of the module can refer to them uniformly.
_none_type = type(None)
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
def float_format(val):
    """Default float formatting: plain str() (used by _format when floatfmt is empty)."""
    return str(val)
def _is_file(f):
return isinstance(f, io.IOBase)
try:
import wcwidth # optional wide-character (CJK) support
except ImportError:
wcwidth = None
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.5"
MIN_PADDING = 0
# if True, enable wide-character (CJK) support
WIDE_CHARS_MODE = wcwidth is not None
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is suppposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebewteenrows ---
# ... (more datarows) ...
# --- linebewteenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
    """Full horizontal separator for the `pipe` format: one colon-decorated
    dash segment per column, joined and wrapped with '|'."""
    segments = [_pipe_segment_with_colons(align, width)
                for align, width in zip(colaligns, colwidths)]
    return "|{0}|".format("|".join(segments))
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| '}
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator * 2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"'}
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"}
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns):
    """Render one LaTeX table row, escaping TeX special characters in every
    cell. Widths/alignments are unused here — LaTeX handles layout."""
    escaped_values = ["".join(LATEX_ESCAPE_RULES.get(ch, ch) for ch in cell)
                      for cell in cell_values]
    return _build_simple_row(escaped_values, DataRow("", "&", "\\\\"))
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
linebelowheader=Line("╞", "═", "╪", "╡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"html":
TableFormat(lineabove=Line("<table>", "", "", ""),
linebelowheader=None,
linebetweenrows=None,
linebelow=Line("</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_multiline_codes = re.compile(r"\r|\n|\r\n")
_multiline_codes_bytes = re.compile(b"\r|\n|\r\n")
_invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
    """Construct a bare TableFormat: no rules or borders, zero padding,
    cells joined by *separator* (e.g. "\\t" for TSV-like output)."""
    row_format = DataRow('', separator, '')
    return TableFormat(None, None, None, None,
                       headerrow=row_format,
                       datarow=row_format,
                       padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string, inttype=int):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is inttype or \
(isinstance(string, _binary_type) or isinstance(string, _text_type)) \
and \
_isconvertible(inttype, string)
def _type(string, has_invisible=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isint(string):
return int
elif _isint(string, _long_type):
return _long_type
elif _isnumber(string):
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:>%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:<%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:^%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padnone(ignore_width, s):
    # Identity "padding" used when a column's alignment is None (disabled);
    # the width argument is deliberately ignored.
    return s
def _strip_invisible(s):
    """Remove invisible ANSI color escape codes from *s* (str or bytes)."""
    if isinstance(s, _text_type):
        return re.sub(_invisible_codes, "", s)
    else:  # a bytestring
        # Bug fix: a bytes pattern requires a bytes replacement. The previous
        # str replacement "" made re.sub raise TypeError for every bytes
        # input, so this branch could never succeed.
        return re.sub(_invisible_codes_bytes, b"", s)
def _max_line_width(s):
"""
Visible width of a potentially multinie content.
>>> _max_line_width('this\\nis\\na\\nmultiline\\ntext')
9
"""
if not s:
return 0
return max(map(len, s.splitlines()))
def _is_multiline(s):
    """Return True when *s* (str or bytes) contains any CR/LF line break."""
    if isinstance(s, _text_type):
        return bool(_multiline_codes.search(s))
    # bytestring input uses the bytes pattern
    return bool(_multiline_codes_bytes.search(s))
def _multiline_width(multiline_s, line_width_fn=len):
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
"""Return a function to calculate visible cell width."""
if has_invisible:
line_width_fn = _visible_width
elif enable_widechars: # optional wide-character support if available
line_width_fn = wcwidth.wcswidth
else:
line_width_fn = len
if is_multiline:
width_fn = lambda s: _multiline_width(s, line_width_fn)
else:
width_fn = line_width_fn
return width_fn
def _align_column_choose_padfn(strings, alignment, has_invisible):
if alignment == "right":
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
if has_invisible:
decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
else:
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
padfn = _padnone
else:
strings = [s.strip() for s in strings]
padfn = _padright
return strings, padfn
def _align_column(strings, alignment, minwidth=0,
has_invisible=True, enable_widechars=False, is_multiline=False):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
>>> list(map(str,_align_column(['123.4', '56.7890'], None)))
['123.4', '56.7890']
"""
strings, padfn = _align_column_choose_padfn(strings, alignment, has_invisible)
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
s_widths = list(map(width_fn, strings))
maxwidth = max(max(s_widths), minwidth)
# TODO: refactor column alignment in single-line and multiline modes
if is_multiline:
if not enable_widechars and not has_invisible:
padded_strings = [
"\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
for ms in strings]
else:
# enable wide-character width corrections
s_lens = [max((len(s) for s in re.split("[\r\n]", ms))) for ms in strings]
visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
if strings[0] == '':
strings[0] = ' '
padded_strings = ["\n".join([padfn(w, s) for s in (ms.splitlines() or ms)])
for ms, w in zip(strings, visible_widths)]
else: # single-line cell values
if not enable_widechars and not has_invisible:
padded_strings = [padfn(maxwidth, s) for s in strings]
else:
# enable wide-character width corrections
s_lens = list(map(len, strings))
visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
# wcswidth and _visible_width don't count invisible characters;
# padfn doesn't need to apply another correction
padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)]
return padded_strings
def _more_generic(type1, type2):
types = {_none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4}
invtypes = {4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type}
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
def _column_type(values, has_invisible=True):
    """The least generic type that covers every value in the column.

    Unlike upstream tabulate, this variant classifies cells by their actual
    Python type and does NOT try to parse strings as numbers — a column of
    numeric strings is therefore a text column. (The original doctests were
    copied from upstream and contradicted this implementation.)

    >>> _column_type([1, 2]) is _int_type
    True
    >>> _column_type([1, 2.3]) is _float_type
    True
    >>> _column_type(["1", "2"]) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    """
    # *has_invisible* is accepted for interface compatibility but unused:
    # ANSI escape codes cannot change a value's Python type.
    return reduce(_more_generic, [type(v) for v in values], int)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
"""Format a value accoding to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _long_type, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
is_a_colored_number = has_invisible and isinstance(val, (_text_type, _binary_type))
if is_a_colored_number:
raw_val = _strip_invisible(val)
formatted_val = format(float(raw_val), floatfmt)
return val.replace(raw_val, formatted_val)
elif not floatfmt:
return float_format(val)
else:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width, visible_width, enable_widechars=False, is_multiline=False):
if is_multiline:
header_lines = re.split(_multiline_codes, header)
padded_lines = [_align_header(h, alignment, width, visible_width) for h in header_lines]
return "\n".join(padded_lines)
# else: not multiline
ninvisible = max(0, len(header) - visible_width)
width += ninvisible
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = tabular_data.keys()
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v] + list(row) for v, row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type, keys)) # headers should be strings
else: # it's a usual an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")):
# namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif (len(rows) > 0
and isinstance(rows[0], dict)):
# dict or OrderedDict
uniq_keys = set() # implements hashed lookup
keys = [] # storage for set
if headers == "firstrow":
firstdict = rows[0] if len(rows) > 0 else {}
keys.extend(firstdict.keys())
uniq_keys.update(keys)
rows = rows[1:]
for row in rows:
for k in row.keys():
# Save unique items in input order
if k not in uniq_keys:
keys.append(k)
uniq_keys.add(k)
if headers == 'keys':
headers = keys
elif isinstance(headers, dict):
# a dict of headers for a list of dicts
headers = [headers.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
elif headers == "firstrow":
if len(rows) > 0:
headers = [firstdict.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
else:
headers = []
elif headers:
raise ValueError('headers for a list of dicts is not a dict or a keyword')
rows = [[row.get(k) for k in keys] for row in rows]
elif headers == "keys" and len(rows) > 0:
# keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(map(_text_type, headers))
rows = list(map(list, rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""] * (ncols - nhs) + headers
return rows, headers
def tabulate(tabular_data, headers=(), tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=""):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["this\\nis\\na multiline\\ntext", "41.9999", "foo\\nbar"], ["NULL", "451.0", ""]],
... ["text", "numbers", "other"], "grid"))
+-------------+----------+-------+
| text | numbers | other |
+=============+==========+=======+
| this | 41.9999 | foo |
| is | | bar |
| a multiline | | |
| text | | |
+-------------+----------+-------+
| NULL | 451 | |
+-------------+----------+-------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings │ numbers │
╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
\end{tabular}
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
is_multiline = _is_multiline(plain_text)
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval, has_invisible) for v in c]
for c, ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0] * len(cols)
cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw, width_fn(h), enable_widechars, is_multiline)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns, is_multiline)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill * w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " " * padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):
    # Render one single-line data/header row and append it to *lines*;
    # returns *lines* to allow chaining.
    lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt))
    return lines
def _append_multiline_row(lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad):
    """Append a logical row whose cells may span several physical lines."""
    # Content widths, with the horizontal padding removed again.
    colwidths = [w - 2 * pad for w in padded_widths]
    cells_lines = [c.splitlines() for c in padded_multiline_cells]
    nlines = max(map(len, cells_lines)) # number of lines in the row
    # vertically pad cells where some lines are missing
    cells_lines = [(cl + [' ' * w] * (nlines - len(cl))) for cl, w in zip(cells_lines, colwidths)]
    # Transpose: one list of cell fragments per physical output line.
    lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)]
    for ln in lines_cells:
        # NOTE(review): pads with a fixed 1 space rather than *pad*, even
        # though colwidths above subtracts 2*pad — matches upstream tabulate,
        # but looks inconsistent; confirm before changing.
        padded_ln = _pad_row(ln, 1)
        _append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt)
    return lines
def _append_line(lines, colwidths, colaligns, linefmt):
    # Render one horizontal rule (lineabove/linebelow/etc.) and append it to
    # *lines*; returns *lines* to allow chaining.
    lines.append(_build_line(colwidths, colaligns, linefmt))
    return lines
def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2 * pad) for w in colwidths]
if is_multiline:
pad_row = lambda row, _: row # do it later, in _append_multiline_row
append_row = partial(_append_multiline_row, pad=pad)
else:
pad_row = _pad_row
append_row = _append_basic_row
padded_headers = pad_row(headers, pad)
padded_rows = [pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.lineabove)
if padded_headers:
append_row(lines, padded_headers, padded_widths, colaligns, headerrow)
if fmt.linebelowheader and "linebelowheader" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelowheader)
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
_append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)
# the last row without a line below
append_row(lines, padded_rows[-1], padded_widths, colaligns, fmt.datarow)
else:
for row in padded_rows:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
if fmt.linebelow and "linebelow" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelow)
return "\n".join(lines)
|
crate/crash
|
src/crate/crash/tabulate.py
|
_column_type
|
python
|
def _column_type(values, has_invisible=True):
return reduce(_more_generic, [type(v) for v in values], int)
|
The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/tabulate.py#L582-L602
| null |
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2014 Sergey Astanin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Pretty-print tabular data."""
import re
import io
from collections import namedtuple
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
def float_format(val):
return str(val)
def _is_file(f):
return isinstance(f, io.IOBase)
try:
import wcwidth # optional wide-character (CJK) support
except ImportError:
wcwidth = None
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.5"
MIN_PADDING = 0
# if True, enable wide-character (CJK) support
WIDE_CHARS_MODE = wcwidth is not None
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
    """Return the full `pipe`-format separator line, one segment per column,
    with colons indicating each column's alignment."""
    parts = []
    for align, width in zip(colaligns, colwidths):
        parts.append(_pipe_segment_with_colons(align, width))
    return "|{0}|".format("|".join(parts))
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| '}
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator * 2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"'}
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"}
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns):
    """Render one LaTeX table row, escaping LaTeX special characters
    (per LATEX_ESCAPE_RULES) in every cell."""
    escaped_values = ["".join(LATEX_ESCAPE_RULES.get(ch, ch) for ch in cell)
                      for cell in cell_values]
    rowfmt = DataRow("", "&", "\\\\")
    return _build_simple_row(escaped_values, rowfmt)
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
linebelowheader=Line("╞", "═", "╪", "╡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"html":
TableFormat(lineabove=Line("<table>", "", "", ""),
linebelowheader=None,
linebetweenrows=None,
linebelow=Line("</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_multiline_codes = re.compile(r"\r|\n|\r\n")
_multiline_codes_bytes = re.compile(b"\r|\n|\r\n")
_invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
    """Construct a simple TableFormat with columns separated by a separator.

    >>> tsv = simple_separated_format("\\t") ; \
    tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
    True
    """
    # no rule lines at all; header and data rows share the same shape
    row = DataRow('', separator, '')
    return TableFormat(lineabove=None, linebelowheader=None,
                       linebetweenrows=None, linebelow=None,
                       headerrow=row, datarow=row,
                       padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
    """
    >>> _isnumber("123.45")
    True
    >>> _isnumber("123")
    True
    >>> _isnumber("spam")
    False
    """
    # anything float() accepts counts as a number (including exponent notation)
    return _isconvertible(float, string)
def _isint(string, inttype=int):
    """
    >>> _isint("123")
    True
    >>> _isint("123.45")
    False
    """
    # an actual int, or a str/bytes value that int() accepts
    if type(string) is inttype:
        return True
    if isinstance(string, (_binary_type, _text_type)):
        return _isconvertible(inttype, string)
    return False
def _type(string, has_invisible=True):
    """The least generic type (type(None), int, float, str, unicode).
    >>> _type(None) is type(None)
    True
    >>> _type("foo") is type("")
    True
    >>> _type("1") is type(1)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    """
    if has_invisible and isinstance(string, (_text_type, _binary_type)):
        # classify by the visible text, ignoring ANSI color codes
        string = _strip_invisible(string)
    if string is None:
        return _none_type
    if hasattr(string, "isoformat"):  # datetime.datetime, date, and time
        return _text_type
    if _isint(string):
        return int
    if _isint(string, _long_type):
        return _long_type
    if _isnumber(string):
        return float
    if isinstance(string, _binary_type):
        return _binary_type
    return _text_type
def _afterpoint(string):
    """Symbols after a decimal point, -1 if the string lacks the decimal point.
    >>> _afterpoint("123.45")
    2
    >>> _afterpoint("1001")
    -1
    >>> _afterpoint("eggs")
    -1
    >>> _afterpoint("123e45")
    2
    """
    if not _isnumber(string):
        return -1  # not a number
    if _isint(string):
        return -1  # integers have no fractional part
    # prefer the decimal point; fall back to the exponent marker
    pos = string.rfind(".")
    if pos < 0:
        pos = string.lower().rfind("e")
    if pos < 0:
        return -1  # no point
    return len(string) - pos - 1
def _padleft(width, s, has_invisible=True):
    """Flush right.
    >>> _padleft(6, '\u044f\u0439\u0446\u0430') == '  \u044f\u0439\u0446\u0430'
    True
    """
    def pad_one(val):
        target = width
        if has_invisible:
            # widen by the number of invisible (ANSI) characters in the value
            target += len(val) - len(_strip_invisible(val))
        return ("{0:>%ds}" % target).format(val)
    parts = s.splitlines()
    if len(parts) > 1:
        # pad each physical line of a multi-line cell independently
        return '\n'.join(pad_one(part) for part in parts)
    return pad_one(s)
def _padright(width, s, has_invisible=True):
    """Flush left.
    >>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430  '
    True
    """
    def pad_one(val):
        target = width
        if has_invisible:
            # widen by the number of invisible (ANSI) characters in the value
            target += len(val) - len(_strip_invisible(val))
        return ("{0:<%ds}" % target).format(val)
    parts = s.splitlines()
    if len(parts) > 1:
        # pad each physical line of a multi-line cell independently
        return '\n'.join(pad_one(part) for part in parts)
    return pad_one(s)
def _padboth(width, s, has_invisible=True):
    """Center string.
    >>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
    True
    """
    def pad_one(val):
        target = width
        if has_invisible:
            # widen by the number of invisible (ANSI) characters in the value
            target += len(val) - len(_strip_invisible(val))
        return ("{0:^%ds}" % target).format(val)
    parts = s.splitlines()
    if len(parts) > 1:
        # pad each physical line of a multi-line cell independently
        return '\n'.join(pad_one(part) for part in parts)
    return pad_one(s)
def _padnone(ignore_width, s):
    # No-op pad function: used when a column's alignment is disabled (None).
    return s
def _strip_invisible(s):
    "Remove invisible ANSI color codes."
    # choose the str or bytes flavor of the precompiled ANSI-code pattern
    if isinstance(s, _text_type):
        pattern = _invisible_codes
    else:  # a bytestring
        pattern = _invisible_codes_bytes
    return re.sub(pattern, "", s)
def _max_line_width(s):
"""
Visible width of a potentially multinie content.
>>> _max_line_width('this\\nis\\na\\nmultiline\\ntext')
9
"""
if not s:
return 0
return max(map(len, s.splitlines()))
def _visible_width(s):
    """Visible width of a printed string. ANSI color codes are removed.
    >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
    (5, 5)
    """
    if isinstance(s, (_text_type, _binary_type)):
        return _max_line_width(_strip_invisible(s))
    # non-string values are measured by their text representation
    return _max_line_width(_text_type(s))
def _is_multiline(s):
    """Return True when *s* contains any newline sequence."""
    # pick the pattern matching the value's type (str vs bytes)
    pattern = _multiline_codes if isinstance(s, _text_type) else _multiline_codes_bytes
    return bool(re.search(pattern, s))
def _multiline_width(multiline_s, line_width_fn=len):
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
    """Return a function to calculate visible cell width."""
    if has_invisible:
        line_width_fn = _visible_width
    elif enable_widechars:  # optional wide-character support if available
        line_width_fn = wcwidth.wcswidth
    else:
        line_width_fn = len
    if is_multiline:
        # measure the widest physical line of the cell
        return lambda s: _multiline_width(s, line_width_fn)
    return line_width_fn
def _align_column_choose_padfn(strings, alignment, has_invisible):
    """Pick the pad function for *alignment*; may pre-strip or pre-pad
    *strings* (e.g. lining up decimal points). Returns (strings, padfn)."""
    if alignment == "right":
        return [s.strip() for s in strings], _padleft
    if alignment == "center":
        return [s.strip() for s in strings], _padboth
    if alignment == "decimal":
        # right-pad each number so all decimal points end up aligned
        if has_invisible:
            decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
        else:
            decimals = [_afterpoint(s) for s in strings]
        maxdecimals = max(decimals)
        padded = [s + (maxdecimals - decs) * " "
                  for s, decs in zip(strings, decimals)]
        return padded, _padleft
    if not alignment:
        return strings, _padnone
    # default: flush left
    return [s.strip() for s in strings], _padright
def _align_column(strings, alignment, minwidth=0,
                  has_invisible=True, enable_widechars=False, is_multiline=False):
    """[string] -> [padded_string]

    Pad every cell of one column to a common width according to *alignment*.
    >>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
    ['   12.345  ', '-1234.5    ', '    1.23   ', ' 1234.5    ', '    1e+234 ', '    1.0e234']
    >>> list(map(str,_align_column(['123.4', '56.7890'], None)))
    ['123.4', '56.7890']
    """
    strings, padfn = _align_column_choose_padfn(strings, alignment, has_invisible)
    width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
    s_widths = list(map(width_fn, strings))
    # the column must be at least as wide as its widest cell and *minwidth*
    maxwidth = max(max(s_widths), minwidth)
    # TODO: refactor column alignment in single-line and multiline modes
    if is_multiline:
        if not enable_widechars and not has_invisible:
            # simple case: every character is one column wide
            padded_strings = [
                "\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
                for ms in strings]
        else:
            # enable wide-character width corrections
            s_lens = [max((len(s) for s in re.split("[\r\n]", ms))) for ms in strings]
            visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
            # wcswidth and _visible_width don't count invisible characters;
            # padfn doesn't need to apply another correction
            # NOTE(review): replacing an empty first cell with a single space
            # looks like a workaround for splitlines() on "" -- confirm intent
            if strings[0] == '':
                strings[0] = ' '
            padded_strings = ["\n".join([padfn(w, s) for s in (ms.splitlines() or ms)])
                              for ms, w in zip(strings, visible_widths)]
    else:  # single-line cell values
        if not enable_widechars and not has_invisible:
            padded_strings = [padfn(maxwidth, s) for s in strings]
        else:
            # enable wide-character width corrections
            s_lens = list(map(len, strings))
            visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
            # wcswidth and _visible_width don't count invisible characters;
            # padfn doesn't need to apply another correction
            padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)]
    return padded_strings
def _more_generic(type1, type2):
    """Return the more generic of two cell types (text is the most generic)."""
    rank = {_none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4}
    by_rank = {0: _none_type, 1: int, 2: float, 3: _binary_type, 4: _text_type}
    # unknown types rank as text, the most generic
    return by_rank[max(rank.get(type1, 4), rank.get(type2, 4))]
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
    """Format a value according to its type.

    *valtype* is the column type chosen by _column_type; *floatfmt* is a
    format() spec applied to float columns; None values become *missingval*.
    Unicode is supported:
    >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
    tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
    good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430      \\u0446\\u0438\\u0444\\u0440\\u0430\\n-------  -------\\n\\u0430\\u0437             2\\n\\u0431\\u0443\\u043a\\u0438           4' ; \
    tabulate(tbl, headers=hrow) == good_result
    True
    """
    if val is None:
        return missingval
    if valtype in [int, _long_type, _text_type]:
        return "{0}".format(val)
    elif valtype is _binary_type:
        try:
            return _text_type(val, "ascii")
        except TypeError:
            # val is not bytes (e.g. already str) -- fall back to plain str()
            return _text_type(val)
    elif valtype is float:
        # a float-typed column may still hold a colored *string* number;
        # format only the visible digits and keep the ANSI codes around them
        is_a_colored_number = has_invisible and isinstance(val, (_text_type, _binary_type))
        if is_a_colored_number:
            raw_val = _strip_invisible(val)
            formatted_val = format(float(raw_val), floatfmt)
            return val.replace(raw_val, formatted_val)
        elif not floatfmt:
            return float_format(val)
        else:
            return format(float(val), floatfmt)
    else:
        return "{0}".format(val)
def _align_header(header, alignment, width, visible_width, enable_widechars=False, is_multiline=False):
    """Pad *header* to *width* according to *alignment*."""
    if is_multiline:
        # align each physical line of a multi-line header independently
        header_lines = re.split(_multiline_codes, header)
        padded_lines = [_align_header(h, alignment, width, visible_width)
                        for h in header_lines]
        return "\n".join(padded_lines)
    # else: not multiline
    # widen the target by the number of invisible (ANSI) characters
    width += max(0, len(header) - visible_width)
    if alignment == "left":
        return _padright(width, header)
    if alignment == "center":
        return _padboth(width, header)
    if not alignment:
        return "{0}".format(header)
    return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
    """Transform a supported data type to a list of lists, and a list of headers.

    Supported tabular data types:
    * list-of-lists or another iterable of iterables
    * list of named tuples (usually used with headers="keys")
    * list of dicts (usually used with headers="keys")
    * list of OrderedDicts (usually used with headers="keys")
    * 2D NumPy arrays
    * NumPy record arrays (usually used with headers="keys")
    * dict of iterables (usually used with headers="keys")
    * pandas.DataFrame (usually used with headers="keys")

    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".
    The input type is sniffed by duck typing, so branch order matters.
    """
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict: values() is a method
            keys = tabular_data.keys()
            rows = list(izip_longest(*tabular_data.values()))  # columns have to be transposed
        elif hasattr(tabular_data, "index"):
            # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
            keys = tabular_data.keys()
            vals = tabular_data.values  # values matrix doesn't need to be transposed
            names = tabular_data.index
            # prepend the index value to each data row
            rows = [[v] + list(row) for v, row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
        if headers == "keys":
            headers = list(map(_text_type, keys))  # headers should be strings
    else:  # it's a usual an iterable of iterables, or a NumPy array
        rows = list(tabular_data)
        if (headers == "keys" and
                hasattr(tabular_data, "dtype") and
                getattr(tabular_data.dtype, "names")):
            # numpy record array: field names become the headers
            headers = tabular_data.dtype.names
        elif (headers == "keys"
              and len(rows) > 0
              and isinstance(rows[0], tuple)
              and hasattr(rows[0], "_fields")):
            # namedtuple: _fields become the headers
            headers = list(map(_text_type, rows[0]._fields))
        elif (len(rows) > 0
              and isinstance(rows[0], dict)):
            # dict or OrderedDict: collect the union of keys across all rows
            uniq_keys = set()  # implements hashed lookup
            keys = []  # storage for set
            if headers == "firstrow":
                firstdict = rows[0] if len(rows) > 0 else {}
                keys.extend(firstdict.keys())
                uniq_keys.update(keys)
                rows = rows[1:]
            for row in rows:
                for k in row.keys():
                    # Save unique items in input order
                    if k not in uniq_keys:
                        keys.append(k)
                        uniq_keys.add(k)
            if headers == 'keys':
                headers = keys
            elif isinstance(headers, dict):
                # a dict of headers for a list of dicts
                headers = [headers.get(k, k) for k in keys]
                headers = list(map(_text_type, headers))
            elif headers == "firstrow":
                if len(rows) > 0:
                    # the first dict supplies the header captions
                    headers = [firstdict.get(k, k) for k in keys]
                    headers = list(map(_text_type, headers))
                else:
                    headers = []
            elif headers:
                raise ValueError('headers for a list of dicts is not a dict or a keyword')
            # missing keys in a row yield None cells
            rows = [[row.get(k) for k in keys] for row in rows]
        elif headers == "keys" and len(rows) > 0:
            # keys are column indices
            headers = list(map(_text_type, range(len(rows[0]))))
    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0]))  # headers should be strings
        rows = rows[1:]
    headers = list(map(_text_type, headers))
    rows = list(map(list, rows))
    # pad with empty headers for initial columns if necessary
    # (fewer headers than columns means the headers name the LAST columns,
    # consistent with R / pandas plain-text output)
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            headers = [""] * (ncols - nhs) + headers
    return rows, headers
def tabulate(tabular_data, headers=(), tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=""):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["this\\nis\\na multiline\\ntext", "41.9999", "foo\\nbar"], ["NULL", "451.0", ""]],
... ["text", "numbers", "other"], "grid"))
+-------------+----------+-------+
| text | numbers | other |
+=============+==========+=======+
| this | 41.9999 | foo |
| is | | bar |
| a multiline | | |
| text | | |
+-------------+----------+-------+
| NULL | 451 | |
+-------------+----------+-------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings │ numbers │
╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
\end{tabular}
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
is_multiline = _is_multiline(plain_text)
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval, has_invisible) for v in c]
for c, ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0] * len(cols)
cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw, width_fn(h), enable_widechars, is_multiline)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns, is_multiline)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill * w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " " * padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):
    """Render one single-line row and append it to *lines*; returns *lines*."""
    rendered = _build_row(padded_cells, colwidths, colaligns, rowfmt)
    lines.append(rendered)
    return lines
def _append_multiline_row(lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad):
    """Append one logical row whose cells may span several physical lines.

    Each cell is split on newlines, shorter cells are blank-padded vertically
    so all cells have the same number of lines, and the row is then emitted
    line by line through ``_append_basic_row``.
    """
    colwidths = [w - 2 * pad for w in padded_widths]  # content widths without padding
    cells_lines = [c.splitlines() for c in padded_multiline_cells]
    nlines = max(map(len, cells_lines))  # number of lines in the row
    # vertically pad cells where some lines are missing
    cells_lines = [(cl + [' ' * w] * (nlines - len(cl))) for cl, w in zip(cells_lines, colwidths)]
    lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)]
    for ln in lines_cells:
        # NOTE(review): horizontal padding is re-applied as 1 rather than `pad`
        # here — matches upstream tabulate 0.7.5; confirm if `pad` was intended.
        padded_ln = _pad_row(ln, 1)
        _append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt)
    return lines
def _append_line(lines, colwidths, colaligns, linefmt):
    # Render a horizontal rule (e.g. a header separator) and append it.
    lines.append(_build_line(colwidths, colaligns, linefmt))
    return lines
def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):
    """Produce a plain-text representation of the table.

    `fmt` is a TableFormat; `headers` and `rows` hold already aligned cell
    strings; `colwidths` are content widths without padding.
    """
    lines = []
    # Elements suppressed when headers are present (e.g. "simple" hides rules).
    hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
    pad = fmt.padding
    headerrow = fmt.headerrow
    padded_widths = [(w + 2 * pad) for w in colwidths]
    if is_multiline:
        pad_row = lambda row, _: row  # do it later, in _append_multiline_row
        append_row = partial(_append_multiline_row, pad=pad)
    else:
        pad_row = _pad_row
        append_row = _append_basic_row
    padded_headers = pad_row(headers, pad)
    padded_rows = [pad_row(row, pad) for row in rows]
    if fmt.lineabove and "lineabove" not in hidden:
        _append_line(lines, padded_widths, colaligns, fmt.lineabove)
    if padded_headers:
        append_row(lines, padded_headers, padded_widths, colaligns, headerrow)
        if fmt.linebelowheader and "linebelowheader" not in hidden:
            _append_line(lines, padded_widths, colaligns, fmt.linebelowheader)
    if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
        # initial rows with a line below
        for row in padded_rows[:-1]:
            append_row(lines, row, padded_widths, colaligns, fmt.datarow)
            _append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)
        # the last row without a line below
        append_row(lines, padded_rows[-1], padded_widths, colaligns, fmt.datarow)
    else:
        for row in padded_rows:
            append_row(lines, row, padded_widths, colaligns, fmt.datarow)
    if fmt.linebelow and "linebelow" not in hidden:
        _append_line(lines, padded_widths, colaligns, fmt.linebelow)
    return "\n".join(lines)
|
crate/crash
|
src/crate/crash/tabulate.py
|
_normalize_tabular_data
|
python
|
def _normalize_tabular_data(tabular_data, headers):
    """Transform a supported data type to a list of lists, and a list of headers.

    Supported tabular data types:

    * list-of-lists or another iterable of iterables
    * list of named tuples (usually used with headers="keys")
    * list of dicts / OrderedDicts (usually used with headers="keys")
    * 2D NumPy arrays
    * NumPy record arrays (usually used with headers="keys")
    * dict of iterables (usually used with headers="keys")
    * pandas.DataFrame (usually used with headers="keys")

    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".
    """
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict
            keys = tabular_data.keys()
            rows = list(izip_longest(*tabular_data.values()))  # columns have to be transposed
        elif hasattr(tabular_data, "index"):
            # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
            keys = tabular_data.keys()
            vals = tabular_data.values  # values matrix doesn't need to be transposed
            names = tabular_data.index
            rows = [[v] + list(row) for v, row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
        if headers == "keys":
            headers = list(map(_text_type, keys))  # headers should be strings
    else:  # it's usually an iterable of iterables, or a NumPy array
        rows = list(tabular_data)
        if (headers == "keys" and
            hasattr(tabular_data, "dtype") and
            getattr(tabular_data.dtype, "names")):
            # numpy record array
            headers = tabular_data.dtype.names
        elif (headers == "keys"
              and len(rows) > 0
              and isinstance(rows[0], tuple)
              and hasattr(rows[0], "_fields")):
            # namedtuple
            headers = list(map(_text_type, rows[0]._fields))
        elif (len(rows) > 0
              and isinstance(rows[0], dict)):
            # dict or OrderedDict
            uniq_keys = set()  # implements hashed lookup
            keys = []  # storage for set
            if headers == "firstrow":
                firstdict = rows[0] if len(rows) > 0 else {}
                keys.extend(firstdict.keys())
                uniq_keys.update(keys)
                rows = rows[1:]
            for row in rows:
                for k in row.keys():
                    # Save unique items in input order
                    if k not in uniq_keys:
                        keys.append(k)
                        uniq_keys.add(k)
            if headers == 'keys':
                headers = keys
            elif isinstance(headers, dict):
                # a dict of headers for a list of dicts
                headers = [headers.get(k, k) for k in keys]
                headers = list(map(_text_type, headers))
            elif headers == "firstrow":
                if len(rows) > 0:
                    # map original first-row values onto the full key set
                    headers = [firstdict.get(k, k) for k in keys]
                    headers = list(map(_text_type, headers))
                else:
                    headers = []
            elif headers:
                raise ValueError('headers for a list of dicts is not a dict or a keyword')
            rows = [[row.get(k) for k in keys] for row in rows]
        elif headers == "keys" and len(rows) > 0:
            # keys are column indices
            headers = list(map(_text_type, range(len(rows[0]))))
    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0]))  # headers should be strings
        rows = rows[1:]
    headers = list(map(_text_type, headers))
    rows = list(map(list, rows))
    # pad with empty headers for initial columns if necessary
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            headers = [""] * (ncols - nhs) + headers
    return rows, headers
|
Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/tabulate.py#L659-L767
| null |
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2014 Sergey Astanin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Pretty-print tabular data."""
import re
import io
from collections import namedtuple
from itertools import zip_longest as izip_longest
from functools import reduce, partial
# Python-2/3 compatibility aliases; on Python 3 these all resolve to builtins.
_none_type = type(None)
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
def float_format(val):
    """Render *val* using its default string representation."""
    return "{0!s}".format(val)
def _is_file(f):
return isinstance(f, io.IOBase)
# Optional dependency: wcwidth enables correct width measurement of CJK
# (double-width) characters; without it widths fall back to len().
try:
    import wcwidth  # optional wide-character (CJK) support
except ImportError:
    wcwidth = None

# Public API of the module.
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.5"

# Extra spaces reserved around header text when computing minimum widths.
MIN_PADDING = 0

# if True, enable wide-character (CJK) support
WIDE_CHARS_MODE = wcwidth is not None

# Building blocks of a TableFormat: a horizontal rule and a data/header row.
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
#     --- lineabove ---------
#         headerrow
#     --- linebelowheader ---
#         datarow
#     --- linebetweenrows ---
#     ... (more datarows) ...
#     --- linebetweenrows ---
#         last datarow
#     --- linebelow ---------
#
# TableFormat's line* elements can be
#
#   - either None, if the element is not used,
#   - or a Line tuple,
#   - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
#   - either None, if the element is not used,
#   - or a DataRow tuple,
#   - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
#   - either None, to display all table elements unconditionally,
#   - or a list of elements not to be displayed if the table has column headers.
#
# Full specification of one output style; see the commentary above for the
# role of each field.
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
                                         "linebetweenrows", "linebelow",
                                         "headerrow", "datarow",
                                         "padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
    """Return a full `pipe`-format separator line with alignment colons."""
    parts = (_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths))
    return "|{0}|".format("|".join(parts))
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| '}
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator * 2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"'}
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"}
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\hline"])
# Characters that must be escaped to appear literally in LaTeX output,
# mapped to their LaTeX-safe spellings.
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
                      r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
                      r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
                      r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns):
    """Render one LaTeX table row, escaping special characters in each cell."""
    def escape_char(c):
        # Map a single character to its LaTeX-safe spelling (identity if safe).
        return LATEX_ESCAPE_RULES.get(c, c)
    escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
    rowfmt = DataRow("", "&", "\\\\")  # cells joined by '&', row ends with '\\'
    return _build_simple_row(escaped_values, rowfmt)
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
linebelowheader=Line("╞", "═", "╪", "╡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"html":
TableFormat(lineabove=Line("<table>", "", "", ""),
linebelowheader=None,
linebetweenrows=None,
linebelow=Line("</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_multiline_codes = re.compile(r"\r|\n|\r\n")
_multiline_codes_bytes = re.compile(b"\r|\n|\r\n")
_invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
    """Construct a simple TableFormat with columns separated by a separator.

    No rules or borders are drawn at all — cells are simply joined by
    `separator`, e.g. a tab for TSV output.

    >>> tsv = simple_separated_format("\\t") ; \
        tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
    True
    """
    return TableFormat(None, None, None, None,
                       headerrow=DataRow('', separator, ''),
                       datarow=DataRow('', separator, ''),
                       padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string, inttype=int):
    """True when *string* is an ``inttype`` or a string convertible to one.

    >>> _isint("123")
    True
    >>> _isint("123.45")
    False
    """
    if type(string) is inttype:
        return True
    is_stringlike = isinstance(string, _binary_type) or isinstance(string, _text_type)
    return is_stringlike and _isconvertible(inttype, string)
def _type(string, has_invisible=True):
    """The least generic type (type(None), int, float, str, unicode).

    >>> _type(None) is type(None)
    True
    >>> _type("foo") is type("")
    True
    >>> _type("1") is type(1)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    """
    # Strip ANSI color codes first so e.g. "\x1b[31m42\x1b[0m" classifies as int.
    if has_invisible and \
       (isinstance(string, _text_type) or isinstance(string, _binary_type)):
        string = _strip_invisible(string)
    # Order matters: most specific type first.
    if string is None:
        return _none_type
    elif hasattr(string, "isoformat"):  # datetime.datetime, date, and time
        return _text_type
    elif _isint(string):
        return int
    elif _isint(string, _long_type):
        return _long_type
    elif _isnumber(string):
        return float
    elif isinstance(string, _binary_type):
        return _binary_type
    else:
        return _text_type
def _afterpoint(string):
    """Number of symbols after a decimal point; -1 when there is none.

    >>> _afterpoint("123.45")
    2
    >>> _afterpoint("1001")
    -1
    >>> _afterpoint("eggs")
    -1
    >>> _afterpoint("123e45")
    2
    """
    if not _isnumber(string):
        return -1  # not a number at all
    if _isint(string):
        return -1  # integers have no decimal point
    marker = string.rfind(".")
    if marker < 0:
        # scientific notation: count digits after 'e' instead
        marker = string.lower().rfind("e")
    if marker < 0:
        return -1  # no point
    return len(string) - marker - 1
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:>%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:<%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:^%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padnone(ignore_width, s):
    # No-op padding used when alignment is disabled; the width is ignored.
    return s
def _strip_invisible(s):
    "Remove invisible ANSI color codes."
    # Text and bytes need separate pre-compiled patterns.
    if isinstance(s, _text_type):
        return re.sub(_invisible_codes, "", s)
    else:  # a bytestring
        return re.sub(_invisible_codes_bytes, "", s)
def _max_line_width(s):
"""
Visible width of a potentially multinie content.
>>> _max_line_width('this\\nis\\na\\nmultiline\\ntext')
9
"""
if not s:
return 0
return max(map(len, s.splitlines()))
def _visible_width(s):
    """Visible width of a printed string. ANSI color codes are removed.

    >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
    (5, 5)
    """
    # Non-string values are measured by the width of their str() form.
    if isinstance(s, _text_type) or isinstance(s, _binary_type):
        return _max_line_width(_strip_invisible(s))
    else:
        return _max_line_width(_text_type(s))
def _is_multiline(s):
    # True when the string (or bytestring) contains any newline character.
    if isinstance(s, _text_type):
        return bool(re.search(_multiline_codes, s))
    else:  # a bytestring
        return bool(re.search(_multiline_codes_bytes, s))
def _multiline_width(multiline_s, line_width_fn=len):
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
    """Return a function to calculate visible cell width."""
    # Priority: ANSI-aware measurement > wide-character (CJK) > plain len.
    if has_invisible:
        line_width_fn = _visible_width
    elif enable_widechars:  # optional wide-character support if available
        line_width_fn = wcwidth.wcswidth
    else:
        line_width_fn = len
    if is_multiline:
        # For multiline cells, the widest physical line determines the width.
        width_fn = lambda s: _multiline_width(s, line_width_fn)
    else:
        width_fn = line_width_fn
    return width_fn
def _align_column_choose_padfn(strings, alignment, has_invisible):
    """Pick the padding function for *alignment* and pre-process *strings*.

    Returns the (possibly stripped or decimal-aligned) strings together with
    one of the ``_pad*`` helpers. For "decimal" alignment every string is
    right-padded with spaces so that all decimal points line up.
    """
    if alignment == "right":
        strings = [s.strip() for s in strings]
        padfn = _padleft
    elif alignment == "center":
        strings = [s.strip() for s in strings]
        padfn = _padboth
    elif alignment == "decimal":
        # Measure digits after the decimal point, ignoring ANSI codes.
        if has_invisible:
            decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
        else:
            decimals = [_afterpoint(s) for s in strings]
        maxdecimals = max(decimals)
        strings = [s + (maxdecimals - decs) * " "
                   for s, decs in zip(strings, decimals)]
        padfn = _padleft
    elif not alignment:
        padfn = _padnone
    else:
        # Default ("left" and anything unrecognized): flush left.
        strings = [s.strip() for s in strings]
        padfn = _padright
    return strings, padfn
def _align_column(strings, alignment, minwidth=0,
                  has_invisible=True, enable_widechars=False, is_multiline=False):
    """[string] -> [padded_string]

    >>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
    ['   12.345  ', '-1234.5    ', '    1.23   ', ' 1234.5    ', '    1e+234 ', '    1.0e234']

    >>> list(map(str,_align_column(['123.4', '56.7890'], None)))
    ['123.4', '56.7890']
    """
    strings, padfn = _align_column_choose_padfn(strings, alignment, has_invisible)
    width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
    s_widths = list(map(width_fn, strings))
    maxwidth = max(max(s_widths), minwidth)
    # TODO: refactor column alignment in single-line and multiline modes
    if is_multiline:
        if not enable_widechars and not has_invisible:
            # Simple case: pad every physical line of every cell to maxwidth.
            padded_strings = [
                "\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
                for ms in strings]
        else:
            # enable wide-character width corrections
            s_lens = [max((len(s) for s in re.split("[\r\n]", ms))) for ms in strings]
            visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
            # wcswidth and _visible_width don't count invisible characters;
            # padfn doesn't need to apply another correction
            if strings[0] == '':
                strings[0] = ' '
            padded_strings = ["\n".join([padfn(w, s) for s in (ms.splitlines() or ms)])
                              for ms, w in zip(strings, visible_widths)]
    else:  # single-line cell values
        if not enable_widechars and not has_invisible:
            padded_strings = [padfn(maxwidth, s) for s in strings]
        else:
            # enable wide-character width corrections
            s_lens = list(map(len, strings))
            visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
            # wcswidth and _visible_width don't count invisible characters;
            # padfn doesn't need to apply another correction
            padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)]
    return padded_strings
def _more_generic(type1, type2):
    # Return whichever of the two types is more generic; rank order is
    # NoneType < int < float < bytes < str.
    types = {_none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4}
    invtypes = {4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type}
    # Unknown types rank as text (4), the most generic category.
    moregeneric = max(types.get(type1, 4), types.get(type2, 4))
    return invtypes[moregeneric]
def _column_type(values, has_invisible=True):
    """The least generic type all column values are convertible to.

    NOTE(review): `has_invisible` is accepted but unused here; stripping of
    ANSI codes happens in `_type`/`_format` instead — confirm intended.

    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    >>> import datetime as dt
    >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
    True
    """
    # Fold over the runtime types of the values, starting from int (the most
    # specific non-None category), widening with _more_generic as needed.
    return reduce(_more_generic, [type(v) for v in values], int)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
    """Format a value according to its type.

    Unicode is supported:

    >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
        tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
        good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430      \\u0446\\u0438\\u0444\\u0440\\u0430\\n-------  -------\\n\\u0430\\u0437             2\\n\\u0431\\u0443\\u043a\\u0438           4' ; \
        tabulate(tbl, headers=hrow) == good_result
    True
    """
    if val is None:
        return missingval
    if valtype in [int, _long_type, _text_type]:
        return "{0}".format(val)
    elif valtype is _binary_type:
        # Prefer ASCII decoding for bytes; fall back to str() representation.
        try:
            return _text_type(val, "ascii")
        except TypeError:
            return _text_type(val)
    elif valtype is float:
        # A "colored number" is a numeric string wrapped in ANSI codes; format
        # only the visible digits and splice the result back in.
        is_a_colored_number = has_invisible and isinstance(val, (_text_type, _binary_type))
        if is_a_colored_number:
            raw_val = _strip_invisible(val)
            formatted_val = format(float(raw_val), floatfmt)
            return val.replace(raw_val, formatted_val)
        elif not floatfmt:
            # No explicit float format: use the module's default rendering.
            return float_format(val)
        else:
            return format(float(val), floatfmt)
    else:
        return "{0}".format(val)
def _align_header(header, alignment, width, visible_width, enable_widechars=False, is_multiline=False):
    """Pad a header cell to *width*; multiline headers are aligned line by line."""
    if is_multiline:
        header_lines = re.split(_multiline_codes, header)
        padded_lines = [_align_header(h, alignment, width, visible_width) for h in header_lines]
        return "\n".join(padded_lines)
    # else: not multiline
    # Compensate for ANSI codes that occupy characters but no columns.
    ninvisible = max(0, len(header) - visible_width)
    width += ninvisible
    if alignment == "left":
        return _padright(width, header)
    elif alignment == "center":
        return _padboth(width, header)
    elif not alignment:
        return "{0}".format(header)
    else:
        # "right", "decimal" and anything else: flush right.
        return _padleft(width, header)
def tabulate(tabular_data, headers=(), tablefmt="simple",
             floatfmt="g", numalign="decimal", stralign="left",
             missingval=""):
    """Format a fixed width table for pretty printing.

    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    ---  ---------
      1      2.34
    -56      8.999
      2  10001
    ---  ---------

    The first required argument (`tabular_data`) can be a
    list-of-lists (or another iterable of iterables), a list of named
    tuples, a dictionary of iterables, an iterable of dictionaries,
    a two-dimensional NumPy array, NumPy record array, or a Pandas'
    dataframe.

    Table headers
    -------------

    To print nice column headers, supply the second argument (`headers`):

      - `headers` can be an explicit list of column headers
      - if `headers="firstrow"`, then the first row of data is used
      - if `headers="keys"`, then dictionary keys or column indices are used

    Otherwise a headerless table is produced.

    If the number of headers is less than the number of columns, they
    are supposed to be names of the last columns. This is consistent
    with the plain-text format of R and Pandas' dataframes.

    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ...       headers="firstrow"))
           sex      age
    -----  -----  -----
    Alice  F         24
    Bob    M         19

    Column alignment
    ----------------

    `tabulate` tries to detect column types automatically, and aligns
    the values properly. By default it aligns decimal points of the
    numbers (or flushes integer numbers to the right), and flushes
    everything else to the left. Possible column alignments
    (`numalign`, `stralign`) are: "right", "center", "left", "decimal"
    (only for `numalign`), and None (to disable alignment).

    Table formats
    -------------

    `floatfmt` is a format specification used for columns which
    contain numeric data with a decimal point.

    `None` values are replaced with a `missingval` string:

    >>> print(tabulate([["spam", 1, None],
    ...                 ["eggs", 42, 3.14],
    ...                 ["other", None, 2.7]], missingval="?"))
    -----  --  ----
    spam    1  ?
    eggs   42  3.14
    other   ?  2.7
    -----  --  ----

    Various plain-text table formats (`tablefmt`) are supported:
    'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
    'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
    currently supported formats.

    "plain" format doesn't use any pseudographics to draw tables,
    it separates columns with a double space:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "plain"))
    strings      numbers
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
    spam   41.9999
    eggs  451

    "simple" format is like Pandoc simple_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "simple"))
    strings      numbers
    ---------  ---------
    spam         41.9999
    eggs        451

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
    ----  --------
    spam   41.9999
    eggs  451
    ----  --------

    "grid" is similar to tables produced by Emacs table.el package or
    Pandoc grid_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "grid"))
    +-----------+-----------+
    | strings   |   numbers |
    +===========+===========+
    | spam      |   41.9999 |
    +-----------+-----------+
    | eggs      |  451      |
    +-----------+-----------+

    >>> print(tabulate([["this\\nis\\na multiline\\ntext", "41.9999", "foo\\nbar"], ["NULL", "451.0", ""]],
    ...                ["text", "numbers", "other"], "grid"))
    +-------------+----------+-------+
    | text        |  numbers | other |
    +=============+==========+=======+
    | this        |  41.9999 | foo   |
    | is          |          | bar   |
    | a multiline |          |       |
    | text        |          |       |
    +-------------+----------+-------+
    | NULL        | 451      |       |
    +-------------+----------+-------+

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
    +------+----------+
    | spam |  41.9999 |
    +------+----------+
    | eggs | 451      |
    +------+----------+

    "fancy_grid" draws a grid using box-drawing characters:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "fancy_grid"))
    ╒═══════════╤═══════════╕
    │ strings   │   numbers │
    ╞═══════════╪═══════════╡
    │ spam      │   41.9999 │
    ├───────────┼───────────┤
    │ eggs      │  451      │
    ╘═══════════╧═══════════╛

    "pipe" is like tables in PHP Markdown Extra extension or Pandoc
    pipe_tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "pipe"))
    | strings   |   numbers |
    |:----------|----------:|
    | spam      |   41.9999 |
    | eggs      |  451      |

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
    |:-----|---------:|
    | spam |  41.9999 |
    | eggs | 451      |

    "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
    are slightly different from "pipe" format by not using colons to
    define column alignment, and using a "+" sign to indicate line
    intersections:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "orgtbl"))
    | strings   |   numbers |
    |-----------+-----------|
    | spam      |   41.9999 |
    | eggs      |  451      |

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
    | spam |  41.9999 |
    | eggs | 451      |

    "rst" is like a simple table format from reStructuredText; please
    note that reStructuredText accepts also "grid" tables:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "rst"))
    =========  =========
    strings      numbers
    =========  =========
    spam         41.9999
    eggs        451
    =========  =========

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
    ====  ========
    spam   41.9999
    eggs  451
    ====  ========

    "mediawiki" produces a table markup used in Wikipedia and on other
    MediaWiki-based sites:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="mediawiki"))
    {| class="wikitable" style="text-align: left;"
    |+ <!-- caption -->
    |-
    ! strings   !! align="right"|   numbers
    |-
    | spam      || align="right"|   41.9999
    |-
    | eggs      || align="right"|  451
    |}

    "html" produces HTML markup:

    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="html"))
    <table>
    <tr><th>strings  </th><th style="text-align: right;">  numbers</th></tr>
    <tr><td>spam     </td><td style="text-align: right;">  41.9999</td></tr>
    <tr><td>eggs     </td><td style="text-align: right;"> 451     </td></tr>
    </table>

    "latex" produces a tabular environment of LaTeX document markup:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
    \\begin{tabular}{lr}
    \\hline
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\hline
    \\end{tabular}

    "latex_booktabs" produces a tabular environment of LaTeX document markup
    using the booktabs.sty package:

    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
    \\begin{tabular}{lr}
    \\toprule
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\bottomrule
    \\end{tabular}
    """
    if tabular_data is None:
        tabular_data = []
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)

    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
                           ['\t'.join(map(_text_type, row)) for row in list_of_lists])
    has_invisible = re.search(_invisible_codes, plain_text)
    enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
    is_multiline = _is_multiline(plain_text)
    width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)

    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))  # transpose rows into columns
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval, has_invisible) for v in c]
            for c, ct in zip(cols, coltypes)]

    # align columns: numeric columns use numalign, everything else stralign
    aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
    minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0] * len(cols)
    cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline)
            for c, a, minw in zip(cols, aligns, minwidths)]

    if headers:
        # align headers and add headers
        t_cols = cols or [['']] * len(headers)
        t_aligns = aligns or [stralign] * len(headers)
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
        headers = [_align_header(h, a, minw, width_fn(h), enable_widechars, is_multiline)
                   for h, a, minw in zip(headers, t_aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))

    # Unknown format names silently fall back to "simple".
    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])

    return _format_table(tablefmt, headers, rows, minwidths, aligns, is_multiline)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill * w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " " * padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):
lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt))
return lines
def _append_multiline_row(lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad):
colwidths = [w - 2 * pad for w in padded_widths]
cells_lines = [c.splitlines() for c in padded_multiline_cells]
nlines = max(map(len, cells_lines)) # number of lines in the row
# vertically pad cells where some lines are missing
cells_lines = [(cl + [' ' * w] * (nlines - len(cl))) for cl, w in zip(cells_lines, colwidths)]
lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)]
for ln in lines_cells:
padded_ln = _pad_row(ln, 1)
_append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt)
return lines
def _append_line(lines, colwidths, colaligns, linefmt):
lines.append(_build_line(colwidths, colaligns, linefmt))
return lines
def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2 * pad) for w in colwidths]
if is_multiline:
pad_row = lambda row, _: row # do it later, in _append_multiline_row
append_row = partial(_append_multiline_row, pad=pad)
else:
pad_row = _pad_row
append_row = _append_basic_row
padded_headers = pad_row(headers, pad)
padded_rows = [pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.lineabove)
if padded_headers:
append_row(lines, padded_headers, padded_widths, colaligns, headerrow)
if fmt.linebelowheader and "linebelowheader" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelowheader)
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
_append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)
# the last row without a line below
append_row(lines, padded_rows[-1], padded_widths, colaligns, fmt.datarow)
else:
for row in padded_rows:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
if fmt.linebelow and "linebelow" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelow)
return "\n".join(lines)
|
crate/crash
|
src/crate/crash/tabulate.py
|
tabulate
|
python
|
def tabulate(tabular_data, headers=(), tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=""):
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
is_multiline = _is_multiline(plain_text)
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval, has_invisible) for v in c]
for c, ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0] * len(cols)
cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw, width_fn(h), enable_widechars, is_multiline)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns, is_multiline)
|
Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["this\\nis\\na multiline\\ntext", "41.9999", "foo\\nbar"], ["NULL", "451.0", ""]],
... ["text", "numbers", "other"], "grid"))
+-------------+----------+-------+
| text | numbers | other |
+=============+==========+=======+
| this | 41.9999 | foo |
| is | | bar |
| a multiline | | |
| text | | |
+-------------+----------+-------+
| NULL | 451 | |
+-------------+----------+-------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings │ numbers │
╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
\end{tabular}
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/tabulate.py#L770-L1055
|
[
"def _is_multiline(s):\n if isinstance(s, _text_type):\n return bool(re.search(_multiline_codes, s))\n else: # a bytestring\n return bool(re.search(_multiline_codes_bytes, s))\n",
"def _choose_width_fn(has_invisible, enable_widechars, is_multiline):\n \"\"\"Return a function to calculate visible cell width.\"\"\"\n if has_invisible:\n line_width_fn = _visible_width\n elif enable_widechars: # optional wide-character support if available\n line_width_fn = wcwidth.wcswidth\n else:\n line_width_fn = len\n if is_multiline:\n width_fn = lambda s: _multiline_width(s, line_width_fn)\n else:\n width_fn = line_width_fn\n return width_fn\n",
"def _normalize_tabular_data(tabular_data, headers):\n \"\"\"Transform a supported data type to a list of lists, and a list of headers.\n\n Supported tabular data types:\n\n * list-of-lists or another iterable of iterables\n\n * list of named tuples (usually used with headers=\"keys\")\n\n * list of dicts (usually used with headers=\"keys\")\n\n * list of OrderedDicts (usually used with headers=\"keys\")\n\n * 2D NumPy arrays\n\n * NumPy record arrays (usually used with headers=\"keys\")\n\n * dict of iterables (usually used with headers=\"keys\")\n\n * pandas.DataFrame (usually used with headers=\"keys\")\n\n The first row can be used as headers if headers=\"firstrow\",\n column indices can be used as headers if headers=\"keys\".\n\n \"\"\"\n\n if hasattr(tabular_data, \"keys\") and hasattr(tabular_data, \"values\"):\n # dict-like and pandas.DataFrame?\n if hasattr(tabular_data.values, \"__call__\"):\n # likely a conventional dict\n keys = tabular_data.keys()\n rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed\n elif hasattr(tabular_data, \"index\"):\n # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)\n keys = tabular_data.keys()\n vals = tabular_data.values # values matrix doesn't need to be transposed\n names = tabular_data.index\n rows = [[v] + list(row) for v, row in zip(names, vals)]\n else:\n raise ValueError(\"tabular data doesn't appear to be a dict or a DataFrame\")\n\n if headers == \"keys\":\n headers = list(map(_text_type, keys)) # headers should be strings\n\n else: # it's a usual an iterable of iterables, or a NumPy array\n rows = list(tabular_data)\n\n if (headers == \"keys\" and\n hasattr(tabular_data, \"dtype\") and\n getattr(tabular_data.dtype, \"names\")):\n # numpy record array\n headers = tabular_data.dtype.names\n elif (headers == \"keys\"\n and len(rows) > 0\n and isinstance(rows[0], tuple)\n and hasattr(rows[0], \"_fields\")):\n # namedtuple\n headers = list(map(_text_type, 
rows[0]._fields))\n elif (len(rows) > 0\n and isinstance(rows[0], dict)):\n # dict or OrderedDict\n uniq_keys = set() # implements hashed lookup\n keys = [] # storage for set\n if headers == \"firstrow\":\n firstdict = rows[0] if len(rows) > 0 else {}\n keys.extend(firstdict.keys())\n uniq_keys.update(keys)\n rows = rows[1:]\n for row in rows:\n for k in row.keys():\n # Save unique items in input order\n if k not in uniq_keys:\n keys.append(k)\n uniq_keys.add(k)\n if headers == 'keys':\n headers = keys\n elif isinstance(headers, dict):\n # a dict of headers for a list of dicts\n headers = [headers.get(k, k) for k in keys]\n headers = list(map(_text_type, headers))\n elif headers == \"firstrow\":\n if len(rows) > 0:\n headers = [firstdict.get(k, k) for k in keys]\n headers = list(map(_text_type, headers))\n else:\n headers = []\n elif headers:\n raise ValueError('headers for a list of dicts is not a dict or a keyword')\n rows = [[row.get(k) for k in keys] for row in rows]\n elif headers == \"keys\" and len(rows) > 0:\n # keys are column indices\n headers = list(map(_text_type, range(len(rows[0]))))\n\n # take headers from the first row if necessary\n if headers == \"firstrow\" and len(rows) > 0:\n headers = list(map(_text_type, rows[0])) # headers should be strings\n rows = rows[1:]\n\n headers = list(map(_text_type, headers))\n rows = list(map(list, rows))\n\n # pad with empty headers for initial columns if necessary\n if headers and len(rows) > 0:\n nhs = len(headers)\n ncols = len(rows[0])\n if nhs < ncols:\n headers = [\"\"] * (ncols - nhs) + headers\n\n return rows, headers\n",
"def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):\n \"\"\"Produce a plain-text representation of the table.\"\"\"\n lines = []\n hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []\n pad = fmt.padding\n headerrow = fmt.headerrow\n\n padded_widths = [(w + 2 * pad) for w in colwidths]\n if is_multiline:\n pad_row = lambda row, _: row # do it later, in _append_multiline_row\n append_row = partial(_append_multiline_row, pad=pad)\n else:\n pad_row = _pad_row\n append_row = _append_basic_row\n\n padded_headers = pad_row(headers, pad)\n padded_rows = [pad_row(row, pad) for row in rows]\n\n if fmt.lineabove and \"lineabove\" not in hidden:\n _append_line(lines, padded_widths, colaligns, fmt.lineabove)\n\n if padded_headers:\n append_row(lines, padded_headers, padded_widths, colaligns, headerrow)\n if fmt.linebelowheader and \"linebelowheader\" not in hidden:\n _append_line(lines, padded_widths, colaligns, fmt.linebelowheader)\n\n if padded_rows and fmt.linebetweenrows and \"linebetweenrows\" not in hidden:\n # initial rows with a line below\n for row in padded_rows[:-1]:\n append_row(lines, row, padded_widths, colaligns, fmt.datarow)\n _append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)\n # the last row without a line below\n append_row(lines, padded_rows[-1], padded_widths, colaligns, fmt.datarow)\n else:\n for row in padded_rows:\n append_row(lines, row, padded_widths, colaligns, fmt.datarow)\n\n if fmt.linebelow and \"linebelow\" not in hidden:\n _append_line(lines, padded_widths, colaligns, fmt.linebelow)\n\n return \"\\n\".join(lines)\n"
] |
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2014 Sergey Astanin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Pretty-print tabular data."""
import re
import io
from collections import namedtuple
from itertools import zip_longest as izip_longest
from functools import reduce, partial
# Python-2/3 compatibility aliases kept from the upstream tabulate sources;
# on Python 3 they all resolve to the native builtin types.
_none_type = type(None)
_int_type = int
_long_type = int  # Python 2 `long` no longer exists; alias to int
_float_type = float
_text_type = str
_binary_type = bytes
def float_format(val):
    """Return the default string representation of *val*."""
    rendered = str(val)
    return rendered
def _is_file(f):
return isinstance(f, io.IOBase)
try:
import wcwidth # optional wide-character (CJK) support
except ImportError:
wcwidth = None
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.5"
# extra width added to each header cell's minimum column width
MIN_PADDING = 0
# if True, enable wide-character (CJK) support
WIDE_CHARS_MODE = wcwidth is not None
# horizontal rule: begin/end delimiters, fill character, column separator
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
# data row: begin/end delimiters and the cell separator
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
#     --- lineabove ---------
#         headerrow
#     --- linebelowheader ---
#         datarow
#     --- linebetweenrows ---
#     ... (more datarows) ...
#     --- linebetweenrows ---
#         last datarow
#     --- linebelow ---------
#
# TableFormat's line* elements can be
#
#   - either None, if the element is not used,
#   - or a Line tuple,
#   - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
#   - either None, if the element is not used,
#   - or a DataRow tuple,
#   - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
#   - either None, to display all table elements unconditionally,
#   - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
                                         "linebetweenrows", "linebelow",
                                         "headerrow", "datarow",
                                         "padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| '}
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator * 2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"'}
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"}
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\hline"])
# Per-character escaping rules for LaTeX output, applied by _latex_row.
# Keys are raw single characters; values are their escaped TeX forms.
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
                      r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
                      r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
                      r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns):
    """Render one row of a LaTeX tabular, escaping TeX special characters
    in each cell value."""
    escaped = []
    for cell in cell_values:
        escaped.append("".join(LATEX_ESCAPE_RULES.get(ch, ch) for ch in cell))
    rowfmt = DataRow("", "&", "\\\\")
    return _build_simple_row(escaped, rowfmt)
# Registry of the named output formats.  tabulate(..., tablefmt=<name>)
# looks the name up here and falls back to "simple" when it is unknown.
_table_formats = {"simple":
                  TableFormat(lineabove=Line("", "-", " ", ""),
                              linebelowheader=Line("", "-", " ", ""),
                              linebetweenrows=None,
                              linebelow=Line("", "-", " ", ""),
                              headerrow=DataRow("", " ", ""),
                              datarow=DataRow("", " ", ""),
                              padding=0,
                              with_header_hide=["lineabove", "linebelow"]),
                  "plain":
                  TableFormat(lineabove=None, linebelowheader=None,
                              linebetweenrows=None, linebelow=None,
                              headerrow=DataRow("", " ", ""),
                              datarow=DataRow("", " ", ""),
                              padding=0, with_header_hide=None),
                  "grid":
                  TableFormat(lineabove=Line("+", "-", "+", "+"),
                              linebelowheader=Line("+", "=", "+", "+"),
                              linebetweenrows=Line("+", "-", "+", "+"),
                              linebelow=Line("+", "-", "+", "+"),
                              headerrow=DataRow("|", "|", "|"),
                              datarow=DataRow("|", "|", "|"),
                              padding=1, with_header_hide=None),
                  "fancy_grid":
                  TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
                              linebelowheader=Line("╞", "═", "╪", "╡"),
                              linebetweenrows=Line("├", "─", "┼", "┤"),
                              linebelow=Line("╘", "═", "╧", "╛"),
                              headerrow=DataRow("│", "│", "│"),
                              datarow=DataRow("│", "│", "│"),
                              padding=1, with_header_hide=None),
                  "pipe":
                  TableFormat(lineabove=_pipe_line_with_colons,
                              linebelowheader=_pipe_line_with_colons,
                              linebetweenrows=None,
                              linebelow=None,
                              headerrow=DataRow("|", "|", "|"),
                              datarow=DataRow("|", "|", "|"),
                              padding=1,
                              with_header_hide=["lineabove"]),
                  "orgtbl":
                  TableFormat(lineabove=None,
                              linebelowheader=Line("|", "-", "+", "|"),
                              linebetweenrows=None,
                              linebelow=None,
                              headerrow=DataRow("|", "|", "|"),
                              datarow=DataRow("|", "|", "|"),
                              padding=1, with_header_hide=None),
                  "psql":
                  TableFormat(lineabove=Line("+", "-", "+", "+"),
                              linebelowheader=Line("|", "-", "+", "|"),
                              linebetweenrows=None,
                              linebelow=Line("+", "-", "+", "+"),
                              headerrow=DataRow("|", "|", "|"),
                              datarow=DataRow("|", "|", "|"),
                              padding=1, with_header_hide=None),
                  "rst":
                  TableFormat(lineabove=Line("", "=", " ", ""),
                              linebelowheader=Line("", "=", " ", ""),
                              linebetweenrows=None,
                              linebelow=Line("", "=", " ", ""),
                              headerrow=DataRow("", " ", ""),
                              datarow=DataRow("", " ", ""),
                              padding=0, with_header_hide=None),
                  "mediawiki":
                  TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
                                             "", "", "\n|+ <!-- caption -->\n|-"),
                              linebelowheader=Line("|-", "", "", ""),
                              linebetweenrows=Line("|-", "", "", ""),
                              linebelow=Line("|}", "", "", ""),
                              headerrow=partial(_mediawiki_row_with_attrs, "!"),
                              datarow=partial(_mediawiki_row_with_attrs, "|"),
                              padding=0, with_header_hide=None),
                  "html":
                  TableFormat(lineabove=Line("<table>", "", "", ""),
                              linebelowheader=None,
                              linebetweenrows=None,
                              linebelow=Line("</table>", "", "", ""),
                              headerrow=partial(_html_row_with_attrs, "th"),
                              datarow=partial(_html_row_with_attrs, "td"),
                              padding=0, with_header_hide=None),
                  "latex":
                  TableFormat(lineabove=_latex_line_begin_tabular,
                              linebelowheader=Line("\\hline", "", "", ""),
                              linebetweenrows=None,
                              linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
                              headerrow=_latex_row,
                              datarow=_latex_row,
                              padding=1, with_header_hide=None),
                  "latex_booktabs":
                  TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
                              linebelowheader=Line("\\midrule", "", "", ""),
                              linebetweenrows=None,
                              linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
                              headerrow=_latex_row,
                              datarow=_latex_row,
                              padding=1, with_header_hide=None),
                  "tsv":
                  TableFormat(lineabove=None, linebelowheader=None,
                              linebetweenrows=None, linebelow=None,
                              headerrow=DataRow("", "\t", ""),
                              datarow=DataRow("", "\t", ""),
                              padding=0, with_header_hide=None)}
# Public, alphabetically sorted list of supported format names.
# (sorted() already returns a list, so the redundant list() wrapper is gone.)
tabulate_formats = sorted(_table_formats.keys())
_multiline_codes = re.compile(r"\r|\n|\r\n")
_multiline_codes_bytes = re.compile(b"\r|\n|\r\n")
# ANSI color codes.  In the bytes pattern the backslashes are doubled:
# b"\[" / b"\d" / b"\;" are invalid escape sequences (DeprecationWarning
# since Python 3.6); the compiled pattern is byte-for-byte identical.
_invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m")  # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\\[\\d*m|\x1b\\[\\d*\\;\\d*\\;\\d*m")  # ANSI color codes
def simple_separated_format(separator):
    """Construct a simple TableFormat with columns separated by a separator.

    >>> tsv = simple_separated_format("\\t") ; \
        tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
    True

    """
    row = DataRow('', separator, '')
    return TableFormat(lineabove=None, linebelowheader=None,
                       linebetweenrows=None, linebelow=None,
                       headerrow=row, datarow=row,
                       padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string, inttype=int):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is inttype or \
(isinstance(string, _binary_type) or isinstance(string, _text_type)) \
and \
_isconvertible(inttype, string)
def _type(string, has_invisible=True):
    """The least generic type (type(None), int, float, str, unicode).

    >>> _type(None) is type(None)
    True
    >>> _type("foo") is type("")
    True
    >>> _type("1") is type(1)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    """
    # strip zero-width ANSI codes first so colored "42" is still an int
    if has_invisible and isinstance(string, (_text_type, _binary_type)):
        string = _strip_invisible(string)
    if string is None:
        return _none_type
    if hasattr(string, "isoformat"):  # datetime.datetime, date, and time
        return _text_type
    if _isint(string):
        return int
    if _isint(string, _long_type):
        return _long_type
    if _isnumber(string):
        return float
    if isinstance(string, _binary_type):
        return _binary_type
    return _text_type
def _afterpoint(string):
    """Symbols after a decimal point, -1 if the string lacks the decimal point.

    >>> _afterpoint("123.45")
    2
    >>> _afterpoint("1001")
    -1
    >>> _afterpoint("eggs")
    -1
    >>> _afterpoint("123e45")
    2
    """
    if not _isnumber(string):
        return -1  # not a number
    if _isint(string):
        return -1  # integers have no fractional part
    pos = string.rfind(".")
    if pos < 0:
        # no dot: count digits after a scientific-notation exponent marker
        pos = string.lower().rfind("e")
    if pos < 0:
        return -1  # no point
    return len(string) - pos - 1
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:>%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:<%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:^%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padnone(ignore_width, s):
return s
def _strip_invisible(s):
    "Remove invisible ANSI color codes."
    # Text and byte strings use separate pre-compiled module-level patterns
    # because a str pattern cannot be applied to bytes (and vice versa).
    if isinstance(s, _text_type):
        return re.sub(_invisible_codes, "", s)
    else:  # a bytestring
        return re.sub(_invisible_codes_bytes, "", s)
def _max_line_width(s):
"""
Visible width of a potentially multinie content.
>>> _max_line_width('this\\nis\\na\\nmultiline\\ntext')
9
"""
if not s:
return 0
return max(map(len, s.splitlines()))
def _visible_width(s):
    """Visible width of a printed string. ANSI color codes are removed.
    >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
    (5, 5)
    """
    # Non-string values are measured on their str() representation.
    if isinstance(s, _text_type) or isinstance(s, _binary_type):
        return _max_line_width(_strip_invisible(s))
    else:
        return _max_line_width(_text_type(s))
def _is_multiline(s):
    """Return True when *s* contains a CR/LF line break."""
    # Mirror _strip_invisible: str and bytes need separate patterns.
    if isinstance(s, _text_type):
        return bool(re.search(_multiline_codes, s))
    else:  # a bytestring
        return bool(re.search(_multiline_codes_bytes, s))
def _multiline_width(multiline_s, line_width_fn=len):
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
    """Return a function to calculate visible cell width."""
    # Priority order: ANSI-aware measurement first, then optional
    # wide-character (CJK) support via wcwidth, then plain len().
    if has_invisible:
        line_width_fn = _visible_width
    elif enable_widechars:  # optional wide-character support if available
        line_width_fn = wcwidth.wcswidth
    else:
        line_width_fn = len
    if is_multiline:
        # Measure the widest physical line of a multiline cell.
        width_fn = lambda s: _multiline_width(s, line_width_fn)
    else:
        width_fn = line_width_fn
    return width_fn
def _align_column_choose_padfn(strings, alignment, has_invisible):
    """Pick the pad function for *alignment* and pre-process *strings*.

    For "decimal" alignment every value is first right-padded so that all
    decimal points line up, then the whole column is flush-right padded.
    Returns the (possibly transformed) strings and the pad function.
    """
    if alignment == "right":
        strings = [s.strip() for s in strings]
        padfn = _padleft
    elif alignment == "center":
        strings = [s.strip() for s in strings]
        padfn = _padboth
    elif alignment == "decimal":
        # Count digits after the point, ignoring ANSI codes when present.
        if has_invisible:
            decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
        else:
            decimals = [_afterpoint(s) for s in strings]
        maxdecimals = max(decimals)
        strings = [s + (maxdecimals - decs) * " "
                   for s, decs in zip(strings, decimals)]
        padfn = _padleft
    elif not alignment:
        # Falsy alignment (None / "") disables padding altogether.
        padfn = _padnone
    else:
        # Any other value (typically "left") flushes left.
        strings = [s.strip() for s in strings]
        padfn = _padright
    return strings, padfn
def _align_column(strings, alignment, minwidth=0,
                  has_invisible=True, enable_widechars=False, is_multiline=False):
    """[string] -> [padded_string]
    >>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
    ['   12.345  ', '-1234.5    ', '    1.23   ', ' 1234.5    ', '    1e+234 ', '    1.0e234']
    >>> list(map(str,_align_column(['123.4', '56.7890'], None)))
    ['123.4', '56.7890']
    """
    strings, padfn = _align_column_choose_padfn(strings, alignment, has_invisible)
    width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
    # Every cell is padded to the widest cell (never below minwidth).
    s_widths = list(map(width_fn, strings))
    maxwidth = max(max(s_widths), minwidth)
    # TODO: refactor column alignment in single-line and multiline modes
    if is_multiline:
        if not enable_widechars and not has_invisible:
            # Simple case: pad each physical line of each cell to maxwidth.
            padded_strings = [
                "\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
                for ms in strings]
        else:
            # enable wide-character width corrections
            s_lens = [max((len(s) for s in re.split("[\r\n]", ms))) for ms in strings]
            visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
            # wcswidth and _visible_width don't count invisible characters;
            # padfn doesn't need to apply another correction
            if strings[0] == '':
                strings[0] = ' '
            padded_strings = ["\n".join([padfn(w, s) for s in (ms.splitlines() or ms)])
                              for ms, w in zip(strings, visible_widths)]
    else:  # single-line cell values
        if not enable_widechars and not has_invisible:
            padded_strings = [padfn(maxwidth, s) for s in strings]
        else:
            # enable wide-character width corrections
            s_lens = list(map(len, strings))
            visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
            # wcswidth and _visible_width don't count invisible characters;
            # padfn doesn't need to apply another correction
            padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)]
    return padded_strings
def _more_generic(type1, type2):
    """Return the more generic of two cell types.

    Generality ranking: NoneType < int < float < bytes < str; any unknown
    type ranks as most generic (str).
    """
    types = {_none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4}
    invtypes = {4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type}
    moregeneric = max(types.get(type1, 4), types.get(type2, 4))
    return invtypes[moregeneric]
def _column_type(values, has_invisible=True):
    """The least generic type all column values are convertible to.
    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    >>> import datetime as dt
    >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
    True
    """
    # reduce starts from int, so an empty column is typed as int.
    return reduce(_more_generic, [type(v) for v in values], int)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
    """Format a value according to its type.
    Unicode is supported:
    >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
        tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
        good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430      \\u0446\\u0438\\u0444\\u0440\\u0430\\n-------  -------\\n\\u0430\\u0437             2\\n\\u0431\\u0443\\u043a\\u0438           4' ; \
        tabulate(tbl, headers=hrow) == good_result
    True
    """
    # Missing values are rendered as the caller-provided placeholder.
    if val is None:
        return missingval
    if valtype in [int, _long_type, _text_type]:
        return "{0}".format(val)
    elif valtype is _binary_type:
        try:
            return _text_type(val, "ascii")
        except TypeError:
            # val is not actually bytes (column typed bytes but this cell
            # is something else); fall back to plain str conversion.
            return _text_type(val)
    elif valtype is float:
        is_a_colored_number = has_invisible and isinstance(val, (_text_type, _binary_type))
        if is_a_colored_number:
            # Re-insert the reformatted number back inside its ANSI codes.
            raw_val = _strip_invisible(val)
            formatted_val = format(float(raw_val), floatfmt)
            return val.replace(raw_val, formatted_val)
        elif not floatfmt:
            # No explicit format spec: use the module default renderer.
            return float_format(val)
        else:
            return format(float(val), floatfmt)
    else:
        return "{0}".format(val)
def _align_header(header, alignment, width, visible_width, enable_widechars=False, is_multiline=False):
    """Pad *header* to *width* according to *alignment*.

    *visible_width* is the width as measured without invisible ANSI codes;
    the difference is added back so the visible text fills *width* columns.
    Multiline headers are aligned line by line.
    """
    if is_multiline:
        header_lines = re.split(_multiline_codes, header)
        padded_lines = [_align_header(h, alignment, width, visible_width) for h in header_lines]
        return "\n".join(padded_lines)
    # else: not multiline
    ninvisible = max(0, len(header) - visible_width)
    width += ninvisible
    if alignment == "left":
        return _padright(width, header)
    elif alignment == "center":
        return _padboth(width, header)
    elif not alignment:
        # Falsy alignment: no padding at all.
        return "{0}".format(header)
    else:
        return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
    """Transform a supported data type to a list of lists, and a list of headers.
    Supported tabular data types:
    * list-of-lists or another iterable of iterables
    * list of named tuples (usually used with headers="keys")
    * list of dicts (usually used with headers="keys")
    * list of OrderedDicts (usually used with headers="keys")
    * 2D NumPy arrays
    * NumPy record arrays (usually used with headers="keys")
    * dict of iterables (usually used with headers="keys")
    * pandas.DataFrame (usually used with headers="keys")
    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".
    """
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict
            keys = tabular_data.keys()
            rows = list(izip_longest(*tabular_data.values()))  # columns have to be transposed
        elif hasattr(tabular_data, "index"):
            # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
            keys = tabular_data.keys()
            vals = tabular_data.values  # values matrix doesn't need to be transposed
            names = tabular_data.index
            # Prepend the index value to every row.
            rows = [[v] + list(row) for v, row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
        if headers == "keys":
            headers = list(map(_text_type, keys))  # headers should be strings
    else:  # it's a usual an iterable of iterables, or a NumPy array
        rows = list(tabular_data)
        if (headers == "keys" and
                hasattr(tabular_data, "dtype") and
                getattr(tabular_data.dtype, "names")):
            # numpy record array
            headers = tabular_data.dtype.names
        elif (headers == "keys"
              and len(rows) > 0
              and isinstance(rows[0], tuple)
              and hasattr(rows[0], "_fields")):
            # namedtuple
            headers = list(map(_text_type, rows[0]._fields))
        elif (len(rows) > 0
              and isinstance(rows[0], dict)):
            # dict or OrderedDict
            uniq_keys = set()  # implements hashed lookup
            keys = []  # storage for set
            if headers == "firstrow":
                firstdict = rows[0] if len(rows) > 0 else {}
                keys.extend(firstdict.keys())
                uniq_keys.update(keys)
                rows = rows[1:]
            for row in rows:
                for k in row.keys():
                    # Save unique items in input order
                    if k not in uniq_keys:
                        keys.append(k)
                        uniq_keys.add(k)
            if headers == 'keys':
                headers = keys
            elif isinstance(headers, dict):
                # a dict of headers for a list of dicts
                headers = [headers.get(k, k) for k in keys]
                headers = list(map(_text_type, headers))
            elif headers == "firstrow":
                if len(rows) > 0:
                    # The first dict supplies header display names.
                    headers = [firstdict.get(k, k) for k in keys]
                    headers = list(map(_text_type, headers))
                else:
                    headers = []
            elif headers:
                raise ValueError('headers for a list of dicts is not a dict or a keyword')
            # Missing keys render as None cells.
            rows = [[row.get(k) for k in keys] for row in rows]
        elif headers == "keys" and len(rows) > 0:
            # keys are column indices
            headers = list(map(_text_type, range(len(rows[0]))))
    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0]))  # headers should be strings
        rows = rows[1:]
    headers = list(map(_text_type, headers))
    rows = list(map(list, rows))
    # pad with empty headers for initial columns if necessary
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            # Short header lists name the LAST columns (R / pandas style).
            headers = [""] * (ncols - nhs) + headers
    return rows, headers
def tabulate(tabular_data, headers=(), tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=""):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["this\\nis\\na multiline\\ntext", "41.9999", "foo\\nbar"], ["NULL", "451.0", ""]],
... ["text", "numbers", "other"], "grid"))
+-------------+----------+-------+
| text | numbers | other |
+=============+==========+=======+
| this | 41.9999 | foo |
| is | | bar |
| a multiline | | |
| text | | |
+-------------+----------+-------+
| NULL | 451 | |
+-------------+----------+-------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings │ numbers │
╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
note that reStructuredText accepts also "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
\end{tabular}
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
is_multiline = _is_multiline(plain_text)
width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval, has_invisible) for v in c]
for c, ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0] * len(cols)
cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw, width_fn(h), enable_widechars, is_multiline)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns, is_multiline)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
    "Return a string which represents a row of data cells."
    if not rowfmt:
        return None
    # rowfmt is either a callable (custom renderer taking cells, widths,
    # aligns) or a DataRow (begin, sep, end) tuple.
    if hasattr(rowfmt, "__call__"):
        return rowfmt(padded_cells, colwidths, colaligns)
    else:
        return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
    "Return a string which represents a horizontal line."
    if not linefmt:
        return None
    # linefmt is either a callable (custom renderer) or a Line
    # (begin, hline, sep, end) tuple.
    if hasattr(linefmt, "__call__"):
        return linefmt(colwidths, colaligns)
    else:
        begin, fill, sep, end = linefmt
        # Each column becomes a run of fill characters of its width.
        cells = [fill * w for w in colwidths]
        return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " " * padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):
    """Render one single-line row and append it to *lines* (returned)."""
    lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt))
    return lines
def _append_multiline_row(lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad):
    """Append one logical row whose cells may span several physical lines.

    Shorter cells are padded vertically with blank lines so every cell
    contributes the same number of physical lines.
    """
    colwidths = [w - 2 * pad for w in padded_widths]
    cells_lines = [c.splitlines() for c in padded_multiline_cells]
    nlines = max(map(len, cells_lines))  # number of lines in the row
    # vertically pad cells where some lines are missing
    cells_lines = [(cl + [' ' * w] * (nlines - len(cl))) for cl, w in zip(cells_lines, colwidths)]
    # Transpose: one list of cells per physical output line.
    lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)]
    for ln in lines_cells:
        padded_ln = _pad_row(ln, 1)
        _append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt)
    return lines
def _append_line(lines, colwidths, colaligns, linefmt):
    """Render a horizontal rule and append it to *lines* (returned)."""
    lines.append(_build_line(colwidths, colaligns, linefmt))
    return lines
def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):
    """Produce a plain-text representation of the table.

    *fmt* is a TableFormat; *headers* and *rows* already contain aligned
    cell strings; *colwidths*/*colaligns* describe each column.
    """
    lines = []
    # Elements named in with_header_hide are suppressed when headers exist.
    hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
    pad = fmt.padding
    headerrow = fmt.headerrow
    padded_widths = [(w + 2 * pad) for w in colwidths]
    if is_multiline:
        pad_row = lambda row, _: row  # do it later, in _append_multiline_row
        append_row = partial(_append_multiline_row, pad=pad)
    else:
        pad_row = _pad_row
        append_row = _append_basic_row
    padded_headers = pad_row(headers, pad)
    padded_rows = [pad_row(row, pad) for row in rows]
    if fmt.lineabove and "lineabove" not in hidden:
        _append_line(lines, padded_widths, colaligns, fmt.lineabove)
    if padded_headers:
        append_row(lines, padded_headers, padded_widths, colaligns, headerrow)
        if fmt.linebelowheader and "linebelowheader" not in hidden:
            _append_line(lines, padded_widths, colaligns, fmt.linebelowheader)
    if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
        # initial rows with a line below
        for row in padded_rows[:-1]:
            append_row(lines, row, padded_widths, colaligns, fmt.datarow)
            _append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)
        # the last row without a line below
        append_row(lines, padded_rows[-1], padded_widths, colaligns, fmt.datarow)
    else:
        for row in padded_rows:
            append_row(lines, row, padded_widths, colaligns, fmt.datarow)
    if fmt.linebelow and "linebelow" not in hidden:
        _append_line(lines, padded_widths, colaligns, fmt.linebelow)
    return "\n".join(lines)
|
crate/crash
|
src/crate/crash/tabulate.py
|
_format_table
|
python
|
def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2 * pad) for w in colwidths]
if is_multiline:
pad_row = lambda row, _: row # do it later, in _append_multiline_row
append_row = partial(_append_multiline_row, pad=pad)
else:
pad_row = _pad_row
append_row = _append_basic_row
padded_headers = pad_row(headers, pad)
padded_rows = [pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.lineabove)
if padded_headers:
append_row(lines, padded_headers, padded_widths, colaligns, headerrow)
if fmt.linebelowheader and "linebelowheader" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelowheader)
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
_append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)
# the last row without a line below
append_row(lines, padded_rows[-1], padded_widths, colaligns, fmt.datarow)
else:
for row in padded_rows:
append_row(lines, row, padded_widths, colaligns, fmt.datarow)
if fmt.linebelow and "linebelow" not in hidden:
_append_line(lines, padded_widths, colaligns, fmt.linebelow)
return "\n".join(lines)
|
Produce a plain-text representation of the table.
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/tabulate.py#L1118-L1158
|
[
"def _pad_row(cells, padding):\n if cells:\n pad = \" \" * padding\n padded_cells = [pad + cell + pad for cell in cells]\n return padded_cells\n else:\n return cells\n",
"def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):\n lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt))\n return lines\n",
"def _append_line(lines, colwidths, colaligns, linefmt):\n lines.append(_build_line(colwidths, colaligns, linefmt))\n return lines\n",
"pad_row = lambda row, _: row # do it later, in _append_multiline_row\n"
] |
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2014 Sergey Astanin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Pretty-print tabular data."""
import re
import io
from collections import namedtuple
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
def float_format(val):
    """Default float rendering: Python's plain ``str`` conversion."""
    rendered = str(val)
    return rendered
def _is_file(f):
return isinstance(f, io.IOBase)
try:
import wcwidth # optional wide-character (CJK) support
except ImportError:
wcwidth = None
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.5"
MIN_PADDING = 0
# if True, enable wide-character (CJK) support
WIDE_CHARS_MODE = wcwidth is not None
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
#     --- lineabove ---------
#         headerrow
#     --- linebelowheader ---
#         datarow
#     --- linebetweenrows ---
#     ... (more datarows) ...
#     --- linebetweenrows ---
#         last datarow
#     --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
    """Return a horizontal line with optional colons to indicate column's
    alignment (as in `pipe` output format)."""
    # One dash segment per column, joined and enclosed by pipes.
    segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
    return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| '}
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator * 2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = {"left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"'}
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = {"left": "l", "right": "r", "center": "c", "decimal": "r"}
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns):
    """Render one LaTeX table row, escaping TeX special characters."""
    def escape_char(c):
        # Map each TeX special character to its escaped form; all other
        # characters pass through unchanged.
        return LATEX_ESCAPE_RULES.get(c, c)
    escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
    # Cells are separated by '&' and the row is terminated with '\\'.
    rowfmt = DataRow("", "&", "\\\\")
    return _build_simple_row(escaped_values, rowfmt)
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
linebelowheader=Line("╞", "═", "╪", "╡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"html":
TableFormat(lineabove=Line("<table>", "", "", ""),
linebelowheader=None,
linebetweenrows=None,
linebelow=Line("</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_multiline_codes = re.compile(r"\r|\n|\r\n")
_multiline_codes_bytes = re.compile(b"\r|\n|\r\n")
_invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
    """Construct a simple TableFormat with columns separated by a separator.

    >>> tsv = simple_separated_format("\\t") ; \
    tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
    True
    """
    # Header and data rows share one (immutable) DataRow description.
    row_format = DataRow('', separator, '')
    return TableFormat(None, None, None, None,
                       headerrow=row_format,
                       datarow=row_format,
                       padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string, inttype=int):
    """
    >>> _isint("123")
    True
    >>> _isint("123.45")
    False
    """
    # An actual instance of inttype counts immediately.
    if type(string) is inttype:
        return True
    # Otherwise only strings/bytestrings are candidates for conversion.
    if not (isinstance(string, _binary_type) or isinstance(string, _text_type)):
        return False
    return _isconvertible(inttype, string)
def _type(string, has_invisible=True):
    """The least generic type (type(None), int, float, str, unicode).
    >>> _type(None) is type(None)
    True
    >>> _type("foo") is type("")
    True
    >>> _type("1") is type(1)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    >>> _type('\x1b[31m42\x1b[0m') is type(42)
    True
    """
    # Strip ANSI color codes first, so a colored "42" still counts as an int.
    if has_invisible and isinstance(string, (_text_type, _binary_type)):
        string = _strip_invisible(string)
    # The checks below must stay in this order: int before float before text.
    if string is None:
        return _none_type
    if hasattr(string, "isoformat"):  # datetime.datetime, date, and time
        return _text_type
    if _isint(string):
        return int
    if _isint(string, _long_type):
        return _long_type
    if _isnumber(string):
        return float
    if isinstance(string, _binary_type):
        return _binary_type
    return _text_type
def _afterpoint(string):
    """Symbols after a decimal point, -1 if the string lacks the decimal point.
    >>> _afterpoint("123.45")
    2
    >>> _afterpoint("1001")
    -1
    >>> _afterpoint("eggs")
    -1
    >>> _afterpoint("123e45")
    2
    """
    # Only non-integer numbers have digits "after the point".
    if not _isnumber(string) or _isint(string):
        return -1
    anchor = string.rfind(".")
    if anchor < 0:
        # No dot: measure from the exponent marker instead (e.g. "123e45").
        anchor = string.lower().rfind("e")
    if anchor < 0:
        return -1  # no point
    return len(string) - anchor - 1
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:>%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:<%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
def impl(val):
iwidth = width + len(val) - len(_strip_invisible(val)) if has_invisible else width
fmt = "{0:^%ds}" % iwidth
return fmt.format(val)
num_lines = s.splitlines()
return len(num_lines) > 1 and '\n'.join(map(impl, num_lines)) or impl(s)
def _padnone(ignore_width, s):
    """Return *s* unchanged; pad function used when alignment is disabled."""
    return s
def _strip_invisible(s):
    """Remove invisible ANSI color codes.

    Returns the same type it was given: text in, text out; bytes in,
    bytes out.
    """
    if isinstance(s, _text_type):
        return re.sub(_invisible_codes, "", s)
    else:  # a bytestring
        # BUGFIX: the replacement must be bytes as well -- on Python 3,
        # re.sub() with a bytes pattern and a str replacement raises
        # TypeError.  b"" is also valid (and identical to "") on Python 2.
        return re.sub(_invisible_codes_bytes, b"", s)
def _max_line_width(s):
"""
Visible width of a potentially multinie content.
>>> _max_line_width('this\\nis\\na\\nmultiline\\ntext')
9
"""
if not s:
return 0
return max(map(len, s.splitlines()))
def _visible_width(s):
    """Visible width of a printed string. ANSI color codes are removed.
    >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
    (5, 5)
    """
    if isinstance(s, (_text_type, _binary_type)):
        return _max_line_width(_strip_invisible(s))
    # Non-string values are measured via their text representation.
    return _max_line_width(_text_type(s))
def _is_multiline(s):
    """Return True if *s* contains any CR/LF line-break characters."""
    pattern = _multiline_codes if isinstance(s, _text_type) else _multiline_codes_bytes
    return re.search(pattern, s) is not None
def _multiline_width(multiline_s, line_width_fn=len):
return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
def _choose_width_fn(has_invisible, enable_widechars, is_multiline):
"""Return a function to calculate visible cell width."""
if has_invisible:
line_width_fn = _visible_width
elif enable_widechars: # optional wide-character support if available
line_width_fn = wcwidth.wcswidth
else:
line_width_fn = len
if is_multiline:
width_fn = lambda s: _multiline_width(s, line_width_fn)
else:
width_fn = line_width_fn
return width_fn
def _align_column_choose_padfn(strings, alignment, has_invisible):
    """Pick the pad function for *alignment*, pre-processing *strings* as needed."""
    if alignment == "right":
        return [s.strip() for s in strings], _padleft
    if alignment == "center":
        return [s.strip() for s in strings], _padboth
    if alignment == "decimal":
        # Right-pad each number so all decimal points line up, then flush right.
        if has_invisible:
            decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
        else:
            decimals = [_afterpoint(s) for s in strings]
        most = max(decimals)
        aligned = [s + (most - decs) * " " for s, decs in zip(strings, decimals)]
        return aligned, _padleft
    if not alignment:
        return strings, _padnone
    # Anything else (typically "left") flushes left.
    return [s.strip() for s in strings], _padright
def _align_column(strings, alignment, minwidth=0,
                  has_invisible=True, enable_widechars=False, is_multiline=False):
    """[string] -> [padded_string]
    >>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
    [' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
    >>> list(map(str,_align_column(['123.4', '56.7890'], None)))
    ['123.4', '56.7890']
    """
    # Pre-process values (strip / decimal-align) and pick the pad direction.
    strings, padfn = _align_column_choose_padfn(strings, alignment, has_invisible)
    width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)
    s_widths = list(map(width_fn, strings))
    # Every cell is padded to the widest cell, but never below minwidth.
    maxwidth = max(max(s_widths), minwidth)
    # TODO: refactor column alignment in single-line and multiline modes
    if is_multiline:
        if not enable_widechars and not has_invisible:
            # Simple case: pad every physical line of every cell to maxwidth.
            padded_strings = [
                "\n".join([padfn(maxwidth, s) for s in ms.splitlines()])
                for ms in strings]
        else:
            # enable wide-character width corrections
            s_lens = [max((len(s) for s in re.split("[\r\n]", ms))) for ms in strings]
            visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
            # wcswidth and _visible_width don't count invisible characters;
            # padfn doesn't need to apply another correction
            # NOTE(review): mutating strings[0] here looks like a workaround
            # for an empty first cell producing no padded lines below
            # (''.splitlines() is []) -- confirm before changing.
            if strings[0] == '':
                strings[0] = ' '
            padded_strings = ["\n".join([padfn(w, s) for s in (ms.splitlines() or ms)])
                              for ms, w in zip(strings, visible_widths)]
    else: # single-line cell values
        if not enable_widechars and not has_invisible:
            padded_strings = [padfn(maxwidth, s) for s in strings]
        else:
            # enable wide-character width corrections
            s_lens = list(map(len, strings))
            visible_widths = [maxwidth - (w - l) for w, l in zip(s_widths, s_lens)]
            # wcswidth and _visible_width don't count invisible characters;
            # padfn doesn't need to apply another correction
            padded_strings = [padfn(w, s) for s, w in zip(strings, visible_widths)]
    return padded_strings
def _more_generic(type1, type2):
    """Return the more generic of two cell types.

    Generality order: none < int < float < bytes < text; unknown types
    count as text (the most generic).
    """
    ranking = {_none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4}
    by_rank = {0: _none_type, 1: int, 2: float, 3: _binary_type, 4: _text_type}
    winner = max(ranking.get(type1, 4), ranking.get(type2, 4))
    return by_rank[winner]
def _column_type(values, has_invisible=True):
    """The least generic type all column values already have.

    Folds the *runtime* types of the cell objects together with
    _more_generic, starting from int (so an all-int column stays int,
    while mixed content widens towards text).
    """
    return reduce(_more_generic, (type(v) for v in values), int)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
    """Format a value according to its type.
    Unicode is supported:
    >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
        tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
        good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430      \\u0446\\u0438\\u0444\\u0440\\u0430\\n-------  -------\\n\\u0430\\u0437             2\\n\\u0431\\u0443\\u043a\\u0438           4' ; \
        tabulate(tbl, headers=hrow) == good_result
    True
    """
    # None cells render as the caller-supplied placeholder.
    if val is None:
        return missingval

    if valtype in [int, _long_type, _text_type]:
        return "{0}".format(val)
    elif valtype is _binary_type:
        # Try ASCII decoding first, fall back to the generic text conversion.
        try:
            return _text_type(val, "ascii")
        except TypeError:
            return _text_type(val)
    elif valtype is float:
        # A "colored number" is a float wrapped in ANSI escape codes; format
        # the visible digits and splice them back between the codes.
        is_a_colored_number = has_invisible and isinstance(val, (_text_type, _binary_type))
        if is_a_colored_number:
            raw_val = _strip_invisible(val)
            formatted_val = format(float(raw_val), floatfmt)
            return val.replace(raw_val, formatted_val)
        elif not floatfmt:
            # NOTE(review): `float_format` is not defined anywhere in this
            # module chunk -- confirm it exists elsewhere in the file,
            # otherwise an empty `floatfmt` makes this branch raise NameError.
            return float_format(val)
        else:
            return format(float(val), floatfmt)
    else:
        return "{0}".format(val)
def _align_header(header, alignment, width, visible_width, enable_widechars=False, is_multiline=False):
    """Pad *header* to *width* according to *alignment*.

    Multi-line headers are split and each line is aligned independently;
    *visible_width* compensates for invisible (ANSI) characters.
    """
    if is_multiline:
        pieces = re.split(_multiline_codes, header)
        return "\n".join(_align_header(piece, alignment, width, visible_width)
                         for piece in pieces)
    # not multiline: widen the budget by the invisible character count
    width += max(0, len(header) - visible_width)
    if alignment == "left":
        return _padright(width, header)
    if alignment == "center":
        return _padboth(width, header)
    if not alignment:
        return "{0}".format(header)
    return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
    """Transform a supported data type to a list of lists, and a list of headers.
    Supported tabular data types:
    * list-of-lists or another iterable of iterables
    * list of named tuples (usually used with headers="keys")
    * list of dicts (usually used with headers="keys")
    * list of OrderedDicts (usually used with headers="keys")
    * 2D NumPy arrays
    * NumPy record arrays (usually used with headers="keys")
    * dict of iterables (usually used with headers="keys")
    * pandas.DataFrame (usually used with headers="keys")
    The first row can be used as headers if headers="firstrow",
    column indices can be used as headers if headers="keys".
    """
    if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
        # dict-like and pandas.DataFrame?
        if hasattr(tabular_data.values, "__call__"):
            # likely a conventional dict
            keys = tabular_data.keys()
            rows = list(izip_longest(*tabular_data.values()))  # columns have to be transposed
        elif hasattr(tabular_data, "index"):
            # values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
            keys = tabular_data.keys()
            vals = tabular_data.values  # values matrix doesn't need to be transposed
            names = tabular_data.index
            # prepend the index value to every row
            rows = [[v] + list(row) for v, row in zip(names, vals)]
        else:
            raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
        if headers == "keys":
            headers = list(map(_text_type, keys))  # headers should be strings
    else:  # it's a usual an iterable of iterables, or a NumPy array
        rows = list(tabular_data)
        if (headers == "keys" and
            hasattr(tabular_data, "dtype") and
            getattr(tabular_data.dtype, "names")):
            # numpy record array
            headers = tabular_data.dtype.names
        elif (headers == "keys"
              and len(rows) > 0
              and isinstance(rows[0], tuple)
              and hasattr(rows[0], "_fields")):
            # namedtuple
            headers = list(map(_text_type, rows[0]._fields))
        elif (len(rows) > 0
              and isinstance(rows[0], dict)):
            # dict or OrderedDict
            uniq_keys = set()  # implements hashed lookup
            keys = []  # storage for set
            if headers == "firstrow":
                firstdict = rows[0] if len(rows) > 0 else {}
                keys.extend(firstdict.keys())
                uniq_keys.update(keys)
                rows = rows[1:]
            for row in rows:
                for k in row.keys():
                    # Save unique items in input order
                    if k not in uniq_keys:
                        keys.append(k)
                        uniq_keys.add(k)
            if headers == 'keys':
                headers = keys
            elif isinstance(headers, dict):
                # a dict of headers for a list of dicts
                headers = [headers.get(k, k) for k in keys]
                headers = list(map(_text_type, headers))
            elif headers == "firstrow":
                # firstdict was captured above (same "firstrow" condition)
                if len(rows) > 0:
                    headers = [firstdict.get(k, k) for k in keys]
                    headers = list(map(_text_type, headers))
                else:
                    headers = []
            elif headers:
                raise ValueError('headers for a list of dicts is not a dict or a keyword')
            # missing keys render as None cells
            rows = [[row.get(k) for k in keys] for row in rows]
        elif headers == "keys" and len(rows) > 0:
            # keys are column indices
            headers = list(map(_text_type, range(len(rows[0]))))
    # take headers from the first row if necessary
    if headers == "firstrow" and len(rows) > 0:
        headers = list(map(_text_type, rows[0]))  # headers should be strings
        rows = rows[1:]
    headers = list(map(_text_type, headers))
    rows = list(map(list, rows))
    # pad with empty headers for initial columns if necessary
    if headers and len(rows) > 0:
        nhs = len(headers)
        ncols = len(rows[0])
        if nhs < ncols:
            headers = [""] * (ncols - nhs) + headers
    return rows, headers
def tabulate(tabular_data, headers=(), tablefmt="simple",
             floatfmt="g", numalign="decimal", stralign="left",
             missingval=""):
    """Format a fixed width table for pretty printing.
    >>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
    ---  ---------
      1      2.34
    -56      8.999
      2  10001
    ---  ---------
    The first required argument (`tabular_data`) can be a
    list-of-lists (or another iterable of iterables), a list of named
    tuples, a dictionary of iterables, an iterable of dictionaries,
    a two-dimensional NumPy array, NumPy record array, or a Pandas'
    dataframe.
    Table headers
    -------------
    To print nice column headers, supply the second argument (`headers`):
    - `headers` can be an explicit list of column headers
    - if `headers="firstrow"`, then the first row of data is used
    - if `headers="keys"`, then dictionary keys or column indices are used
    Otherwise a headerless table is produced.
    If the number of headers is less than the number of columns, they
    are supposed to be names of the last columns. This is consistent
    with the plain-text format of R and Pandas' dataframes.
    >>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
    ...       headers="firstrow"))
           sex      age
    -----  -----  -----
    Alice  F         24
    Bob    M         19
    Column alignment
    ----------------
    `tabulate` tries to detect column types automatically, and aligns
    the values properly. By default it aligns decimal points of the
    numbers (or flushes integer numbers to the right), and flushes
    everything else to the left. Possible column alignments
    (`numalign`, `stralign`) are: "right", "center", "left", "decimal"
    (only for `numalign`), and None (to disable alignment).
    Table formats
    -------------
    `floatfmt` is a format specification used for columns which
    contain numeric data with a decimal point.
    `None` values are replaced with a `missingval` string:
    >>> print(tabulate([["spam", 1, None],
    ...                 ["eggs", 42, 3.14],
    ...                 ["other", None, 2.7]], missingval="?"))
    -----  --  ----
    spam    1  ?
    eggs   42  3.14
    other   ?  2.7
    -----  --  ----
    Various plain-text table formats (`tablefmt`) are supported:
    'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
    'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
    currently supported formats.
    "plain" format doesn't use any pseudographics to draw tables,
    it separates columns with a double space:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "plain"))
    strings      numbers
    spam         41.9999
    eggs        451
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
    spam   41.9999
    eggs  451
    "simple" format is like Pandoc simple_tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "simple"))
    strings      numbers
    ---------  ---------
    spam         41.9999
    eggs        451
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
    ----  --------
    spam   41.9999
    eggs  451
    ----  --------
    "grid" is similar to tables produced by Emacs table.el package or
    Pandoc grid_tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "grid"))
    +-----------+-----------+
    | strings   |   numbers |
    +===========+===========+
    | spam      |   41.9999 |
    +-----------+-----------+
    | eggs      |  451      |
    +-----------+-----------+
    >>> print(tabulate([["this\\nis\\na multiline\\ntext", "41.9999", "foo\\nbar"], ["NULL", "451.0", ""]],
    ...                ["text", "numbers", "other"], "grid"))
    +-------------+----------+-------+
    | text        |  numbers | other |
    +=============+==========+=======+
    | this        |  41.9999 | foo   |
    | is          |          | bar   |
    | a multiline |          |       |
    | text        |          |       |
    +-------------+----------+-------+
    | NULL        | 451      |       |
    +-------------+----------+-------+
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
    +------+----------+
    | spam |  41.9999 |
    +------+----------+
    | eggs | 451      |
    +------+----------+
    "fancy_grid" draws a grid using box-drawing characters:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "fancy_grid"))
    ╒═══════════╤═══════════╕
    │ strings   │   numbers │
    ╞═══════════╪═══════════╡
    │ spam      │   41.9999 │
    ├───────────┼───────────┤
    │ eggs      │  451      │
    ╘═══════════╧═══════════╛
    "pipe" is like tables in PHP Markdown Extra extension or Pandoc
    pipe_tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "pipe"))
    | strings   |   numbers |
    |:----------|----------:|
    | spam      |   41.9999 |
    | eggs      |  451      |
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
    |:-----|---------:|
    | spam |  41.9999 |
    | eggs | 451      |
    "orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
    are slightly different from "pipe" format by not using colons to
    define column alignment, and using a "+" sign to indicate line
    intersections:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "orgtbl"))
    | strings   |   numbers |
    |-----------+-----------|
    | spam      |   41.9999 |
    | eggs      |  451      |
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
    | spam |  41.9999 |
    | eggs | 451      |
    "rst" is like a simple table format from reStructuredText; please
    note that reStructuredText accepts also "grid" tables:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
    ...                ["strings", "numbers"], "rst"))
    =========  =========
    strings      numbers
    =========  =========
    spam         41.9999
    eggs        451
    =========  =========
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
    ====  ========
    spam   41.9999
    eggs  451
    ====  ========
    "mediawiki" produces a table markup used in Wikipedia and on other
    MediaWiki-based sites:
    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="mediawiki"))
    {| class="wikitable" style="text-align: left;"
    |+ <!-- caption -->
    |-
    ! strings   !! align="right"|   numbers
    |-
    | spam      || align="right"|   41.9999
    |-
    | eggs      || align="right"|  451
    |}
    "html" produces HTML markup:
    >>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
    ...                headers="firstrow", tablefmt="html"))
    <table>
    <tr><th>strings  </th><th style="text-align: right;">  numbers</th></tr>
    <tr><td>spam     </td><td style="text-align: right;">  41.9999</td></tr>
    <tr><td>eggs     </td><td style="text-align: right;"> 451     </td></tr>
    </table>
    "latex" produces a tabular environment of LaTeX document markup:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
    \\begin{tabular}{lr}
    \\hline
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\hline
    \\end{tabular}
    "latex_booktabs" produces a tabular environment of LaTeX document markup
    using the booktabs.sty package:
    >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
    \\begin{tabular}{lr}
    \\toprule
     spam &  41.9999 \\\\
     eggs & 451      \\\\
    \\bottomrule
    \\end{tabular}
    """
    if tabular_data is None:
        tabular_data = []
    # Normalize any supported input shape to rows + header strings.
    list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)

    # optimization: look for ANSI control codes once,
    # enable smart width functions only if a control code is found
    plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
                           ['\t'.join(map(_text_type, row)) for row in list_of_lists])
    has_invisible = re.search(_invisible_codes, plain_text)
    enable_widechars = wcwidth is not None and WIDE_CHARS_MODE
    is_multiline = _is_multiline(plain_text)
    width_fn = _choose_width_fn(has_invisible, enable_widechars, is_multiline)

    # format rows and columns, convert numeric values to strings
    cols = list(zip(*list_of_lists))
    coltypes = list(map(_column_type, cols))
    cols = [[_format(v, ct, floatfmt, missingval, has_invisible) for v in c]
             for c, ct in zip(cols, coltypes)]

    # align columns: numeric columns use numalign, everything else stralign
    aligns = [numalign if ct in [int, float] else stralign for ct in coltypes]
    minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0] * len(cols)
    cols = [_align_column(c, a, minw, has_invisible, enable_widechars, is_multiline)
            for c, a, minw in zip(cols, aligns, minwidths)]

    if headers:
        # align headers and add headers
        t_cols = cols or [['']] * len(headers)
        t_aligns = aligns or [stralign] * len(headers)
        minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
        headers = [_align_header(h, a, minw, width_fn(h), enable_widechars, is_multiline)
                   for h, a, minw in zip(headers, t_aligns, minwidths)]
        rows = list(zip(*cols))
    else:
        minwidths = [width_fn(c[0]) for c in cols]
        rows = list(zip(*cols))

    # Unknown format names silently fall back to "simple".
    if not isinstance(tablefmt, TableFormat):
        tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])

    return _format_table(tablefmt, headers, rows, minwidths, aligns, is_multiline)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill * w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
def _pad_row(cells, padding):
if cells:
pad = " " * padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):
    """Render one single-line row via _build_row and append it to *lines*."""
    lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt))
    return lines
def _append_multiline_row(lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad):
    """Append one logical row whose cells may span several physical lines."""
    # Content width per column = padded width minus the padding on each side.
    colwidths = [w - 2 * pad for w in padded_widths]
    cells_lines = [c.splitlines() for c in padded_multiline_cells]
    nlines = max(map(len, cells_lines))  # number of lines in the row
    # vertically pad cells where some lines are missing
    cells_lines = [(cl + [' ' * w] * (nlines - len(cl))) for cl, w in zip(cells_lines, colwidths)]
    # Transpose: one list of cells per physical output line.
    lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)]
    for ln in lines_cells:
        # NOTE(review): horizontal padding is hard-coded to 1 here although
        # this function receives a ``pad`` argument -- confirm whether this
        # should be ``_pad_row(ln, pad)`` for zero-padding table formats.
        padded_ln = _pad_row(ln, 1)
        _append_basic_row(lines, padded_ln, colwidths, colaligns, rowfmt)
    return lines
def _append_line(lines, colwidths, colaligns, linefmt):
    """Render one horizontal separator line and append it to *lines*."""
    lines.append(_build_line(colwidths, colaligns, linefmt))
    return lines
|
crate/crash
|
src/crate/crash/layout.py
|
create_layout
|
python
|
def create_layout(lexer=None,
                  reserve_space_for_menu=8,
                  get_prompt_tokens=None,
                  get_bottom_toolbar_tokens=None,
                  extra_input_processors=None, multiline=False,
                  wrap_lines=True):
    """Create a custom prompt_toolkit ``Layout`` for the Crash input REPL.

    The layout stacks the main input window (with a floating completion
    menu and a reverse-i-search toolbar) above a one-line bottom status
    row: a left-aligned session toolbar and a fixed-width right-aligned
    "[ctrl+d] Exit" side bar.

    :param lexer: pygments lexer class used to highlight the input buffer
    :param reserve_space_for_menu: minimal height kept available so the
        completion menu can be displayed
    :param get_prompt_tokens: callable producing the prompt tokens (shown
        via the window's prompt margin in multiline mode)
    :param get_bottom_toolbar_tokens: callable producing the tokens of the
        left-aligned bottom toolbar
    :param extra_input_processors: extra processors appended to the
        default input-processor list
    :param multiline: bool or filter enabling multiline editing features
    :param wrap_lines: whether long input lines are wrapped
    """
    # Create processors list.
    input_processors = [
        ConditionalProcessor(
            # Highlight the reverse-i-search buffer
            HighlightSearchProcessor(preview_search=True),
            HasFocus(SEARCH_BUFFER)),
    ]
    if extra_input_processors:
        input_processors.extend(extra_input_processors)
    lexer = PygmentsLexer(lexer, sync_from_start=True)
    multiline = to_cli_filter(multiline)
    # Static token list for the right-aligned side bar.
    sidebar_token = [
        (Token.Toolbar.Status.Key, "[ctrl+d]"),
        (Token.Toolbar.Status, " Exit")
    ]
    sidebar_width = token_list_width(sidebar_token)
    get_sidebar_tokens = lambda _: sidebar_token

    def get_height(cli):
        # If there is an autocompletion menu to be shown, make sure that our
        # layout has at least a minimal height in order to display it.
        if reserve_space_for_menu and not cli.is_done:
            buff = cli.current_buffer
            # Reserve the space, either when there are completions, or when
            # `complete_while_typing` is true and we expect completions very
            # soon.
            if buff.complete_while_typing() or buff.complete_state is not None:
                return LayoutDimension(min=reserve_space_for_menu)
        return LayoutDimension()

    # Create and return Container instance.
    return HSplit([
        VSplit([
            HSplit([
                # The main input, with completion menus floating on top of it.
                FloatContainer(
                    HSplit([
                        Window(
                            BufferControl(
                                input_processors=input_processors,
                                lexer=lexer,
                                # enable preview search for reverse-i-search
                                preview_search=True),
                            get_height=get_height,
                            wrap_lines=wrap_lines,
                            left_margins=[
                                # In multiline mode, use the window margin to display
                                # the prompt and continuation tokens.
                                ConditionalMargin(
                                    PromptMargin(get_prompt_tokens),
                                    filter=multiline
                                )
                            ],
                        ),
                    ]),
                    [
                        # Completion menu
                        Float(xcursor=True,
                              ycursor=True,
                              content=CompletionsMenu(
                                  max_height=16,
                                  scroll_offset=1,
                                  extra_filter=HasFocus(DEFAULT_BUFFER))
                              ),
                    ]
                ),
                # reverse-i-search toolbar (ctrl+r)
                ConditionalContainer(SearchToolbar(), multiline),
            ])
        ]),
    ] + [
        VSplit([
            # Left-Aligned Session Toolbar
            ConditionalContainer(
                Window(
                    TokenListControl(get_bottom_toolbar_tokens),
                    height=LayoutDimension.exact(1)
                ),
                filter=~IsDone() & RendererHeightIsKnown()),
            # Right-Aligned Container
            ConditionalContainer(
                Window(
                    TokenListControl(get_sidebar_tokens),
                    height=LayoutDimension.exact(1),
                    width=LayoutDimension.exact(sidebar_width)
                ),
                filter=~IsDone() & RendererHeightIsKnown())
        ])
    ])
|
Creates a custom `Layout` for the Crash input REPL
This layout includes:
* a bottom left-aligned session toolbar container
* a bottom right-aligned side-bar container
+-------------------------------------------+
| cr> select 1; |
| |
| |
+-------------------------------------------+
| bottom_toolbar_tokens sidebar_tokens |
+-------------------------------------------+
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/layout.py#L39-L157
| null |
# vim: set fileencodings=utf-8
# -*- coding: utf-8; -*-
# PYTHON_ARGCOMPLETE_OK
#
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
from prompt_toolkit.filters import IsDone, HasFocus, RendererHeightIsKnown, to_cli_filter
from prompt_toolkit.enums import DEFAULT_BUFFER, SEARCH_BUFFER
from prompt_toolkit.token import Token
from prompt_toolkit.layout import Window, HSplit, VSplit, Float
from prompt_toolkit.layout.containers import ConditionalContainer, FloatContainer
from prompt_toolkit.layout.dimension import LayoutDimension
from prompt_toolkit.layout.controls import TokenListControl, BufferControl
from prompt_toolkit.layout.lexers import PygmentsLexer
from prompt_toolkit.layout.menus import CompletionsMenu
from prompt_toolkit.layout.processors import ConditionalProcessor, HighlightSearchProcessor
from prompt_toolkit.layout.toolbars import SearchToolbar
from prompt_toolkit.layout.margins import PromptMargin, ConditionalMargin
from prompt_toolkit.layout.utils import token_list_width
|
crate/crash
|
src/crate/crash/command.py
|
parse_config_path
|
python
|
def parse_config_path(args=sys.argv):
    """Preprocess *args* (sys.argv by default) and extract the --config argument.

    The ``--config`` flag and its value (if any) are removed from *args*
    in place; the value is returned, or CONFIG_PATH when the flag is
    absent or has no value.
    """
    path = CONFIG_PATH
    if '--config' not in args:
        return path
    flag_index = args.index('--config')
    if flag_index + 1 < len(args):
        path = args.pop(flag_index + 1)
    args.pop(flag_index)
    return path
|
Preprocess sys.argv and extract --config argument.
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/command.py#L85-L96
| null |
# vim: set fileencodings=utf-8
# -*- coding: utf-8; -*-
# PYTHON_ARGCOMPLETE_OK
#
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
from __future__ import print_function
import logging
import os
import re
import sys
import urllib3
from getpass import getpass
from appdirs import user_data_dir, user_config_dir
from argparse import ArgumentParser
from collections import namedtuple
from crate.client import connect
from crate.client.exceptions import ConnectionError, ProgrammingError
from distutils.version import StrictVersion
from urllib3.exceptions import LocationParseError
from operator import itemgetter
from .commands import built_in_commands, Command
from .config import Configuration, ConfigurationError
from .outputs import OutputWriter
from .printer import ColorPrinter, PrintWrapper
from .sysinfo import SysInfoCommand
from ..crash import __version__ as crash_version
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
    from logging import NullHandler
except ImportError:
    # Python < 2.7 has no logging.NullHandler; provide a minimal stand-in
    # that silently discards every record.
    from logging import Handler
    class NullHandler(Handler):
        def emit(self, record):
            # Intentionally do nothing: suppresses "no handlers could be found".
            pass
# Silence crate client logging unless the application configures handlers.
logging.getLogger('crate').addHandler(NullHandler())

# Per-user locations of the readline history and the crash config file.
USER_DATA_DIR = user_data_dir("Crate", "Crate")
HISTORY_FILE_NAME = 'crash_history'
HISTORY_PATH = os.path.join(USER_DATA_DIR, HISTORY_FILE_NAME)
USER_CONFIG_DIR = user_config_dir("Crate", "Crate")
CONFIG_FILE_NAME = 'crash.cfg'
CONFIG_PATH = os.path.join(USER_CONFIG_DIR, CONFIG_FILE_NAME)

# Shape of a single statement's result as rendered by the shell.
Result = namedtuple('Result', ['cols',
                               'rows',
                               'rowcount',
                               'duration',
                               'output_width'])
# Connection-time metadata (current user and schema).
ConnectionMeta = namedtuple('ConnectionMeta', ['user', 'schema'])

# Minimum server versions -- presumably gating use of the table_schema /
# table_type information_schema columns; confirm at the call sites.
TABLE_SCHEMA_MIN_VERSION = StrictVersion("0.57.0")
TABLE_TYPE_MIN_VERSION = StrictVersion("2.0.0")
def parse_args(parser):
    """
    Parse sys.argv arguments with the given parser, enabling argcomplete
    tab completion when that package is available.
    """
    try:
        import argcomplete
    except ImportError:
        # argcomplete is optional; completion is simply skipped.
        pass
    else:
        argcomplete.autocomplete(parser)
    return parser.parse_args()
def boolean(v):
    """Interpret *v* as a boolean, raising ValueError for unrecognized input.

    Accepted (case-insensitively, after str() conversion):
    yes/true/t/1 -> True, no/false/f/0 -> False.
    """
    text = str(v).lower()
    if text in ("yes", "true", "t", "1"):
        return True
    if text in ("no", "false", "f", "0"):
        return False
    raise ValueError('not a boolean value')
def get_parser(output_formats=(), conf=None):
    """
    Create an argument parser that reads default values from a
    configuration file if provided.

    :param output_formats: container of valid values for ``--format``.
        The default is an immutable empty tuple instead of the previous
        mutable ``[]`` (shared-mutable-default pitfall); an empty
        container still rejects every ``--format`` value, so behavior
        is unchanged.
    :param conf: optional ``Configuration``; when given, option
        defaults are read from (and persisted into) it.
    """
    def _conf_or_default(key, value):
        # Fall back to the hard-coded default when no config is available.
        return value if conf is None else conf.get_or_set(key, value)
    parser = ArgumentParser(description='crate shell')
    parser.add_argument('-v', '--verbose', action='count',
                        dest='verbose', default=_conf_or_default('verbosity', 0),
                        help='print debug information to STDOUT')
    parser.add_argument('-A', '--no-autocomplete', action='store_false',
                        dest='autocomplete',
                        default=_conf_or_default('autocomplete', True),
                        help='disable SQL keywords autocompletion')
    parser.add_argument('-a', '--autocapitalize', action='store_true',
                        dest='autocapitalize',
                        default=False,
                        help='enable automatic capitalization of SQL keywords while typing')
    parser.add_argument('-U', '--username', type=str, metavar='USERNAME',
                        help='Authenticate as USERNAME.')
    parser.add_argument('-W', '--password', action='store_true',
                        dest='force_passwd_prompt', default=_conf_or_default('force_passwd_prompt', False),
                        help='force a password prompt')
    parser.add_argument('--schema', type=str,
                        help='default schema for statements if schema is not explicitly stated in queries')
    parser.add_argument('--history', type=str, metavar='FILENAME',
                        help='Use FILENAME as a history file', default=HISTORY_PATH)
    parser.add_argument('--config', type=str, metavar='FILENAME',
                        help='use FILENAME as a configuration file', default=CONFIG_PATH)
    # --command and --sysinfo are one-shot modes and mutually exclusive.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-c', '--command', type=str, metavar='STATEMENT',
                       help='Execute the STATEMENT and exit.')
    group.add_argument('--sysinfo', action='store_true', default=False,
                       help='print system and cluster information')
    parser.add_argument('--hosts', type=str, nargs='*',
                        default=_conf_or_default('hosts', ['localhost:4200']),
                        help='connect to HOSTS.', metavar='HOSTS')
    parser.add_argument('--verify-ssl', type=boolean, default=True,
                        help='force the verification of the server SSL certificate')
    parser.add_argument('--cert-file', type=file_with_permissions, metavar='FILENAME',
                        help='use FILENAME as the client certificate file')
    parser.add_argument('--key-file', type=file_with_permissions, metavar='FILENAME',
                        help='Use FILENAME as the client certificate key file')
    parser.add_argument('--ca-cert-file', type=file_with_permissions, metavar='FILENAME',
                        help='use FILENAME as the CA certificate file')
    parser.add_argument('--format', type=str,
                        default=_conf_or_default('format', 'tabular'),
                        choices=output_formats, metavar='FORMAT',
                        help='the output FORMAT of the SQL response')
    parser.add_argument('--version', action='store_true', default=False,
                        help='print the Crash version and exit')
    return parser
def noargs_command(fn):
    """Decorator for shell commands that take no arguments.

    The wrapper logs a critical message and returns None when any
    argument is supplied; otherwise it delegates to *fn*. The wrapped
    function's docstring is preserved for the interactive help text.
    """
    def wrapper(self, *args):
        if args:
            self.logger.critical("Command does not take any arguments.")
            return None
        return fn(self, *args)
    wrapper.__doc__ = fn.__doc__
    return wrapper
def _parse_statements(lines):
"""Return a generator of statements
Args: A list of strings that can contain one or more statements.
Statements are separated using ';' at the end of a line
Everything after the last ';' will be treated as the last statement.
>>> list(_parse_statements(['select * from ', 't1;', 'select name']))
['select * from\\nt1', 'select name']
>>> list(_parse_statements(['select * from t1;', ' ']))
['select * from t1']
"""
lines = (l.strip() for l in lines if l)
lines = (l for l in lines if l and not l.startswith('--'))
parts = []
for line in lines:
parts.append(line.rstrip(';'))
if line.endswith(';'):
yield '\n'.join(parts)
parts[:] = []
if parts:
yield '\n'.join(parts)
class CrateShell:
    """Interactive SQL shell bound to one or more CrateDB servers.

    Wraps a ``crate.client`` connection/cursor pair, dispatches
    backslash commands and SQL statements, and prints results via an
    ``OutputWriter``. Usable as a context manager (closes on exit).
    """

    def __init__(self,
                 crate_hosts=['localhost:4200'],
                 output_writer=None,
                 error_trace=False,
                 is_tty=True,
                 autocomplete=True,
                 autocapitalize=True,
                 verify_ssl=True,
                 cert_file=None,
                 key_file=None,
                 ca_cert_file=None,
                 username=None,
                 password=None,
                 schema=None,
                 timeout=None):
        # NOTE(review): the mutable default for crate_hosts is only safe
        # because it is never mutated here or in _do_connect.
        self.last_connected_servers = []
        self.exit_code = 0
        self.expanded_mode = False
        self.sys_info_cmd = SysInfoCommand(self)
        # Short aliases for built-in commands; extended by the shared
        # built_in_commands registry below.
        self.commands = {
            'q': self._quit,
            'c': self._connect,
            'connect': self._connect,
            'dt': self._show_tables,
            'sysinfo': self.sys_info_cmd.execute,
        }
        self.commands.update(built_in_commands)
        self.logger = ColorPrinter(is_tty)
        self.output_writer = output_writer or OutputWriter(PrintWrapper(), is_tty)
        self.error_trace = error_trace
        self._autocomplete = autocomplete
        self._autocapitalize = autocapitalize
        self.verify_ssl = verify_ssl
        self.cert_file = cert_file
        self.key_file = key_file
        self.ca_cert_file = ca_cert_file
        self.username = username
        self.password = password
        self.schema = schema
        self.timeout = timeout
        # establish connection
        self.cursor = None
        self.connection = None
        self._do_connect(crate_hosts)

    def __enter__(self):
        """Context-manager entry: the shell itself is the resource."""
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Context-manager exit: release cursor/connection via exit()."""
        self.exit()

    def get_num_columns(self):
        # Fixed output width; the REPL layer may override with the real
        # terminal size.
        return 80

    def should_autocomplete(self):
        """Whether SQL keyword autocompletion is enabled."""
        return self._autocomplete

    def should_autocapitalize(self):
        """Whether automatic SQL keyword capitalization is enabled."""
        return self._autocapitalize

    def pprint(self, rows, cols):
        """Render *rows*/*cols* plus cursor metadata via the output writer."""
        result = Result(cols,
                        rows,
                        self.cursor.rowcount,
                        self.cursor.duration,
                        self.get_num_columns())
        self.output_writer.write(result)

    def process_iterable(self, stdin):
        """Execute every statement parsed from *stdin*.

        Returns True when at least one statement was executed.
        """
        any_statement = False
        for statement in _parse_statements(stdin):
            self._exec(statement)
            any_statement = True
        return any_statement

    def process(self, text):
        """Dispatch *text*: a leading backslash means a built-in command,
        anything else is treated as SQL."""
        if text.startswith('\\'):
            self._try_exec_cmd(text.lstrip('\\'))
        else:
            for statement in _parse_statements([text]):
                self._exec(statement)

    def exit(self):
        """Close the shell and return the accumulated exit code."""
        self.close()
        return self.exit_code

    def close(self):
        """Release cursor and connection; raises if already closed."""
        if self.is_closed():
            raise ProgrammingError('CrateShell is already closed')
        if self.cursor:
            self.cursor.close()
            self.cursor = None
        if self.connection:
            self.connection.close()
            self.connection = None

    def is_closed(self):
        """True when either the cursor or the connection is gone."""
        return not (self.cursor and self.connection)

    @noargs_command
    def _show_tables(self, *args):
        """ print the existing tables within the 'doc' schema """
        # Column/filter names depend on the lowest server version in the
        # cluster (schema_name -> table_schema rename; table_type column
        # only exists from 2.0 on).
        v = self.connection.lowest_server_version
        schema_name = \
            "table_schema" if v >= TABLE_SCHEMA_MIN_VERSION else "schema_name"
        table_filter = \
            " AND table_type = 'BASE TABLE'" if v >= TABLE_TYPE_MIN_VERSION else ""
        self._exec("SELECT format('%s.%s', {schema}, table_name) AS name "
                   "FROM information_schema.tables "
                   "WHERE {schema} NOT IN ('sys','information_schema', 'pg_catalog')"
                   "{table_filter}"
                   .format(schema=schema_name, table_filter=table_filter))

    @noargs_command
    def _quit(self, *args):
        """ quit crash """
        self.logger.warn('Bye!')
        sys.exit(self.exit())

    def is_conn_available(self):
        # lowest_server_version stays 0.0.0 when no server ever answered.
        return self.connection and \
            self.connection.lowest_server_version != StrictVersion("0.0.0")

    def _do_connect(self, servers):
        """(Re)connect to *servers*, replacing any open cursor/connection."""
        self.last_connected_servers = servers
        if self.cursor or self.connection:
            self.close()  # reset open cursor and connection
        self.connection = connect(servers,
                                  error_trace=self.error_trace,
                                  verify_ssl_cert=self.verify_ssl,
                                  cert_file=self.cert_file,
                                  key_file=self.key_file,
                                  ca_cert=self.ca_cert_file,
                                  username=self.username,
                                  password=self.password,
                                  schema=self.schema,
                                  timeout=self.timeout)
        self.cursor = self.connection.cursor()
        self._fetch_session_info()

    def _connect(self, servers):
        """ connect to the given server, e.g.: \\connect localhost:4200 """
        self._do_connect(servers.split(' '))
        self._verify_connection(verbose=True)

    def reconnect(self):
        """Connect with same configuration and to last connected servers"""
        self._do_connect(self.last_connected_servers)

    def _verify_connection(self, verbose=False):
        """Probe every server in the pool and log CONNECT OK/ERROR.

        With *verbose* the per-server results table is printed and, on
        success, cluster/node checks are run as well.
        """
        results = []
        failed = 0
        client = self.connection.client
        for server in client.server_pool.keys():
            try:
                infos = client.server_infos(server)
            except ConnectionError as e:
                failed += 1
                results.append([server, None, '0.0.0', False, e.message])
            else:
                results.append(infos + (True, 'OK', ))
        # sort by CONNECTED DESC, SERVER_URL
        results.sort(key=itemgetter(3), reverse=True)
        results.sort(key=itemgetter(0))
        if verbose:
            cols = ['server_url', 'node_name', 'version', 'connected', 'message']
            self.pprint(results, cols)
        if failed == len(results):
            self.logger.critical('CONNECT ERROR')
        else:
            self.logger.info('CONNECT OK')
            # Execute cluster/node checks only in verbose mode
            if verbose:
                SysInfoCommand.CLUSTER_INFO['information_schema_query'] = \
                    get_information_schema_query(self.connection.lowest_server_version)
                # check for failing node and cluster checks
                built_in_commands['check'](self, startup=True)

    def _fetch_session_info(self):
        """Cache (user, schema) for the prompt; needs server >= 2.0."""
        if self.is_conn_available() \
                and self.connection.lowest_server_version >= StrictVersion("2.0"):
            user, schema = self._user_and_schema()
            self.connect_info = ConnectionMeta(user, schema)
        else:
            self.connect_info = ConnectionMeta(None, None)

    def _user_and_schema(self):
        """Return the (current_user, current_schema) row from the server."""
        try:
            # CURRENT_USER function is only available in Enterprise Edition.
            self.cursor.execute("""
                SELECT
                    current_user AS "user",
                    current_schema AS "schema";
            """)
        except ProgrammingError:
            self.cursor.execute("""
                SELECT
                    NULL AS "user",
                    current_schema AS "schema";
            """)
        return self.cursor.fetchone()

    def _try_exec_cmd(self, line):
        """Look up and run the built-in command named at the start of *line*.

        Returns True when a command was found (even if it then failed),
        False for an empty line or an unknown command name.
        """
        words = line.split(' ', 1)
        if not words or not words[0]:
            return False
        cmd = self.commands.get(words[0].lower().rstrip(';'))
        if len(words) > 1:
            words[1] = words[1].rstrip(';')
        if cmd:
            try:
                if isinstance(cmd, Command):
                    message = cmd(self, *words[1:])
                else:
                    message = cmd(*words[1:])
            except ProgrammingError as e:
                # repl needs to handle 401 authorization errors
                raise e
            except TypeError as e:
                # Wrong argument count: show the command's own help text.
                self.logger.critical(getattr(e, 'message', None) or repr(e))
                doc = cmd.__doc__
                if doc and not doc.isspace():
                    self.logger.info('help: {0}'.format(words[0].lower()))
                    self.logger.info(cmd.__doc__)
            except Exception as e:
                self.logger.critical(getattr(e, 'message', None) or repr(e))
            else:
                if message:
                    self.logger.info(message)
            return True
        else:
            self.logger.critical(
                'Unknown command. Type \\? for a full list of available commands.')
        return False

    def _exec(self, line):
        # Remember the first failure in exit_code but keep processing.
        success = self.execute(line)
        self.exit_code = self.exit_code or int(not success)

    def _execute(self, statement):
        """Run *statement* on the cursor; True on success, False on error."""
        try:
            self.cursor.execute(statement)
            return True
        except ConnectionError as e:
            if self.error_trace:
                self.logger.warn(str(e))
            self.logger.warn(
                'Use \\connect <server> to connect to one or more servers first.')
        except ProgrammingError as e:
            self.logger.critical(e.message)
            if self.error_trace and e.error_trace:
                self.logger.critical('\n' + e.error_trace)
        return False

    def execute(self, statement):
        """Execute *statement*, print its result set (if any) and a
        summary line; return the success flag."""
        success = self._execute(statement)
        if not success:
            return False
        cur = self.cursor
        duration = ''
        if cur.duration > -1:
            duration = ' ({0:.3f} sec)'.format(float(cur.duration) / 1000.0)
        print_vars = {
            'command': stmt_type(statement),
            'rowcount': cur.rowcount,
            's': 's'[cur.rowcount == 1:],  # pluralize: '' when exactly 1 row
            'duration': duration
        }
        if cur.description:
            # SELECT-like statement: print the full result set.
            self.pprint(cur.fetchall(), [c[0] for c in cur.description])
            tmpl = '{command} {rowcount} row{s} in set{duration}'
        else:
            tmpl = '{command} OK, {rowcount} row{s} affected {duration}'
        self.logger.info(tmpl.format(**print_vars))
        return True
def stmt_type(statement):
    """
    Extract the type of a statement, e.g. SELECT, INSERT, UPDATE, DELETE, ...

    Returns the first word of *statement* upper-cased. Unlike the
    previous ``re.findall(...)[0]`` form, an input without any word
    characters (e.g. the empty string) returns '' instead of raising
    IndexError.
    """
    match = re.search(r'\w+', statement)
    return match.group(0).upper() if match else ''
def get_stdin():
    """
    Yield lines from stdin when input is piped in; yield nothing on a TTY.
    """
    if sys.stdin.isatty():
        return
    yield from sys.stdin
def host_and_port(host_or_port):
    """
    Normalize *host_or_port* to a full "host:port" string.

    * host:port -> host:port
    * :         -> localhost:4200
    * :port     -> localhost:port
    * host      -> host:4200
    """
    if ':' not in host_or_port:
        return host_or_port + ':4200'
    if host_or_port == ':':
        return 'localhost:4200'
    if host_or_port.startswith(':'):
        return 'localhost' + host_or_port
    return host_or_port
def get_information_schema_query(lowest_server_version):
    """Return the SQL that counts user tables (for the sysinfo display).

    Picks the pre-0.57 column name "schema_name" when the cluster's
    lowest server version requires it, "table_schema" otherwise.
    """
    schema_name = \
        "table_schema" if lowest_server_version >= \
        TABLE_SCHEMA_MIN_VERSION else "schema_name"
    information_schema_query = \
        """ select count(distinct(table_name))
        as number_of_tables
        from information_schema.tables
        where {schema}
        not in ('information_schema', 'sys', 'pg_catalog') """
    return information_schema_query.format(schema=schema_name)
def main():
    """Command-line entry point: parse options, connect, then either run
    one-shot work (--sysinfo / --command / piped stdin) or start the REPL.
    """
    is_tty = sys.stdout.isatty()
    printer = ColorPrinter(is_tty)
    output_writer = OutputWriter(PrintWrapper(), is_tty)
    # --config must be extracted before argparse runs; parse_config_path
    # removes it from sys.argv.
    config = parse_config_path()
    conf = None
    try:
        conf = Configuration(config)
    except ConfigurationError as e:
        # Unusable config file: show usage and bail out.
        printer.warn(str(e))
        parser = get_parser(output_writer.formats)
        parser.print_usage()
        sys.exit(1)
    parser = get_parser(output_writer.formats, conf=conf)
    try:
        args = parse_args(parser)
    except Exception as e:
        printer.warn(str(e))
        sys.exit(1)
    output_writer.output_format = args.format
    if args.version:
        printer.info(crash_version)
        sys.exit(0)
    crate_hosts = [host_and_port(h) for h in args.hosts]
    error_trace = args.verbose > 0
    force_passwd_prompt = args.force_passwd_prompt
    password = None
    # If password prompt is not forced try to get it from env. variable.
    if not force_passwd_prompt:
        password = os.environ.get('CRATEPW', None)
    # Prompt for password immediately to avoid that the first time trying to
    # connect to the server runs into an `Unauthorized` exception
    # is_tty = False
    if force_passwd_prompt and not password and is_tty:
        password = getpass()
    # Tries to create a connection to the server.
    # Prompts for the password automatically if the server only accepts
    # password authentication.
    cmd = None
    try:
        cmd = _create_shell(crate_hosts, error_trace, output_writer, is_tty,
                            args, password=password)
    except (ProgrammingError, LocationParseError) as e:
        # A 401 means the server wants credentials: prompt once and retry.
        if '401' in e.message and not force_passwd_prompt:
            if is_tty:
                password = getpass()
            try:
                cmd = _create_shell(crate_hosts, error_trace, output_writer,
                                    is_tty, args, password=password)
            except (ProgrammingError, LocationParseError) as ex:
                printer.warn(str(ex))
                sys.exit(1)
        else:
            raise e
    except Exception as e:
        printer.warn(str(e))
        sys.exit(1)
    cmd._verify_connection(verbose=error_trace)
    if not cmd.is_conn_available():
        sys.exit(1)
    done = False
    stdin_data = get_stdin()
    if args.sysinfo:
        cmd.output_writer.output_format = 'mixed'
        cmd.sys_info_cmd.execute()
        done = True
    if args.command:
        cmd.process(args.command)
        done = True
    elif stdin_data:
        # NOTE(review): stdin_data is a generator and therefore always
        # truthy; whether anything was actually piped in is decided by
        # process_iterable's return value.
        if cmd.process_iterable(stdin_data):
            done = True
    if not done:
        # No one-shot work requested: start the interactive REPL.
        from .repl import loop
        loop(cmd, args.history)
    conf.save()
    sys.exit(cmd.exit())
def _create_shell(crate_hosts, error_trace, output_writer, is_tty, args,
                  timeout=None, password=None):
    """Instantiate a CrateShell from parsed command-line *args*."""
    options = dict(
        error_trace=error_trace,
        output_writer=output_writer,
        is_tty=is_tty,
        autocomplete=args.autocomplete,
        autocapitalize=args.autocapitalize,
        verify_ssl=args.verify_ssl,
        cert_file=args.cert_file,
        key_file=args.key_file,
        ca_cert_file=args.ca_cert_file,
        username=args.username,
        password=password,
        schema=args.schema,
        timeout=timeout,
    )
    return CrateShell(crate_hosts, **options)
def file_with_permissions(path):
    """Argparse type check: *path* must be an openable (readable) file.

    Opening the file raises the usual OSError when it is missing or
    unreadable; on success the path is returned unchanged.
    """
    with open(path, 'r'):
        pass
    return path
if __name__ == '__main__':
main()
|
crate/crash
|
src/crate/crash/command.py
|
get_parser
|
python
|
def get_parser(output_formats=[], conf=None):
def _conf_or_default(key, value):
return value if conf is None else conf.get_or_set(key, value)
parser = ArgumentParser(description='crate shell')
parser.add_argument('-v', '--verbose', action='count',
dest='verbose', default=_conf_or_default('verbosity', 0),
help='print debug information to STDOUT')
parser.add_argument('-A', '--no-autocomplete', action='store_false',
dest='autocomplete',
default=_conf_or_default('autocomplete', True),
help='disable SQL keywords autocompletion')
parser.add_argument('-a', '--autocapitalize', action='store_true',
dest='autocapitalize',
default=False,
help='enable automatic capitalization of SQL keywords while typing')
parser.add_argument('-U', '--username', type=str, metavar='USERNAME',
help='Authenticate as USERNAME.')
parser.add_argument('-W', '--password', action='store_true',
dest='force_passwd_prompt', default=_conf_or_default('force_passwd_prompt', False),
help='force a password prompt')
parser.add_argument('--schema', type=str,
help='default schema for statements if schema is not explicitly stated in queries')
parser.add_argument('--history', type=str, metavar='FILENAME',
help='Use FILENAME as a history file', default=HISTORY_PATH)
parser.add_argument('--config', type=str, metavar='FILENAME',
help='use FILENAME as a configuration file', default=CONFIG_PATH)
group = parser.add_mutually_exclusive_group()
group.add_argument('-c', '--command', type=str, metavar='STATEMENT',
help='Execute the STATEMENT and exit.')
group.add_argument('--sysinfo', action='store_true', default=False,
help='print system and cluster information')
parser.add_argument('--hosts', type=str, nargs='*',
default=_conf_or_default('hosts', ['localhost:4200']),
help='connect to HOSTS.', metavar='HOSTS')
parser.add_argument('--verify-ssl', type=boolean, default=True,
help='force the verification of the server SSL certificate')
parser.add_argument('--cert-file', type=file_with_permissions, metavar='FILENAME',
help='use FILENAME as the client certificate file')
parser.add_argument('--key-file', type=file_with_permissions, metavar='FILENAME',
help='Use FILENAME as the client certificate key file')
parser.add_argument('--ca-cert-file', type=file_with_permissions, metavar='FILENAME',
help='use FILENAME as the CA certificate file')
parser.add_argument('--format', type=str,
default=_conf_or_default('format', 'tabular'),
choices=output_formats, metavar='FORMAT',
help='the output FORMAT of the SQL response')
parser.add_argument('--version', action='store_true', default=False,
help='print the Crash version and exit')
return parser
|
Create an argument parser that reads default values from a
configuration file if provided.
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/command.py#L121-L178
|
[
"def _conf_or_default(key, value):\n return value if conf is None else conf.get_or_set(key, value)\n"
] |
# vim: set fileencodings=utf-8
# -*- coding: utf-8; -*-
# PYTHON_ARGCOMPLETE_OK
#
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
from __future__ import print_function
import logging
import os
import re
import sys
import urllib3
from getpass import getpass
from appdirs import user_data_dir, user_config_dir
from argparse import ArgumentParser
from collections import namedtuple
from crate.client import connect
from crate.client.exceptions import ConnectionError, ProgrammingError
from distutils.version import StrictVersion
from urllib3.exceptions import LocationParseError
from operator import itemgetter
from .commands import built_in_commands, Command
from .config import Configuration, ConfigurationError
from .outputs import OutputWriter
from .printer import ColorPrinter, PrintWrapper
from .sysinfo import SysInfoCommand
from ..crash import __version__ as crash_version
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
from logging import NullHandler
except ImportError:
from logging import Handler
class NullHandler(Handler):
def emit(self, record):
pass
logging.getLogger('crate').addHandler(NullHandler())
USER_DATA_DIR = user_data_dir("Crate", "Crate")
HISTORY_FILE_NAME = 'crash_history'
HISTORY_PATH = os.path.join(USER_DATA_DIR, HISTORY_FILE_NAME)
USER_CONFIG_DIR = user_config_dir("Crate", "Crate")
CONFIG_FILE_NAME = 'crash.cfg'
CONFIG_PATH = os.path.join(USER_CONFIG_DIR, CONFIG_FILE_NAME)
Result = namedtuple('Result', ['cols',
'rows',
'rowcount',
'duration',
'output_width'])
ConnectionMeta = namedtuple('ConnectionMeta', ['user', 'schema'])
TABLE_SCHEMA_MIN_VERSION = StrictVersion("0.57.0")
TABLE_TYPE_MIN_VERSION = StrictVersion("2.0.0")
def parse_config_path(args=sys.argv):
    """
    Preprocess sys.argv and extract --config argument.

    NOTE: *args* is mutated in place -- the flag and its value are
    popped so the later argparse run never sees them. The mutable
    ``sys.argv`` default is deliberate for exactly this reason.
    """
    config = CONFIG_PATH
    if '--config' in args:
        idx = args.index('--config')
        # Only consume a value if one actually follows the flag.
        if len(args) > idx + 1:
            config = args.pop(idx + 1)
        args.pop(idx)
    return config
def parse_args(parser):
"""
Parse sys.argv arguments with given parser
"""
try:
import argcomplete
argcomplete.autocomplete(parser)
except ImportError:
pass
return parser.parse_args()
def boolean(v):
if str(v).lower() in ("yes", "true", "t", "1"):
return True
elif str(v).lower() in ("no", "false", "f", "0"):
return False
else:
raise ValueError('not a boolean value')
def noargs_command(fn):
def inner_fn(self, *args):
if len(args):
self.logger.critical("Command does not take any arguments.")
return
return fn(self, *args)
inner_fn.__doc__ = fn.__doc__
return inner_fn
def _parse_statements(lines):
"""Return a generator of statements
Args: A list of strings that can contain one or more statements.
Statements are separated using ';' at the end of a line
Everything after the last ';' will be treated as the last statement.
>>> list(_parse_statements(['select * from ', 't1;', 'select name']))
['select * from\\nt1', 'select name']
>>> list(_parse_statements(['select * from t1;', ' ']))
['select * from t1']
"""
lines = (l.strip() for l in lines if l)
lines = (l for l in lines if l and not l.startswith('--'))
parts = []
for line in lines:
parts.append(line.rstrip(';'))
if line.endswith(';'):
yield '\n'.join(parts)
parts[:] = []
if parts:
yield '\n'.join(parts)
class CrateShell:
def __init__(self,
crate_hosts=['localhost:4200'],
output_writer=None,
error_trace=False,
is_tty=True,
autocomplete=True,
autocapitalize=True,
verify_ssl=True,
cert_file=None,
key_file=None,
ca_cert_file=None,
username=None,
password=None,
schema=None,
timeout=None):
self.last_connected_servers = []
self.exit_code = 0
self.expanded_mode = False
self.sys_info_cmd = SysInfoCommand(self)
self.commands = {
'q': self._quit,
'c': self._connect,
'connect': self._connect,
'dt': self._show_tables,
'sysinfo': self.sys_info_cmd.execute,
}
self.commands.update(built_in_commands)
self.logger = ColorPrinter(is_tty)
self.output_writer = output_writer or OutputWriter(PrintWrapper(), is_tty)
self.error_trace = error_trace
self._autocomplete = autocomplete
self._autocapitalize = autocapitalize
self.verify_ssl = verify_ssl
self.cert_file = cert_file
self.key_file = key_file
self.ca_cert_file = ca_cert_file
self.username = username
self.password = password
self.schema = schema
self.timeout = timeout
# establish connection
self.cursor = None
self.connection = None
self._do_connect(crate_hosts)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.exit()
def get_num_columns(self):
return 80
def should_autocomplete(self):
return self._autocomplete
def should_autocapitalize(self):
return self._autocapitalize
def pprint(self, rows, cols):
result = Result(cols,
rows,
self.cursor.rowcount,
self.cursor.duration,
self.get_num_columns())
self.output_writer.write(result)
def process_iterable(self, stdin):
any_statement = False
for statement in _parse_statements(stdin):
self._exec(statement)
any_statement = True
return any_statement
def process(self, text):
if text.startswith('\\'):
self._try_exec_cmd(text.lstrip('\\'))
else:
for statement in _parse_statements([text]):
self._exec(statement)
def exit(self):
self.close()
return self.exit_code
def close(self):
if self.is_closed():
raise ProgrammingError('CrateShell is already closed')
if self.cursor:
self.cursor.close()
self.cursor = None
if self.connection:
self.connection.close()
self.connection = None
def is_closed(self):
return not (self.cursor and self.connection)
@noargs_command
def _show_tables(self, *args):
""" print the existing tables within the 'doc' schema """
v = self.connection.lowest_server_version
schema_name = \
"table_schema" if v >= TABLE_SCHEMA_MIN_VERSION else "schema_name"
table_filter = \
" AND table_type = 'BASE TABLE'" if v >= TABLE_TYPE_MIN_VERSION else ""
self._exec("SELECT format('%s.%s', {schema}, table_name) AS name "
"FROM information_schema.tables "
"WHERE {schema} NOT IN ('sys','information_schema', 'pg_catalog')"
"{table_filter}"
.format(schema=schema_name, table_filter=table_filter))
@noargs_command
def _quit(self, *args):
""" quit crash """
self.logger.warn('Bye!')
sys.exit(self.exit())
def is_conn_available(self):
return self.connection and \
self.connection.lowest_server_version != StrictVersion("0.0.0")
def _do_connect(self, servers):
self.last_connected_servers = servers
if self.cursor or self.connection:
self.close() # reset open cursor and connection
self.connection = connect(servers,
error_trace=self.error_trace,
verify_ssl_cert=self.verify_ssl,
cert_file=self.cert_file,
key_file=self.key_file,
ca_cert=self.ca_cert_file,
username=self.username,
password=self.password,
schema=self.schema,
timeout=self.timeout)
self.cursor = self.connection.cursor()
self._fetch_session_info()
def _connect(self, servers):
""" connect to the given server, e.g.: \\connect localhost:4200 """
self._do_connect(servers.split(' '))
self._verify_connection(verbose=True)
def reconnect(self):
"""Connect with same configuration and to last connected servers"""
self._do_connect(self.last_connected_servers)
def _verify_connection(self, verbose=False):
results = []
failed = 0
client = self.connection.client
for server in client.server_pool.keys():
try:
infos = client.server_infos(server)
except ConnectionError as e:
failed += 1
results.append([server, None, '0.0.0', False, e.message])
else:
results.append(infos + (True, 'OK', ))
# sort by CONNECTED DESC, SERVER_URL
results.sort(key=itemgetter(3), reverse=True)
results.sort(key=itemgetter(0))
if verbose:
cols = ['server_url', 'node_name', 'version', 'connected', 'message']
self.pprint(results, cols)
if failed == len(results):
self.logger.critical('CONNECT ERROR')
else:
self.logger.info('CONNECT OK')
# Execute cluster/node checks only in verbose mode
if verbose:
SysInfoCommand.CLUSTER_INFO['information_schema_query'] = \
get_information_schema_query(self.connection.lowest_server_version)
# check for failing node and cluster checks
built_in_commands['check'](self, startup=True)
def _fetch_session_info(self):
if self.is_conn_available() \
and self.connection.lowest_server_version >= StrictVersion("2.0"):
user, schema = self._user_and_schema()
self.connect_info = ConnectionMeta(user, schema)
else:
self.connect_info = ConnectionMeta(None, None)
def _user_and_schema(self):
try:
# CURRENT_USER function is only available in Enterprise Edition.
self.cursor.execute("""
SELECT
current_user AS "user",
current_schema AS "schema";
""")
except ProgrammingError:
self.cursor.execute("""
SELECT
NULL AS "user",
current_schema AS "schema";
""")
return self.cursor.fetchone()
def _try_exec_cmd(self, line):
words = line.split(' ', 1)
if not words or not words[0]:
return False
cmd = self.commands.get(words[0].lower().rstrip(';'))
if len(words) > 1:
words[1] = words[1].rstrip(';')
if cmd:
try:
if isinstance(cmd, Command):
message = cmd(self, *words[1:])
else:
message = cmd(*words[1:])
except ProgrammingError as e:
# repl needs to handle 401 authorization errors
raise e
except TypeError as e:
self.logger.critical(getattr(e, 'message', None) or repr(e))
doc = cmd.__doc__
if doc and not doc.isspace():
self.logger.info('help: {0}'.format(words[0].lower()))
self.logger.info(cmd.__doc__)
except Exception as e:
self.logger.critical(getattr(e, 'message', None) or repr(e))
else:
if message:
self.logger.info(message)
return True
else:
self.logger.critical(
'Unknown command. Type \\? for a full list of available commands.')
return False
def _exec(self, line):
success = self.execute(line)
self.exit_code = self.exit_code or int(not success)
def _execute(self, statement):
try:
self.cursor.execute(statement)
return True
except ConnectionError as e:
if self.error_trace:
self.logger.warn(str(e))
self.logger.warn(
'Use \\connect <server> to connect to one or more servers first.')
except ProgrammingError as e:
self.logger.critical(e.message)
if self.error_trace and e.error_trace:
self.logger.critical('\n' + e.error_trace)
return False
def execute(self, statement):
success = self._execute(statement)
if not success:
return False
cur = self.cursor
duration = ''
if cur.duration > -1:
duration = ' ({0:.3f} sec)'.format(float(cur.duration) / 1000.0)
print_vars = {
'command': stmt_type(statement),
'rowcount': cur.rowcount,
's': 's'[cur.rowcount == 1:],
'duration': duration
}
if cur.description:
self.pprint(cur.fetchall(), [c[0] for c in cur.description])
tmpl = '{command} {rowcount} row{s} in set{duration}'
else:
tmpl = '{command} OK, {rowcount} row{s} affected {duration}'
self.logger.info(tmpl.format(**print_vars))
return True
def stmt_type(statement):
"""
Extract type of statement, e.g. SELECT, INSERT, UPDATE, DELETE, ...
"""
return re.findall(r'[\w]+', statement)[0].upper()
def get_stdin():
"""
Get data from stdin, if any
"""
if not sys.stdin.isatty():
for line in sys.stdin:
yield line
return
def host_and_port(host_or_port):
"""
Return full hostname/IP + port, possible input formats are:
* host:port -> host:port
* : -> localhost:4200
* :port -> localhost:port
* host -> host:4200
"""
if ':' in host_or_port:
if len(host_or_port) == 1:
return 'localhost:4200'
elif host_or_port.startswith(':'):
return 'localhost' + host_or_port
return host_or_port
return host_or_port + ':4200'
def get_information_schema_query(lowest_server_version):
schema_name = \
"table_schema" if lowest_server_version >= \
TABLE_SCHEMA_MIN_VERSION else "schema_name"
information_schema_query = \
""" select count(distinct(table_name))
as number_of_tables
from information_schema.tables
where {schema}
not in ('information_schema', 'sys', 'pg_catalog') """
return information_schema_query.format(schema=schema_name)
def main():
is_tty = sys.stdout.isatty()
printer = ColorPrinter(is_tty)
output_writer = OutputWriter(PrintWrapper(), is_tty)
config = parse_config_path()
conf = None
try:
conf = Configuration(config)
except ConfigurationError as e:
printer.warn(str(e))
parser = get_parser(output_writer.formats)
parser.print_usage()
sys.exit(1)
parser = get_parser(output_writer.formats, conf=conf)
try:
args = parse_args(parser)
except Exception as e:
printer.warn(str(e))
sys.exit(1)
output_writer.output_format = args.format
if args.version:
printer.info(crash_version)
sys.exit(0)
crate_hosts = [host_and_port(h) for h in args.hosts]
error_trace = args.verbose > 0
force_passwd_prompt = args.force_passwd_prompt
password = None
# If password prompt is not forced try to get it from env. variable.
if not force_passwd_prompt:
password = os.environ.get('CRATEPW', None)
# Prompt for password immediately to avoid that the first time trying to
# connect to the server runs into an `Unauthorized` excpetion
# is_tty = False
if force_passwd_prompt and not password and is_tty:
password = getpass()
# Tries to create a connection to the server.
# Prompts for the password automatically if the server only accepts
# password authentication.
cmd = None
try:
cmd = _create_shell(crate_hosts, error_trace, output_writer, is_tty,
args, password=password)
except (ProgrammingError, LocationParseError) as e:
if '401' in e.message and not force_passwd_prompt:
if is_tty:
password = getpass()
try:
cmd = _create_shell(crate_hosts, error_trace, output_writer,
is_tty, args, password=password)
except (ProgrammingError, LocationParseError) as ex:
printer.warn(str(ex))
sys.exit(1)
else:
raise e
except Exception as e:
printer.warn(str(e))
sys.exit(1)
cmd._verify_connection(verbose=error_trace)
if not cmd.is_conn_available():
sys.exit(1)
done = False
stdin_data = get_stdin()
if args.sysinfo:
cmd.output_writer.output_format = 'mixed'
cmd.sys_info_cmd.execute()
done = True
if args.command:
cmd.process(args.command)
done = True
elif stdin_data:
if cmd.process_iterable(stdin_data):
done = True
if not done:
from .repl import loop
loop(cmd, args.history)
conf.save()
sys.exit(cmd.exit())
def _create_shell(crate_hosts, error_trace, output_writer, is_tty, args,
                  timeout=None, password=None):
    """Build a :class:`CrateShell` from parsed CLI args plus explicit overrides."""
    shell_kwargs = {
        'error_trace': error_trace,
        'output_writer': output_writer,
        'is_tty': is_tty,
        'autocomplete': args.autocomplete,
        'autocapitalize': args.autocapitalize,
        'verify_ssl': args.verify_ssl,
        'cert_file': args.cert_file,
        'key_file': args.key_file,
        'ca_cert_file': args.ca_cert_file,
        'username': args.username,
        'password': password,
        'schema': args.schema,
        'timeout': timeout,
    }
    return CrateShell(crate_hosts, **shell_kwargs)
def file_with_permissions(path):
    """Argparse type check: ensure *path* exists and is readable.

    Opening the file raises an OSError/IOError with a descriptive message
    when the file is missing or unreadable, which argparse then reports
    as a usage error.

    :param path: file system path given on the command line
    :return: the unchanged *path* if it could be opened for reading
    """
    # Context manager guarantees the probe handle is closed even if an
    # exception is raised between open and close.
    with open(path, 'r'):
        pass
    return path
if __name__ == '__main__':
main()
|
crate/crash
|
src/crate/crash/command.py
|
_parse_statements
|
python
|
def _parse_statements(lines):
lines = (l.strip() for l in lines if l)
lines = (l for l in lines if l and not l.startswith('--'))
parts = []
for line in lines:
parts.append(line.rstrip(';'))
if line.endswith(';'):
yield '\n'.join(parts)
parts[:] = []
if parts:
yield '\n'.join(parts)
|
Return a generator of statements
Args: A list of strings that can contain one or more statements.
Statements are separated using ';' at the end of a line
Everything after the last ';' will be treated as the last statement.
>>> list(_parse_statements(['select * from ', 't1;', 'select name']))
['select * from\\nt1', 'select name']
>>> list(_parse_statements(['select * from t1;', ' ']))
['select * from t1']
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/command.py#L191-L213
| null |
# vim: set fileencodings=utf-8
# -*- coding: utf-8; -*-
# PYTHON_ARGCOMPLETE_OK
#
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
from __future__ import print_function
import logging
import os
import re
import sys
import urllib3
from getpass import getpass
from appdirs import user_data_dir, user_config_dir
from argparse import ArgumentParser
from collections import namedtuple
from crate.client import connect
from crate.client.exceptions import ConnectionError, ProgrammingError
from distutils.version import StrictVersion
from urllib3.exceptions import LocationParseError
from operator import itemgetter
from .commands import built_in_commands, Command
from .config import Configuration, ConfigurationError
from .outputs import OutputWriter
from .printer import ColorPrinter, PrintWrapper
from .sysinfo import SysInfoCommand
from ..crash import __version__ as crash_version
# Self-signed certificates are common in CrateDB setups; silence the
# per-request InsecureRequestWarning noise.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# logging.NullHandler only exists since Python 2.7; provide a no-op
# fallback so the 'crate' client library never warns about missing
# logging handlers.
try:
    from logging import NullHandler
except ImportError:
    from logging import Handler

    class NullHandler(Handler):
        def emit(self, record):
            pass

logging.getLogger('crate').addHandler(NullHandler())
# Platform-appropriate locations for the REPL history and config file.
USER_DATA_DIR = user_data_dir("Crate", "Crate")
HISTORY_FILE_NAME = 'crash_history'
HISTORY_PATH = os.path.join(USER_DATA_DIR, HISTORY_FILE_NAME)
USER_CONFIG_DIR = user_config_dir("Crate", "Crate")
CONFIG_FILE_NAME = 'crash.cfg'
CONFIG_PATH = os.path.join(USER_CONFIG_DIR, CONFIG_FILE_NAME)
# One fetched result set plus the metadata the output writers need.
Result = namedtuple('Result', ['cols',
                               'rows',
                               'rowcount',
                               'duration',
                               'output_width'])
# Session identity reported by the server (user is None before 2.0).
ConnectionMeta = namedtuple('ConnectionMeta', ['user', 'schema'])
# information_schema renamed schema_name -> table_schema in 0.57 and
# added table_type in 2.0; used to build version-appropriate queries.
TABLE_SCHEMA_MIN_VERSION = StrictVersion("0.57.0")
TABLE_TYPE_MIN_VERSION = StrictVersion("2.0.0")
def parse_config_path(args=sys.argv):
    """
    Preprocess sys.argv and extract --config argument.

    Mutates *args* in place (by default the real ``sys.argv``) so that
    argparse never sees ``--config``; returns the extracted path or the
    default CONFIG_PATH.
    """
    config = CONFIG_PATH
    try:
        flag_pos = args.index('--config')
    except ValueError:
        return config
    if flag_pos + 1 < len(args):
        config = args.pop(flag_pos + 1)
    args.pop(flag_pos)
    return config
def parse_args(parser):
    """
    Parse sys.argv arguments with given parser
    """
    try:
        import argcomplete
    except ImportError:
        # Tab-completion is a nice-to-have; argcomplete is optional.
        argcomplete = None
    if argcomplete is not None:
        argcomplete.autocomplete(parser)
    return parser.parse_args()
def boolean(v):
    """Argparse type converter for boolean options.

    :param v: any value; its string form is compared case-insensitively
    :return: True for yes/true/t/1, False for no/false/f/0
    :raises ValueError: for any other value (argparse reports this as a
        usage error)
    """
    # Normalize once instead of recomputing str(v).lower() per branch.
    normalized = str(v).lower()
    if normalized in ("yes", "true", "t", "1"):
        return True
    if normalized in ("no", "false", "f", "0"):
        return False
    raise ValueError('not a boolean value')
def get_parser(output_formats=(), conf=None):
    """
    Create an argument parser that reads default values from a
    configuration file if provided.

    :param output_formats: valid choices for the ``--format`` option
    :param conf: optional Configuration; when given, defaults are read
        from (and persisted to) it via ``get_or_set``
    """
    # The default is an immutable tuple: the former mutable default
    # (output_formats=[]) was shared across all calls.
    def _conf_or_default(key, value):
        # Fall back to the hard-coded default when no config is available.
        return value if conf is None else conf.get_or_set(key, value)

    parser = ArgumentParser(description='crate shell')
    parser.add_argument('-v', '--verbose', action='count',
                        dest='verbose', default=_conf_or_default('verbosity', 0),
                        help='print debug information to STDOUT')
    parser.add_argument('-A', '--no-autocomplete', action='store_false',
                        dest='autocomplete',
                        default=_conf_or_default('autocomplete', True),
                        help='disable SQL keywords autocompletion')
    parser.add_argument('-a', '--autocapitalize', action='store_true',
                        dest='autocapitalize',
                        default=False,
                        help='enable automatic capitalization of SQL keywords while typing')
    parser.add_argument('-U', '--username', type=str, metavar='USERNAME',
                        help='Authenticate as USERNAME.')
    parser.add_argument('-W', '--password', action='store_true',
                        dest='force_passwd_prompt', default=_conf_or_default('force_passwd_prompt', False),
                        help='force a password prompt')
    parser.add_argument('--schema', type=str,
                        help='default schema for statements if schema is not explicitly stated in queries')
    parser.add_argument('--history', type=str, metavar='FILENAME',
                        help='Use FILENAME as a history file', default=HISTORY_PATH)
    parser.add_argument('--config', type=str, metavar='FILENAME',
                        help='use FILENAME as a configuration file', default=CONFIG_PATH)
    # --command and --sysinfo are mutually exclusive one-shot modes.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-c', '--command', type=str, metavar='STATEMENT',
                       help='Execute the STATEMENT and exit.')
    group.add_argument('--sysinfo', action='store_true', default=False,
                       help='print system and cluster information')
    parser.add_argument('--hosts', type=str, nargs='*',
                        default=_conf_or_default('hosts', ['localhost:4200']),
                        help='connect to HOSTS.', metavar='HOSTS')
    parser.add_argument('--verify-ssl', type=boolean, default=True,
                        help='force the verification of the server SSL certificate')
    parser.add_argument('--cert-file', type=file_with_permissions, metavar='FILENAME',
                        help='use FILENAME as the client certificate file')
    parser.add_argument('--key-file', type=file_with_permissions, metavar='FILENAME',
                        help='Use FILENAME as the client certificate key file')
    parser.add_argument('--ca-cert-file', type=file_with_permissions, metavar='FILENAME',
                        help='use FILENAME as the CA certificate file')
    parser.add_argument('--format', type=str,
                        default=_conf_or_default('format', 'tabular'),
                        choices=output_formats, metavar='FORMAT',
                        help='the output FORMAT of the SQL response')
    parser.add_argument('--version', action='store_true', default=False,
                        help='print the Crash version and exit')
    return parser
def noargs_command(fn):
    """Decorator for shell commands that take no arguments.

    The wrapper logs an error (instead of raising TypeError) when the
    command is invoked with arguments; otherwise it delegates to *fn*.
    """
    def inner_fn(self, *args):
        if len(args):
            self.logger.critical("Command does not take any arguments.")
            return
        return fn(self, *args)
    # Preserve the wrapped command's identity for help/debug output.
    # (functools.wraps would copy more attributes but needs a new import;
    # __doc__ is what the \? help actually reads, and __name__ was lost
    # by the original implementation.)
    inner_fn.__doc__ = fn.__doc__
    inner_fn.__name__ = fn.__name__
    return inner_fn
class CrateShell:
    """Interactive session against one or more CrateDB servers.

    Owns a single client connection/cursor pair, dispatches SQL
    statements as well as backslash commands (\\q, \\c, \\dt, ...), and
    renders results through an OutputWriter.
    """

    def __init__(self,
                 crate_hosts=['localhost:4200'],
                 output_writer=None,
                 error_trace=False,
                 is_tty=True,
                 autocomplete=True,
                 autocapitalize=True,
                 verify_ssl=True,
                 cert_file=None,
                 key_file=None,
                 ca_cert_file=None,
                 username=None,
                 password=None,
                 schema=None,
                 timeout=None):
        # NOTE(review): crate_hosts uses a mutable default list -- safe
        # only while no caller mutates it; a tuple would be safer.
        self.last_connected_servers = []
        self.exit_code = 0
        self.expanded_mode = False
        self.sys_info_cmd = SysInfoCommand(self)
        # Built-in backslash commands; short aliases share the same
        # bound methods as their long forms.
        self.commands = {
            'q': self._quit,
            'c': self._connect,
            'connect': self._connect,
            'dt': self._show_tables,
            'sysinfo': self.sys_info_cmd.execute,
        }
        self.commands.update(built_in_commands)
        self.logger = ColorPrinter(is_tty)
        self.output_writer = output_writer or OutputWriter(PrintWrapper(), is_tty)
        self.error_trace = error_trace
        self._autocomplete = autocomplete
        self._autocapitalize = autocapitalize
        self.verify_ssl = verify_ssl
        self.cert_file = cert_file
        self.key_file = key_file
        self.ca_cert_file = ca_cert_file
        self.username = username
        self.password = password
        self.schema = schema
        self.timeout = timeout
        # establish connection
        self.cursor = None
        self.connection = None
        self._do_connect(crate_hosts)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Context-manager exit closes cursor and connection via exit().
        self.exit()

    def get_num_columns(self):
        # Fixed output width; the REPL layer may override this with the
        # real terminal width.
        return 80

    def should_autocomplete(self):
        return self._autocomplete

    def should_autocapitalize(self):
        return self._autocapitalize

    def pprint(self, rows, cols):
        """Render *rows*/*cols* plus cursor metadata via the output writer."""
        result = Result(cols,
                        rows,
                        self.cursor.rowcount,
                        self.cursor.duration,
                        self.get_num_columns())
        self.output_writer.write(result)

    def process_iterable(self, stdin):
        """Execute every statement parsed from *stdin*; return True if any ran."""
        any_statement = False
        for statement in _parse_statements(stdin):
            self._exec(statement)
            any_statement = True
        return any_statement

    def process(self, text):
        """Handle one input line: a backslash command or SQL statement(s)."""
        if text.startswith('\\'):
            self._try_exec_cmd(text.lstrip('\\'))
        else:
            for statement in _parse_statements([text]):
                self._exec(statement)

    def exit(self):
        """Close the session and return the accumulated exit code."""
        self.close()
        return self.exit_code

    def close(self):
        """Release cursor and connection.

        :raises ProgrammingError: if the shell was already closed
        """
        if self.is_closed():
            raise ProgrammingError('CrateShell is already closed')
        if self.cursor:
            self.cursor.close()
            self.cursor = None
        if self.connection:
            self.connection.close()
            self.connection = None

    def is_closed(self):
        # Closed means either handle is gone.
        return not (self.cursor and self.connection)

    @noargs_command
    def _show_tables(self, *args):
        """ print the existing tables within the 'doc' schema """
        # Column/filter names depend on the lowest server version in the
        # cluster: schema_name became table_schema in 0.57 and
        # table_type appeared in 2.0.
        v = self.connection.lowest_server_version
        schema_name = \
            "table_schema" if v >= TABLE_SCHEMA_MIN_VERSION else "schema_name"
        table_filter = \
            " AND table_type = 'BASE TABLE'" if v >= TABLE_TYPE_MIN_VERSION else ""
        self._exec("SELECT format('%s.%s', {schema}, table_name) AS name "
                   "FROM information_schema.tables "
                   "WHERE {schema} NOT IN ('sys','information_schema', 'pg_catalog')"
                   "{table_filter}"
                   .format(schema=schema_name, table_filter=table_filter))

    @noargs_command
    def _quit(self, *args):
        """ quit crash """
        self.logger.warn('Bye!')
        sys.exit(self.exit())

    def is_conn_available(self):
        # A lowest_server_version of 0.0.0 means no server ever answered.
        return self.connection and \
            self.connection.lowest_server_version != StrictVersion("0.0.0")

    def _do_connect(self, servers):
        # Remember targets so reconnect() can replay the same connect.
        self.last_connected_servers = servers
        if self.cursor or self.connection:
            self.close()  # reset open cursor and connection
        self.connection = connect(servers,
                                  error_trace=self.error_trace,
                                  verify_ssl_cert=self.verify_ssl,
                                  cert_file=self.cert_file,
                                  key_file=self.key_file,
                                  ca_cert=self.ca_cert_file,
                                  username=self.username,
                                  password=self.password,
                                  schema=self.schema,
                                  timeout=self.timeout)
        self.cursor = self.connection.cursor()
        self._fetch_session_info()

    def _connect(self, servers):
        """ connect to the given server, e.g.: \\connect localhost:4200 """
        self._do_connect(servers.split(' '))
        self._verify_connection(verbose=True)

    def reconnect(self):
        """Connect with same configuration and to last connected servers"""
        self._do_connect(self.last_connected_servers)

    def _verify_connection(self, verbose=False):
        """Probe every server in the pool and report connectivity."""
        results = []
        failed = 0
        client = self.connection.client
        for server in client.server_pool.keys():
            try:
                infos = client.server_infos(server)
            except ConnectionError as e:
                failed += 1
                results.append([server, None, '0.0.0', False, e.message])
            else:
                results.append(infos + (True, 'OK', ))
        # sort by CONNECTED DESC, SERVER_URL
        results.sort(key=itemgetter(3), reverse=True)
        results.sort(key=itemgetter(0))
        if verbose:
            cols = ['server_url', 'node_name', 'version', 'connected', 'message']
            self.pprint(results, cols)
        if failed == len(results):
            self.logger.critical('CONNECT ERROR')
        else:
            self.logger.info('CONNECT OK')
        # Execute cluster/node checks only in verbose mode
        if verbose:
            SysInfoCommand.CLUSTER_INFO['information_schema_query'] = \
                get_information_schema_query(self.connection.lowest_server_version)
            # check for failing node and cluster checks
            built_in_commands['check'](self, startup=True)

    def _fetch_session_info(self):
        # CURRENT_USER/CURRENT_SCHEMA require server >= 2.0; otherwise
        # fall back to an empty connection meta.
        if self.is_conn_available() \
                and self.connection.lowest_server_version >= StrictVersion("2.0"):
            user, schema = self._user_and_schema()
            self.connect_info = ConnectionMeta(user, schema)
        else:
            self.connect_info = ConnectionMeta(None, None)

    def _user_and_schema(self):
        """Return the (user, schema) pair for the current session."""
        try:
            # CURRENT_USER function is only available in Enterprise Edition.
            self.cursor.execute("""
                SELECT
                    current_user AS "user",
                    current_schema AS "schema";
            """)
        except ProgrammingError:
            self.cursor.execute("""
                SELECT
                    NULL AS "user",
                    current_schema AS "schema";
            """)
        return self.cursor.fetchone()

    def _try_exec_cmd(self, line):
        """Look up and run the backslash command named in *line*.

        :return: True if a command was dispatched (even if it failed),
            False if *line* named no known command
        """
        words = line.split(' ', 1)
        if not words or not words[0]:
            return False
        cmd = self.commands.get(words[0].lower().rstrip(';'))
        if len(words) > 1:
            words[1] = words[1].rstrip(';')
        if cmd:
            try:
                if isinstance(cmd, Command):
                    message = cmd(self, *words[1:])
                else:
                    message = cmd(*words[1:])
            except ProgrammingError as e:
                # repl needs to handle 401 authorization errors
                raise e
            except TypeError as e:
                # Wrong argument count: log the error plus the command's
                # own help text when it has one.
                self.logger.critical(getattr(e, 'message', None) or repr(e))
                doc = cmd.__doc__
                if doc and not doc.isspace():
                    self.logger.info('help: {0}'.format(words[0].lower()))
                    self.logger.info(cmd.__doc__)
            except Exception as e:
                self.logger.critical(getattr(e, 'message', None) or repr(e))
            else:
                if message:
                    self.logger.info(message)
            return True
        else:
            self.logger.critical(
                'Unknown command. Type \\? for a full list of available commands.')
        return False

    def _exec(self, line):
        # Record the first failure without clobbering an earlier
        # non-zero exit code.
        success = self.execute(line)
        self.exit_code = self.exit_code or int(not success)

    def _execute(self, statement):
        """Run *statement* on the cursor; log errors and report success."""
        try:
            self.cursor.execute(statement)
            return True
        except ConnectionError as e:
            if self.error_trace:
                self.logger.warn(str(e))
            self.logger.warn(
                'Use \\connect <server> to connect to one or more servers first.')
        except ProgrammingError as e:
            self.logger.critical(e.message)
            if self.error_trace and e.error_trace:
                self.logger.critical('\n' + e.error_trace)
        return False

    def execute(self, statement):
        """Execute *statement*, print its result, and return success."""
        success = self._execute(statement)
        if not success:
            return False
        cur = self.cursor
        duration = ''
        if cur.duration > -1:
            # Server reports milliseconds; render seconds.
            duration = ' ({0:.3f} sec)'.format(float(cur.duration) / 1000.0)
        print_vars = {
            'command': stmt_type(statement),
            'rowcount': cur.rowcount,
            's': 's'[cur.rowcount == 1:],
            'duration': duration
        }
        if cur.description:
            # Statement produced a result set (e.g. SELECT).
            self.pprint(cur.fetchall(), [c[0] for c in cur.description])
            tmpl = '{command} {rowcount} row{s} in set{duration}'
        else:
            tmpl = '{command} OK, {rowcount} row{s} affected {duration}'
        self.logger.info(tmpl.format(**print_vars))
        return True
def stmt_type(statement):
    """
    Extract type of statement, e.g. SELECT, INSERT, UPDATE, DELETE, ...
    """
    tokens = re.findall(r'[\w]+', statement)
    return tokens[0].upper()
def get_stdin():
    """
    Get data from stdin, if any
    """
    if sys.stdin.isatty():
        # Interactive terminal: nothing was piped in.
        return
    for line in sys.stdin:
        yield line
def host_and_port(host_or_port):
    """
    Return full hostname/IP + port, possible input formats are:

    * host:port -> host:port
    * :         -> localhost:4200
    * :port     -> localhost:port
    * host      -> host:4200
    """
    if ':' not in host_or_port:
        # Bare host: append the default port.
        return host_or_port + ':4200'
    if len(host_or_port) == 1:
        # Lone ':' means all defaults.
        return 'localhost:4200'
    if host_or_port.startswith(':'):
        # ':port' -> default host with the given port.
        return 'localhost' + host_or_port
    return host_or_port
def get_information_schema_query(lowest_server_version):
    """Build the table-count query using whichever schema column name the
    connected server version understands."""
    if lowest_server_version >= TABLE_SCHEMA_MIN_VERSION:
        schema_column = "table_schema"
    else:
        schema_column = "schema_name"
    query_template = \
        """ select count(distinct(table_name))
        as number_of_tables
        from information_schema.tables
        where {schema}
        not in ('information_schema', 'sys', 'pg_catalog') """
    return query_template.format(schema=schema_column)
def main():
    """CLI entry point: parse args, connect, then run command/stdin/REPL."""
    is_tty = sys.stdout.isatty()
    printer = ColorPrinter(is_tty)
    output_writer = OutputWriter(PrintWrapper(), is_tty)
    config = parse_config_path()
    conf = None
    try:
        conf = Configuration(config)
    except ConfigurationError as e:
        # Unusable configuration file: show usage and bail out.
        printer.warn(str(e))
        parser = get_parser(output_writer.formats)
        parser.print_usage()
        sys.exit(1)
    parser = get_parser(output_writer.formats, conf=conf)
    try:
        args = parse_args(parser)
    except Exception as e:
        printer.warn(str(e))
        sys.exit(1)
    output_writer.output_format = args.format
    if args.version:
        printer.info(crash_version)
        sys.exit(0)
    crate_hosts = [host_and_port(h) for h in args.hosts]
    error_trace = args.verbose > 0
    force_passwd_prompt = args.force_passwd_prompt
    password = None
    # If password prompt is not forced try to get it from env. variable.
    if not force_passwd_prompt:
        password = os.environ.get('CRATEPW', None)
    # Prompt for the password immediately so that the first attempt to
    # connect to the server does not run into an `Unauthorized` exception.
    # is_tty = False
    if force_passwd_prompt and not password and is_tty:
        password = getpass()
    # Tries to create a connection to the server.
    # Prompts for the password automatically if the server only accepts
    # password authentication.
    cmd = None
    try:
        cmd = _create_shell(crate_hosts, error_trace, output_writer, is_tty,
                            args, password=password)
    except (ProgrammingError, LocationParseError) as e:
        # 401 means the server requires authentication: retry exactly
        # once with an interactively prompted password.
        if '401' in e.message and not force_passwd_prompt:
            if is_tty:
                password = getpass()
            try:
                cmd = _create_shell(crate_hosts, error_trace, output_writer,
                                    is_tty, args, password=password)
            except (ProgrammingError, LocationParseError) as ex:
                printer.warn(str(ex))
                sys.exit(1)
        else:
            raise e
    except Exception as e:
        printer.warn(str(e))
        sys.exit(1)
    cmd._verify_connection(verbose=error_trace)
    if not cmd.is_conn_available():
        sys.exit(1)
    done = False
    stdin_data = get_stdin()
    if args.sysinfo:
        cmd.output_writer.output_format = 'mixed'
        cmd.sys_info_cmd.execute()
        done = True
    if args.command:
        cmd.process(args.command)
        done = True
    elif stdin_data:
        # NOTE(review): get_stdin() returns a generator, which is always
        # truthy; whether any data was actually read is decided inside
        # process_iterable().
        if cmd.process_iterable(stdin_data):
            done = True
    if not done:
        from .repl import loop
        loop(cmd, args.history)
    conf.save()
    sys.exit(cmd.exit())
def _create_shell(crate_hosts, error_trace, output_writer, is_tty, args,
                  timeout=None, password=None):
    """Construct a CrateShell from parsed CLI arguments and explicit overrides."""
    options = dict(
        error_trace=error_trace,
        output_writer=output_writer,
        is_tty=is_tty,
        autocomplete=args.autocomplete,
        autocapitalize=args.autocapitalize,
        verify_ssl=args.verify_ssl,
        cert_file=args.cert_file,
        key_file=args.key_file,
        ca_cert_file=args.ca_cert_file,
        username=args.username,
        password=password,
        schema=args.schema,
        timeout=timeout,
    )
    return CrateShell(crate_hosts, **options)
def file_with_permissions(path):
    """Argparse type check: ensure *path* exists and is readable.

    Opening the file raises OSError/IOError with a helpful message when
    the file is missing or unreadable; argparse reports that to the user.

    :param path: file system path given on the command line
    :return: the unchanged *path* if it could be opened for reading
    """
    # Context manager closes the probe handle even on later errors.
    with open(path, 'r'):
        pass
    return path
if __name__ == '__main__':
main()
|
crate/crash
|
src/crate/crash/command.py
|
host_and_port
|
python
|
def host_and_port(host_or_port):
    """Normalize a host/port spec to 'host:port'.

    ':' -> localhost:4200, ':port' -> localhost:port,
    'host' -> host:4200, 'host:port' -> unchanged.
    """
    if ':' not in host_or_port:
        return host_or_port + ':4200'
    if len(host_or_port) == 1:
        return 'localhost:4200'
    if host_or_port.startswith(':'):
        return 'localhost' + host_or_port
    return host_or_port
|
Return full hostname/IP + port, possible input formats are:
* host:port -> host:port
* : -> localhost:4200
* :port -> localhost:port
* host -> host:4200
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/command.py#L519-L533
| null |
# vim: set fileencodings=utf-8
# -*- coding: utf-8; -*-
# PYTHON_ARGCOMPLETE_OK
#
# Licensed to CRATE Technology GmbH ("Crate") under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. Crate licenses
# this file to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# However, if you have executed another commercial license agreement
# with Crate these terms will supersede the license and you may use the
# software solely pursuant to the terms of the relevant commercial agreement.
from __future__ import print_function
import logging
import os
import re
import sys
import urllib3
from getpass import getpass
from appdirs import user_data_dir, user_config_dir
from argparse import ArgumentParser
from collections import namedtuple
from crate.client import connect
from crate.client.exceptions import ConnectionError, ProgrammingError
from distutils.version import StrictVersion
from urllib3.exceptions import LocationParseError
from operator import itemgetter
from .commands import built_in_commands, Command
from .config import Configuration, ConfigurationError
from .outputs import OutputWriter
from .printer import ColorPrinter, PrintWrapper
from .sysinfo import SysInfoCommand
from ..crash import __version__ as crash_version
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
from logging import NullHandler
except ImportError:
from logging import Handler
class NullHandler(Handler):
def emit(self, record):
pass
logging.getLogger('crate').addHandler(NullHandler())
USER_DATA_DIR = user_data_dir("Crate", "Crate")
HISTORY_FILE_NAME = 'crash_history'
HISTORY_PATH = os.path.join(USER_DATA_DIR, HISTORY_FILE_NAME)
USER_CONFIG_DIR = user_config_dir("Crate", "Crate")
CONFIG_FILE_NAME = 'crash.cfg'
CONFIG_PATH = os.path.join(USER_CONFIG_DIR, CONFIG_FILE_NAME)
Result = namedtuple('Result', ['cols',
'rows',
'rowcount',
'duration',
'output_width'])
ConnectionMeta = namedtuple('ConnectionMeta', ['user', 'schema'])
TABLE_SCHEMA_MIN_VERSION = StrictVersion("0.57.0")
TABLE_TYPE_MIN_VERSION = StrictVersion("2.0.0")
def parse_config_path(args=sys.argv):
    """
    Preprocess sys.argv and extract --config argument.

    Mutates *args* in place so argparse never sees ``--config``; returns
    the extracted path, or CONFIG_PATH when the option is absent.
    """
    config = CONFIG_PATH
    try:
        position = args.index('--config')
    except ValueError:
        return config
    if position + 1 < len(args):
        config = args.pop(position + 1)
    args.pop(position)
    return config
def parse_args(parser):
    """
    Parse sys.argv arguments with given parser
    """
    try:
        import argcomplete
    except ImportError:
        # argcomplete is an optional dependency for tab completion.
        argcomplete = None
    if argcomplete is not None:
        argcomplete.autocomplete(parser)
    return parser.parse_args()
def boolean(v):
    """Argparse type converter for boolean options.

    :param v: any value; its string form is compared case-insensitively
    :return: True for yes/true/t/1, False for no/false/f/0
    :raises ValueError: for any other value
    """
    # Normalize once instead of recomputing str(v).lower() per branch.
    normalized = str(v).lower()
    if normalized in ("yes", "true", "t", "1"):
        return True
    if normalized in ("no", "false", "f", "0"):
        return False
    raise ValueError('not a boolean value')
def get_parser(output_formats=(), conf=None):
    """
    Create an argument parser that reads default values from a
    configuration file if provided.

    :param output_formats: valid choices for the ``--format`` option
    :param conf: optional Configuration; when given, defaults are read
        from (and persisted to) it via ``get_or_set``
    """
    # Immutable tuple default replaces the shared mutable default
    # (output_formats=[]).
    def _conf_or_default(key, value):
        # Fall back to the hard-coded default when no config is available.
        return value if conf is None else conf.get_or_set(key, value)

    parser = ArgumentParser(description='crate shell')
    parser.add_argument('-v', '--verbose', action='count',
                        dest='verbose', default=_conf_or_default('verbosity', 0),
                        help='print debug information to STDOUT')
    parser.add_argument('-A', '--no-autocomplete', action='store_false',
                        dest='autocomplete',
                        default=_conf_or_default('autocomplete', True),
                        help='disable SQL keywords autocompletion')
    parser.add_argument('-a', '--autocapitalize', action='store_true',
                        dest='autocapitalize',
                        default=False,
                        help='enable automatic capitalization of SQL keywords while typing')
    parser.add_argument('-U', '--username', type=str, metavar='USERNAME',
                        help='Authenticate as USERNAME.')
    parser.add_argument('-W', '--password', action='store_true',
                        dest='force_passwd_prompt', default=_conf_or_default('force_passwd_prompt', False),
                        help='force a password prompt')
    parser.add_argument('--schema', type=str,
                        help='default schema for statements if schema is not explicitly stated in queries')
    parser.add_argument('--history', type=str, metavar='FILENAME',
                        help='Use FILENAME as a history file', default=HISTORY_PATH)
    parser.add_argument('--config', type=str, metavar='FILENAME',
                        help='use FILENAME as a configuration file', default=CONFIG_PATH)
    # --command and --sysinfo are mutually exclusive one-shot modes.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-c', '--command', type=str, metavar='STATEMENT',
                       help='Execute the STATEMENT and exit.')
    group.add_argument('--sysinfo', action='store_true', default=False,
                       help='print system and cluster information')
    parser.add_argument('--hosts', type=str, nargs='*',
                        default=_conf_or_default('hosts', ['localhost:4200']),
                        help='connect to HOSTS.', metavar='HOSTS')
    parser.add_argument('--verify-ssl', type=boolean, default=True,
                        help='force the verification of the server SSL certificate')
    parser.add_argument('--cert-file', type=file_with_permissions, metavar='FILENAME',
                        help='use FILENAME as the client certificate file')
    parser.add_argument('--key-file', type=file_with_permissions, metavar='FILENAME',
                        help='Use FILENAME as the client certificate key file')
    parser.add_argument('--ca-cert-file', type=file_with_permissions, metavar='FILENAME',
                        help='use FILENAME as the CA certificate file')
    parser.add_argument('--format', type=str,
                        default=_conf_or_default('format', 'tabular'),
                        choices=output_formats, metavar='FORMAT',
                        help='the output FORMAT of the SQL response')
    parser.add_argument('--version', action='store_true', default=False,
                        help='print the Crash version and exit')
    return parser
def noargs_command(fn):
    """Decorator for shell commands that take no arguments.

    The wrapper logs an error (instead of raising TypeError) when the
    command is invoked with arguments; otherwise it delegates to *fn*.
    """
    def inner_fn(self, *args):
        if len(args):
            self.logger.critical("Command does not take any arguments.")
            return
        return fn(self, *args)
    # Preserve the wrapped command's identity for help/debug output; the
    # original version copied only __doc__ and lost __name__.
    inner_fn.__doc__ = fn.__doc__
    inner_fn.__name__ = fn.__name__
    return inner_fn
def _parse_statements(lines):
"""Return a generator of statements
Args: A list of strings that can contain one or more statements.
Statements are separated using ';' at the end of a line
Everything after the last ';' will be treated as the last statement.
>>> list(_parse_statements(['select * from ', 't1;', 'select name']))
['select * from\\nt1', 'select name']
>>> list(_parse_statements(['select * from t1;', ' ']))
['select * from t1']
"""
lines = (l.strip() for l in lines if l)
lines = (l for l in lines if l and not l.startswith('--'))
parts = []
for line in lines:
parts.append(line.rstrip(';'))
if line.endswith(';'):
yield '\n'.join(parts)
parts[:] = []
if parts:
yield '\n'.join(parts)
class CrateShell:
def __init__(self,
crate_hosts=['localhost:4200'],
output_writer=None,
error_trace=False,
is_tty=True,
autocomplete=True,
autocapitalize=True,
verify_ssl=True,
cert_file=None,
key_file=None,
ca_cert_file=None,
username=None,
password=None,
schema=None,
timeout=None):
self.last_connected_servers = []
self.exit_code = 0
self.expanded_mode = False
self.sys_info_cmd = SysInfoCommand(self)
self.commands = {
'q': self._quit,
'c': self._connect,
'connect': self._connect,
'dt': self._show_tables,
'sysinfo': self.sys_info_cmd.execute,
}
self.commands.update(built_in_commands)
self.logger = ColorPrinter(is_tty)
self.output_writer = output_writer or OutputWriter(PrintWrapper(), is_tty)
self.error_trace = error_trace
self._autocomplete = autocomplete
self._autocapitalize = autocapitalize
self.verify_ssl = verify_ssl
self.cert_file = cert_file
self.key_file = key_file
self.ca_cert_file = ca_cert_file
self.username = username
self.password = password
self.schema = schema
self.timeout = timeout
# establish connection
self.cursor = None
self.connection = None
self._do_connect(crate_hosts)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.exit()
def get_num_columns(self):
return 80
def should_autocomplete(self):
return self._autocomplete
def should_autocapitalize(self):
return self._autocapitalize
def pprint(self, rows, cols):
result = Result(cols,
rows,
self.cursor.rowcount,
self.cursor.duration,
self.get_num_columns())
self.output_writer.write(result)
def process_iterable(self, stdin):
any_statement = False
for statement in _parse_statements(stdin):
self._exec(statement)
any_statement = True
return any_statement
def process(self, text):
if text.startswith('\\'):
self._try_exec_cmd(text.lstrip('\\'))
else:
for statement in _parse_statements([text]):
self._exec(statement)
def exit(self):
self.close()
return self.exit_code
def close(self):
if self.is_closed():
raise ProgrammingError('CrateShell is already closed')
if self.cursor:
self.cursor.close()
self.cursor = None
if self.connection:
self.connection.close()
self.connection = None
def is_closed(self):
return not (self.cursor and self.connection)
@noargs_command
def _show_tables(self, *args):
""" print the existing tables within the 'doc' schema """
v = self.connection.lowest_server_version
schema_name = \
"table_schema" if v >= TABLE_SCHEMA_MIN_VERSION else "schema_name"
table_filter = \
" AND table_type = 'BASE TABLE'" if v >= TABLE_TYPE_MIN_VERSION else ""
self._exec("SELECT format('%s.%s', {schema}, table_name) AS name "
"FROM information_schema.tables "
"WHERE {schema} NOT IN ('sys','information_schema', 'pg_catalog')"
"{table_filter}"
.format(schema=schema_name, table_filter=table_filter))
@noargs_command
def _quit(self, *args):
""" quit crash """
self.logger.warn('Bye!')
sys.exit(self.exit())
def is_conn_available(self):
return self.connection and \
self.connection.lowest_server_version != StrictVersion("0.0.0")
def _do_connect(self, servers):
self.last_connected_servers = servers
if self.cursor or self.connection:
self.close() # reset open cursor and connection
self.connection = connect(servers,
error_trace=self.error_trace,
verify_ssl_cert=self.verify_ssl,
cert_file=self.cert_file,
key_file=self.key_file,
ca_cert=self.ca_cert_file,
username=self.username,
password=self.password,
schema=self.schema,
timeout=self.timeout)
self.cursor = self.connection.cursor()
self._fetch_session_info()
def _connect(self, servers):
""" connect to the given server, e.g.: \\connect localhost:4200 """
self._do_connect(servers.split(' '))
self._verify_connection(verbose=True)
def reconnect(self):
"""Connect with same configuration and to last connected servers"""
self._do_connect(self.last_connected_servers)
def _verify_connection(self, verbose=False):
results = []
failed = 0
client = self.connection.client
for server in client.server_pool.keys():
try:
infos = client.server_infos(server)
except ConnectionError as e:
failed += 1
results.append([server, None, '0.0.0', False, e.message])
else:
results.append(infos + (True, 'OK', ))
# sort by CONNECTED DESC, SERVER_URL
results.sort(key=itemgetter(3), reverse=True)
results.sort(key=itemgetter(0))
if verbose:
cols = ['server_url', 'node_name', 'version', 'connected', 'message']
self.pprint(results, cols)
if failed == len(results):
self.logger.critical('CONNECT ERROR')
else:
self.logger.info('CONNECT OK')
# Execute cluster/node checks only in verbose mode
if verbose:
SysInfoCommand.CLUSTER_INFO['information_schema_query'] = \
get_information_schema_query(self.connection.lowest_server_version)
# check for failing node and cluster checks
built_in_commands['check'](self, startup=True)
def _fetch_session_info(self):
if self.is_conn_available() \
and self.connection.lowest_server_version >= StrictVersion("2.0"):
user, schema = self._user_and_schema()
self.connect_info = ConnectionMeta(user, schema)
else:
self.connect_info = ConnectionMeta(None, None)
def _user_and_schema(self):
try:
# CURRENT_USER function is only available in Enterprise Edition.
self.cursor.execute("""
SELECT
current_user AS "user",
current_schema AS "schema";
""")
except ProgrammingError:
self.cursor.execute("""
SELECT
NULL AS "user",
current_schema AS "schema";
""")
return self.cursor.fetchone()
def _try_exec_cmd(self, line):
words = line.split(' ', 1)
if not words or not words[0]:
return False
cmd = self.commands.get(words[0].lower().rstrip(';'))
if len(words) > 1:
words[1] = words[1].rstrip(';')
if cmd:
try:
if isinstance(cmd, Command):
message = cmd(self, *words[1:])
else:
message = cmd(*words[1:])
except ProgrammingError as e:
# repl needs to handle 401 authorization errors
raise e
except TypeError as e:
self.logger.critical(getattr(e, 'message', None) or repr(e))
doc = cmd.__doc__
if doc and not doc.isspace():
self.logger.info('help: {0}'.format(words[0].lower()))
self.logger.info(cmd.__doc__)
except Exception as e:
self.logger.critical(getattr(e, 'message', None) or repr(e))
else:
if message:
self.logger.info(message)
return True
else:
self.logger.critical(
'Unknown command. Type \\? for a full list of available commands.')
return False
def _exec(self, line):
success = self.execute(line)
self.exit_code = self.exit_code or int(not success)
def _execute(self, statement):
try:
self.cursor.execute(statement)
return True
except ConnectionError as e:
if self.error_trace:
self.logger.warn(str(e))
self.logger.warn(
'Use \\connect <server> to connect to one or more servers first.')
except ProgrammingError as e:
self.logger.critical(e.message)
if self.error_trace and e.error_trace:
self.logger.critical('\n' + e.error_trace)
return False
def execute(self, statement):
success = self._execute(statement)
if not success:
return False
cur = self.cursor
duration = ''
if cur.duration > -1:
duration = ' ({0:.3f} sec)'.format(float(cur.duration) / 1000.0)
print_vars = {
'command': stmt_type(statement),
'rowcount': cur.rowcount,
's': 's'[cur.rowcount == 1:],
'duration': duration
}
if cur.description:
self.pprint(cur.fetchall(), [c[0] for c in cur.description])
tmpl = '{command} {rowcount} row{s} in set{duration}'
else:
tmpl = '{command} OK, {rowcount} row{s} affected {duration}'
self.logger.info(tmpl.format(**print_vars))
return True
def stmt_type(statement):
"""
Extract type of statement, e.g. SELECT, INSERT, UPDATE, DELETE, ...
"""
return re.findall(r'[\w]+', statement)[0].upper()
def get_stdin():
"""
Get data from stdin, if any
"""
if not sys.stdin.isatty():
for line in sys.stdin:
yield line
return
def get_information_schema_query(lowest_server_version):
schema_name = \
"table_schema" if lowest_server_version >= \
TABLE_SCHEMA_MIN_VERSION else "schema_name"
information_schema_query = \
""" select count(distinct(table_name))
as number_of_tables
from information_schema.tables
where {schema}
not in ('information_schema', 'sys', 'pg_catalog') """
return information_schema_query.format(schema=schema_name)
def main():
is_tty = sys.stdout.isatty()
printer = ColorPrinter(is_tty)
output_writer = OutputWriter(PrintWrapper(), is_tty)
config = parse_config_path()
conf = None
try:
conf = Configuration(config)
except ConfigurationError as e:
printer.warn(str(e))
parser = get_parser(output_writer.formats)
parser.print_usage()
sys.exit(1)
parser = get_parser(output_writer.formats, conf=conf)
try:
args = parse_args(parser)
except Exception as e:
printer.warn(str(e))
sys.exit(1)
output_writer.output_format = args.format
if args.version:
printer.info(crash_version)
sys.exit(0)
crate_hosts = [host_and_port(h) for h in args.hosts]
error_trace = args.verbose > 0
force_passwd_prompt = args.force_passwd_prompt
password = None
# If password prompt is not forced try to get it from env. variable.
if not force_passwd_prompt:
password = os.environ.get('CRATEPW', None)
# Prompt for password immediately to avoid that the first time trying to
# connect to the server runs into an `Unauthorized` excpetion
# is_tty = False
if force_passwd_prompt and not password and is_tty:
password = getpass()
# Tries to create a connection to the server.
# Prompts for the password automatically if the server only accepts
# password authentication.
cmd = None
try:
cmd = _create_shell(crate_hosts, error_trace, output_writer, is_tty,
args, password=password)
except (ProgrammingError, LocationParseError) as e:
if '401' in e.message and not force_passwd_prompt:
if is_tty:
password = getpass()
try:
cmd = _create_shell(crate_hosts, error_trace, output_writer,
is_tty, args, password=password)
except (ProgrammingError, LocationParseError) as ex:
printer.warn(str(ex))
sys.exit(1)
else:
raise e
except Exception as e:
printer.warn(str(e))
sys.exit(1)
cmd._verify_connection(verbose=error_trace)
if not cmd.is_conn_available():
sys.exit(1)
done = False
stdin_data = get_stdin()
if args.sysinfo:
cmd.output_writer.output_format = 'mixed'
cmd.sys_info_cmd.execute()
done = True
if args.command:
cmd.process(args.command)
done = True
elif stdin_data:
if cmd.process_iterable(stdin_data):
done = True
if not done:
from .repl import loop
loop(cmd, args.history)
conf.save()
sys.exit(cmd.exit())
def _create_shell(crate_hosts, error_trace, output_writer, is_tty, args,
timeout=None, password=None):
return CrateShell(crate_hosts,
error_trace=error_trace,
output_writer=output_writer,
is_tty=is_tty,
autocomplete=args.autocomplete,
autocapitalize=args.autocapitalize,
verify_ssl=args.verify_ssl,
cert_file=args.cert_file,
key_file=args.key_file,
ca_cert_file=args.ca_cert_file,
username=args.username,
password=password,
schema=args.schema,
timeout=timeout)
def file_with_permissions(path):
open(path, 'r').close()
return path
if __name__ == '__main__':
main()
|
crate/crash
|
src/crate/crash/command.py
|
CrateShell._show_tables
|
python
|
def _show_tables(self, *args):
v = self.connection.lowest_server_version
schema_name = \
"table_schema" if v >= TABLE_SCHEMA_MIN_VERSION else "schema_name"
table_filter = \
" AND table_type = 'BASE TABLE'" if v >= TABLE_TYPE_MIN_VERSION else ""
self._exec("SELECT format('%s.%s', {schema}, table_name) AS name "
"FROM information_schema.tables "
"WHERE {schema} NOT IN ('sys','information_schema', 'pg_catalog')"
"{table_filter}"
.format(schema=schema_name, table_filter=table_filter))
|
print the existing tables within the 'doc' schema
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/command.py#L321-L333
| null |
class CrateShell:
def __init__(self,
crate_hosts=['localhost:4200'],
output_writer=None,
error_trace=False,
is_tty=True,
autocomplete=True,
autocapitalize=True,
verify_ssl=True,
cert_file=None,
key_file=None,
ca_cert_file=None,
username=None,
password=None,
schema=None,
timeout=None):
self.last_connected_servers = []
self.exit_code = 0
self.expanded_mode = False
self.sys_info_cmd = SysInfoCommand(self)
self.commands = {
'q': self._quit,
'c': self._connect,
'connect': self._connect,
'dt': self._show_tables,
'sysinfo': self.sys_info_cmd.execute,
}
self.commands.update(built_in_commands)
self.logger = ColorPrinter(is_tty)
self.output_writer = output_writer or OutputWriter(PrintWrapper(), is_tty)
self.error_trace = error_trace
self._autocomplete = autocomplete
self._autocapitalize = autocapitalize
self.verify_ssl = verify_ssl
self.cert_file = cert_file
self.key_file = key_file
self.ca_cert_file = ca_cert_file
self.username = username
self.password = password
self.schema = schema
self.timeout = timeout
# establish connection
self.cursor = None
self.connection = None
self._do_connect(crate_hosts)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.exit()
def get_num_columns(self):
return 80
def should_autocomplete(self):
return self._autocomplete
def should_autocapitalize(self):
return self._autocapitalize
def pprint(self, rows, cols):
result = Result(cols,
rows,
self.cursor.rowcount,
self.cursor.duration,
self.get_num_columns())
self.output_writer.write(result)
def process_iterable(self, stdin):
any_statement = False
for statement in _parse_statements(stdin):
self._exec(statement)
any_statement = True
return any_statement
def process(self, text):
if text.startswith('\\'):
self._try_exec_cmd(text.lstrip('\\'))
else:
for statement in _parse_statements([text]):
self._exec(statement)
def exit(self):
self.close()
return self.exit_code
def close(self):
if self.is_closed():
raise ProgrammingError('CrateShell is already closed')
if self.cursor:
self.cursor.close()
self.cursor = None
if self.connection:
self.connection.close()
self.connection = None
def is_closed(self):
return not (self.cursor and self.connection)
@noargs_command
@noargs_command
def _quit(self, *args):
""" quit crash """
self.logger.warn('Bye!')
sys.exit(self.exit())
def is_conn_available(self):
return self.connection and \
self.connection.lowest_server_version != StrictVersion("0.0.0")
def _do_connect(self, servers):
self.last_connected_servers = servers
if self.cursor or self.connection:
self.close() # reset open cursor and connection
self.connection = connect(servers,
error_trace=self.error_trace,
verify_ssl_cert=self.verify_ssl,
cert_file=self.cert_file,
key_file=self.key_file,
ca_cert=self.ca_cert_file,
username=self.username,
password=self.password,
schema=self.schema,
timeout=self.timeout)
self.cursor = self.connection.cursor()
self._fetch_session_info()
def _connect(self, servers):
""" connect to the given server, e.g.: \\connect localhost:4200 """
self._do_connect(servers.split(' '))
self._verify_connection(verbose=True)
def reconnect(self):
"""Connect with same configuration and to last connected servers"""
self._do_connect(self.last_connected_servers)
def _verify_connection(self, verbose=False):
results = []
failed = 0
client = self.connection.client
for server in client.server_pool.keys():
try:
infos = client.server_infos(server)
except ConnectionError as e:
failed += 1
results.append([server, None, '0.0.0', False, e.message])
else:
results.append(infos + (True, 'OK', ))
# sort by CONNECTED DESC, SERVER_URL
results.sort(key=itemgetter(3), reverse=True)
results.sort(key=itemgetter(0))
if verbose:
cols = ['server_url', 'node_name', 'version', 'connected', 'message']
self.pprint(results, cols)
if failed == len(results):
self.logger.critical('CONNECT ERROR')
else:
self.logger.info('CONNECT OK')
# Execute cluster/node checks only in verbose mode
if verbose:
SysInfoCommand.CLUSTER_INFO['information_schema_query'] = \
get_information_schema_query(self.connection.lowest_server_version)
# check for failing node and cluster checks
built_in_commands['check'](self, startup=True)
def _fetch_session_info(self):
if self.is_conn_available() \
and self.connection.lowest_server_version >= StrictVersion("2.0"):
user, schema = self._user_and_schema()
self.connect_info = ConnectionMeta(user, schema)
else:
self.connect_info = ConnectionMeta(None, None)
def _user_and_schema(self):
try:
# CURRENT_USER function is only available in Enterprise Edition.
self.cursor.execute("""
SELECT
current_user AS "user",
current_schema AS "schema";
""")
except ProgrammingError:
self.cursor.execute("""
SELECT
NULL AS "user",
current_schema AS "schema";
""")
return self.cursor.fetchone()
def _try_exec_cmd(self, line):
words = line.split(' ', 1)
if not words or not words[0]:
return False
cmd = self.commands.get(words[0].lower().rstrip(';'))
if len(words) > 1:
words[1] = words[1].rstrip(';')
if cmd:
try:
if isinstance(cmd, Command):
message = cmd(self, *words[1:])
else:
message = cmd(*words[1:])
except ProgrammingError as e:
# repl needs to handle 401 authorization errors
raise e
except TypeError as e:
self.logger.critical(getattr(e, 'message', None) or repr(e))
doc = cmd.__doc__
if doc and not doc.isspace():
self.logger.info('help: {0}'.format(words[0].lower()))
self.logger.info(cmd.__doc__)
except Exception as e:
self.logger.critical(getattr(e, 'message', None) or repr(e))
else:
if message:
self.logger.info(message)
return True
else:
self.logger.critical(
'Unknown command. Type \\? for a full list of available commands.')
return False
def _exec(self, line):
success = self.execute(line)
self.exit_code = self.exit_code or int(not success)
def _execute(self, statement):
try:
self.cursor.execute(statement)
return True
except ConnectionError as e:
if self.error_trace:
self.logger.warn(str(e))
self.logger.warn(
'Use \\connect <server> to connect to one or more servers first.')
except ProgrammingError as e:
self.logger.critical(e.message)
if self.error_trace and e.error_trace:
self.logger.critical('\n' + e.error_trace)
return False
def execute(self, statement):
success = self._execute(statement)
if not success:
return False
cur = self.cursor
duration = ''
if cur.duration > -1:
duration = ' ({0:.3f} sec)'.format(float(cur.duration) / 1000.0)
print_vars = {
'command': stmt_type(statement),
'rowcount': cur.rowcount,
's': 's'[cur.rowcount == 1:],
'duration': duration
}
if cur.description:
self.pprint(cur.fetchall(), [c[0] for c in cur.description])
tmpl = '{command} {rowcount} row{s} in set{duration}'
else:
tmpl = '{command} OK, {rowcount} row{s} affected {duration}'
self.logger.info(tmpl.format(**print_vars))
return True
|
crate/crash
|
src/crate/crash/command.py
|
CrateShell._quit
|
python
|
def _quit(self, *args):
self.logger.warn('Bye!')
sys.exit(self.exit())
|
quit crash
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/command.py#L336-L339
| null |
class CrateShell:
def __init__(self,
crate_hosts=['localhost:4200'],
output_writer=None,
error_trace=False,
is_tty=True,
autocomplete=True,
autocapitalize=True,
verify_ssl=True,
cert_file=None,
key_file=None,
ca_cert_file=None,
username=None,
password=None,
schema=None,
timeout=None):
self.last_connected_servers = []
self.exit_code = 0
self.expanded_mode = False
self.sys_info_cmd = SysInfoCommand(self)
self.commands = {
'q': self._quit,
'c': self._connect,
'connect': self._connect,
'dt': self._show_tables,
'sysinfo': self.sys_info_cmd.execute,
}
self.commands.update(built_in_commands)
self.logger = ColorPrinter(is_tty)
self.output_writer = output_writer or OutputWriter(PrintWrapper(), is_tty)
self.error_trace = error_trace
self._autocomplete = autocomplete
self._autocapitalize = autocapitalize
self.verify_ssl = verify_ssl
self.cert_file = cert_file
self.key_file = key_file
self.ca_cert_file = ca_cert_file
self.username = username
self.password = password
self.schema = schema
self.timeout = timeout
# establish connection
self.cursor = None
self.connection = None
self._do_connect(crate_hosts)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.exit()
def get_num_columns(self):
return 80
def should_autocomplete(self):
return self._autocomplete
def should_autocapitalize(self):
return self._autocapitalize
def pprint(self, rows, cols):
result = Result(cols,
rows,
self.cursor.rowcount,
self.cursor.duration,
self.get_num_columns())
self.output_writer.write(result)
def process_iterable(self, stdin):
any_statement = False
for statement in _parse_statements(stdin):
self._exec(statement)
any_statement = True
return any_statement
def process(self, text):
if text.startswith('\\'):
self._try_exec_cmd(text.lstrip('\\'))
else:
for statement in _parse_statements([text]):
self._exec(statement)
def exit(self):
self.close()
return self.exit_code
def close(self):
if self.is_closed():
raise ProgrammingError('CrateShell is already closed')
if self.cursor:
self.cursor.close()
self.cursor = None
if self.connection:
self.connection.close()
self.connection = None
def is_closed(self):
return not (self.cursor and self.connection)
@noargs_command
def _show_tables(self, *args):
""" print the existing tables within the 'doc' schema """
v = self.connection.lowest_server_version
schema_name = \
"table_schema" if v >= TABLE_SCHEMA_MIN_VERSION else "schema_name"
table_filter = \
" AND table_type = 'BASE TABLE'" if v >= TABLE_TYPE_MIN_VERSION else ""
self._exec("SELECT format('%s.%s', {schema}, table_name) AS name "
"FROM information_schema.tables "
"WHERE {schema} NOT IN ('sys','information_schema', 'pg_catalog')"
"{table_filter}"
.format(schema=schema_name, table_filter=table_filter))
@noargs_command
def is_conn_available(self):
return self.connection and \
self.connection.lowest_server_version != StrictVersion("0.0.0")
def _do_connect(self, servers):
self.last_connected_servers = servers
if self.cursor or self.connection:
self.close() # reset open cursor and connection
self.connection = connect(servers,
error_trace=self.error_trace,
verify_ssl_cert=self.verify_ssl,
cert_file=self.cert_file,
key_file=self.key_file,
ca_cert=self.ca_cert_file,
username=self.username,
password=self.password,
schema=self.schema,
timeout=self.timeout)
self.cursor = self.connection.cursor()
self._fetch_session_info()
def _connect(self, servers):
""" connect to the given server, e.g.: \\connect localhost:4200 """
self._do_connect(servers.split(' '))
self._verify_connection(verbose=True)
def reconnect(self):
"""Connect with same configuration and to last connected servers"""
self._do_connect(self.last_connected_servers)
def _verify_connection(self, verbose=False):
results = []
failed = 0
client = self.connection.client
for server in client.server_pool.keys():
try:
infos = client.server_infos(server)
except ConnectionError as e:
failed += 1
results.append([server, None, '0.0.0', False, e.message])
else:
results.append(infos + (True, 'OK', ))
# sort by CONNECTED DESC, SERVER_URL
results.sort(key=itemgetter(3), reverse=True)
results.sort(key=itemgetter(0))
if verbose:
cols = ['server_url', 'node_name', 'version', 'connected', 'message']
self.pprint(results, cols)
if failed == len(results):
self.logger.critical('CONNECT ERROR')
else:
self.logger.info('CONNECT OK')
# Execute cluster/node checks only in verbose mode
if verbose:
SysInfoCommand.CLUSTER_INFO['information_schema_query'] = \
get_information_schema_query(self.connection.lowest_server_version)
# check for failing node and cluster checks
built_in_commands['check'](self, startup=True)
def _fetch_session_info(self):
if self.is_conn_available() \
and self.connection.lowest_server_version >= StrictVersion("2.0"):
user, schema = self._user_and_schema()
self.connect_info = ConnectionMeta(user, schema)
else:
self.connect_info = ConnectionMeta(None, None)
def _user_and_schema(self):
try:
# CURRENT_USER function is only available in Enterprise Edition.
self.cursor.execute("""
SELECT
current_user AS "user",
current_schema AS "schema";
""")
except ProgrammingError:
self.cursor.execute("""
SELECT
NULL AS "user",
current_schema AS "schema";
""")
return self.cursor.fetchone()
def _try_exec_cmd(self, line):
words = line.split(' ', 1)
if not words or not words[0]:
return False
cmd = self.commands.get(words[0].lower().rstrip(';'))
if len(words) > 1:
words[1] = words[1].rstrip(';')
if cmd:
try:
if isinstance(cmd, Command):
message = cmd(self, *words[1:])
else:
message = cmd(*words[1:])
except ProgrammingError as e:
# repl needs to handle 401 authorization errors
raise e
except TypeError as e:
self.logger.critical(getattr(e, 'message', None) or repr(e))
doc = cmd.__doc__
if doc and not doc.isspace():
self.logger.info('help: {0}'.format(words[0].lower()))
self.logger.info(cmd.__doc__)
except Exception as e:
self.logger.critical(getattr(e, 'message', None) or repr(e))
else:
if message:
self.logger.info(message)
return True
else:
self.logger.critical(
'Unknown command. Type \\? for a full list of available commands.')
return False
def _exec(self, line):
success = self.execute(line)
self.exit_code = self.exit_code or int(not success)
def _execute(self, statement):
try:
self.cursor.execute(statement)
return True
except ConnectionError as e:
if self.error_trace:
self.logger.warn(str(e))
self.logger.warn(
'Use \\connect <server> to connect to one or more servers first.')
except ProgrammingError as e:
self.logger.critical(e.message)
if self.error_trace and e.error_trace:
self.logger.critical('\n' + e.error_trace)
return False
def execute(self, statement):
success = self._execute(statement)
if not success:
return False
cur = self.cursor
duration = ''
if cur.duration > -1:
duration = ' ({0:.3f} sec)'.format(float(cur.duration) / 1000.0)
print_vars = {
'command': stmt_type(statement),
'rowcount': cur.rowcount,
's': 's'[cur.rowcount == 1:],
'duration': duration
}
if cur.description:
self.pprint(cur.fetchall(), [c[0] for c in cur.description])
tmpl = '{command} {rowcount} row{s} in set{duration}'
else:
tmpl = '{command} OK, {rowcount} row{s} affected {duration}'
self.logger.info(tmpl.format(**print_vars))
return True
|
crate/crash
|
src/crate/crash/command.py
|
CrateShell._connect
|
python
|
def _connect(self, servers):
self._do_connect(servers.split(' '))
self._verify_connection(verbose=True)
|
connect to the given server, e.g.: \\connect localhost:4200
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/command.py#L362-L365
| null |
class CrateShell:
def __init__(self,
crate_hosts=['localhost:4200'],
output_writer=None,
error_trace=False,
is_tty=True,
autocomplete=True,
autocapitalize=True,
verify_ssl=True,
cert_file=None,
key_file=None,
ca_cert_file=None,
username=None,
password=None,
schema=None,
timeout=None):
self.last_connected_servers = []
self.exit_code = 0
self.expanded_mode = False
self.sys_info_cmd = SysInfoCommand(self)
self.commands = {
'q': self._quit,
'c': self._connect,
'connect': self._connect,
'dt': self._show_tables,
'sysinfo': self.sys_info_cmd.execute,
}
self.commands.update(built_in_commands)
self.logger = ColorPrinter(is_tty)
self.output_writer = output_writer or OutputWriter(PrintWrapper(), is_tty)
self.error_trace = error_trace
self._autocomplete = autocomplete
self._autocapitalize = autocapitalize
self.verify_ssl = verify_ssl
self.cert_file = cert_file
self.key_file = key_file
self.ca_cert_file = ca_cert_file
self.username = username
self.password = password
self.schema = schema
self.timeout = timeout
# establish connection
self.cursor = None
self.connection = None
self._do_connect(crate_hosts)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.exit()
def get_num_columns(self):
return 80
def should_autocomplete(self):
return self._autocomplete
def should_autocapitalize(self):
return self._autocapitalize
def pprint(self, rows, cols):
result = Result(cols,
rows,
self.cursor.rowcount,
self.cursor.duration,
self.get_num_columns())
self.output_writer.write(result)
def process_iterable(self, stdin):
any_statement = False
for statement in _parse_statements(stdin):
self._exec(statement)
any_statement = True
return any_statement
def process(self, text):
if text.startswith('\\'):
self._try_exec_cmd(text.lstrip('\\'))
else:
for statement in _parse_statements([text]):
self._exec(statement)
def exit(self):
self.close()
return self.exit_code
def close(self):
if self.is_closed():
raise ProgrammingError('CrateShell is already closed')
if self.cursor:
self.cursor.close()
self.cursor = None
if self.connection:
self.connection.close()
self.connection = None
def is_closed(self):
return not (self.cursor and self.connection)
@noargs_command
def _show_tables(self, *args):
""" print the existing tables within the 'doc' schema """
v = self.connection.lowest_server_version
schema_name = \
"table_schema" if v >= TABLE_SCHEMA_MIN_VERSION else "schema_name"
table_filter = \
" AND table_type = 'BASE TABLE'" if v >= TABLE_TYPE_MIN_VERSION else ""
self._exec("SELECT format('%s.%s', {schema}, table_name) AS name "
"FROM information_schema.tables "
"WHERE {schema} NOT IN ('sys','information_schema', 'pg_catalog')"
"{table_filter}"
.format(schema=schema_name, table_filter=table_filter))
@noargs_command
def _quit(self, *args):
""" quit crash """
self.logger.warn('Bye!')
sys.exit(self.exit())
def is_conn_available(self):
return self.connection and \
self.connection.lowest_server_version != StrictVersion("0.0.0")
def _do_connect(self, servers):
self.last_connected_servers = servers
if self.cursor or self.connection:
self.close() # reset open cursor and connection
self.connection = connect(servers,
error_trace=self.error_trace,
verify_ssl_cert=self.verify_ssl,
cert_file=self.cert_file,
key_file=self.key_file,
ca_cert=self.ca_cert_file,
username=self.username,
password=self.password,
schema=self.schema,
timeout=self.timeout)
self.cursor = self.connection.cursor()
self._fetch_session_info()
def reconnect(self):
"""Connect with same configuration and to last connected servers"""
self._do_connect(self.last_connected_servers)
def _verify_connection(self, verbose=False):
results = []
failed = 0
client = self.connection.client
for server in client.server_pool.keys():
try:
infos = client.server_infos(server)
except ConnectionError as e:
failed += 1
results.append([server, None, '0.0.0', False, e.message])
else:
results.append(infos + (True, 'OK', ))
# sort by CONNECTED DESC, SERVER_URL
results.sort(key=itemgetter(3), reverse=True)
results.sort(key=itemgetter(0))
if verbose:
cols = ['server_url', 'node_name', 'version', 'connected', 'message']
self.pprint(results, cols)
if failed == len(results):
self.logger.critical('CONNECT ERROR')
else:
self.logger.info('CONNECT OK')
# Execute cluster/node checks only in verbose mode
if verbose:
SysInfoCommand.CLUSTER_INFO['information_schema_query'] = \
get_information_schema_query(self.connection.lowest_server_version)
# check for failing node and cluster checks
built_in_commands['check'](self, startup=True)
def _fetch_session_info(self):
if self.is_conn_available() \
and self.connection.lowest_server_version >= StrictVersion("2.0"):
user, schema = self._user_and_schema()
self.connect_info = ConnectionMeta(user, schema)
else:
self.connect_info = ConnectionMeta(None, None)
def _user_and_schema(self):
try:
# CURRENT_USER function is only available in Enterprise Edition.
self.cursor.execute("""
SELECT
current_user AS "user",
current_schema AS "schema";
""")
except ProgrammingError:
self.cursor.execute("""
SELECT
NULL AS "user",
current_schema AS "schema";
""")
return self.cursor.fetchone()
def _try_exec_cmd(self, line):
words = line.split(' ', 1)
if not words or not words[0]:
return False
cmd = self.commands.get(words[0].lower().rstrip(';'))
if len(words) > 1:
words[1] = words[1].rstrip(';')
if cmd:
try:
if isinstance(cmd, Command):
message = cmd(self, *words[1:])
else:
message = cmd(*words[1:])
except ProgrammingError as e:
# repl needs to handle 401 authorization errors
raise e
except TypeError as e:
self.logger.critical(getattr(e, 'message', None) or repr(e))
doc = cmd.__doc__
if doc and not doc.isspace():
self.logger.info('help: {0}'.format(words[0].lower()))
self.logger.info(cmd.__doc__)
except Exception as e:
self.logger.critical(getattr(e, 'message', None) or repr(e))
else:
if message:
self.logger.info(message)
return True
else:
self.logger.critical(
'Unknown command. Type \\? for a full list of available commands.')
return False
def _exec(self, line):
success = self.execute(line)
self.exit_code = self.exit_code or int(not success)
def _execute(self, statement):
try:
self.cursor.execute(statement)
return True
except ConnectionError as e:
if self.error_trace:
self.logger.warn(str(e))
self.logger.warn(
'Use \\connect <server> to connect to one or more servers first.')
except ProgrammingError as e:
self.logger.critical(e.message)
if self.error_trace and e.error_trace:
self.logger.critical('\n' + e.error_trace)
return False
def execute(self, statement):
success = self._execute(statement)
if not success:
return False
cur = self.cursor
duration = ''
if cur.duration > -1:
duration = ' ({0:.3f} sec)'.format(float(cur.duration) / 1000.0)
print_vars = {
'command': stmt_type(statement),
'rowcount': cur.rowcount,
's': 's'[cur.rowcount == 1:],
'duration': duration
}
if cur.description:
self.pprint(cur.fetchall(), [c[0] for c in cur.description])
tmpl = '{command} {rowcount} row{s} in set{duration}'
else:
tmpl = '{command} OK, {rowcount} row{s} affected {duration}'
self.logger.info(tmpl.format(**print_vars))
return True
|
crate/crash
|
src/crate/crash/sysinfo.py
|
SysInfoCommand.execute
|
python
|
def execute(self):
if not self.cmd.is_conn_available():
return
if self.cmd.connection.lowest_server_version >= SYSINFO_MIN_VERSION:
success, rows = self._sys_info()
self.cmd.exit_code = self.cmd.exit_code or int(not success)
if success:
for result in rows:
self.cmd.pprint(result.rows, result.cols)
self.cmd.logger.info(
"For debugging purposes you can send above listed information to support@crate.io")
else:
tmpl = 'Crate {version} does not support the cluster "sysinfo" command'
self.cmd.logger.warn(tmpl
.format(version=self.cmd.connection.lowest_server_version))
|
print system and cluster info
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/sysinfo.py#L72-L87
| null |
class SysInfoCommand(object):
CLUSTER_INFO = {
'shards_query': """
select count(*) as number_of_shards, cast(sum(num_docs) as long) as number_of_records
from sys.shards
where "primary" = true
""",
'nodes_query': """
select count(*) as number_of_nodes
from sys.nodes
""",
}
NODES_INFO = [
"""select name,
hostname,
version['number'] as crate_version,
round(heap['max'] / 1024.0 / 1024.0)
as total_heap_mb,
round((mem['free'] + mem['used']) / 1024.0 / 1024.0)
as total_memory_mb,
os_info['available_processors'] as cpus,
os['uptime'] /1000 as uptime_s,
format('%s - %s (%s)',
os_info['name'],
os_info['version'],
os_info['arch']) as os_info,
format('java version \"%s\" %s %s (build %s)',
os_info['jvm']['version'],
os_info['jvm']['vm_vendor'],
os_info['jvm']['vm_name'],
os_info['jvm']['vm_version']) as jvm_info
from sys.nodes
order by os['uptime'] desc""",
]
def __init__(self, cmd):
self.cmd = cmd
def _sys_info(self):
result = []
success = self._cluster_info(result)
success &= self._nodes_info(result)
if success is False:
result = []
return (success, result)
def _cluster_info(self, result):
rows = []
cols = []
for query in SysInfoCommand.CLUSTER_INFO:
success = self.cmd._execute(SysInfoCommand.CLUSTER_INFO[query])
if success is False:
return success
rows.extend(self.cmd.cursor.fetchall()[0])
cols.extend([c[0] for c in self.cmd.cursor.description])
result.append(Result([rows], cols))
return True
def _nodes_info(self, result):
success = self.cmd._execute(SysInfoCommand.NODES_INFO[0])
if success:
result.append(Result(self.cmd.cursor.fetchall(),
[c[0] for c in self.cmd.cursor.description]))
return success
|
crate/crash
|
src/crate/crash/config.py
|
Configuration.bwc_bool_transform_from
|
python
|
def bwc_bool_transform_from(cls, x):
if x.lower() == 'true':
return True
elif x.lower() == 'false':
return False
return bool(int(x))
|
Read boolean values from old config files correctly
and interpret 'True' and 'False' as correct booleans.
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/config.py#L44-L53
| null |
class Configuration(object):
"""
Model that reads default values for the CLI argument parser
from a configuration file.
"""
@classmethod
def __init__(self, path):
self.type_mapping = {
str: partial(self._get_or_set,
transform_from=lambda x: str(x),
transform_to=lambda x: str(x)),
int: partial(self._get_or_set,
transform_from=lambda x: int(x),
transform_to=lambda x: str(x)),
bool: partial(self._get_or_set,
transform_from=Configuration.bwc_bool_transform_from,
transform_to=lambda x: str(int(x))),
list: partial(self._get_or_set,
transform_from=lambda x: x.split('\n'),
transform_to=lambda x: '\n'.join(x)),
}
if not path.endswith('.cfg'):
raise ConfigurationError('Path to configuration file needs to end with .cfg')
self.path = path
self.cfg = configparser.ConfigParser()
self.read_and_create_if_necessary()
self.add_crash_section_if_necessary()
def read_and_create_if_necessary(self):
dir = os.path.dirname(self.path)
if dir and not os.path.exists(dir):
os.makedirs(dir)
if not os.path.exists(self.path):
self.save()
self.cfg.read(self.path)
def add_crash_section_if_necessary(self):
if 'crash' not in self.cfg.sections():
self.cfg.add_section('crash')
def get_or_set(self, key, default_value):
option_type = type(default_value)
if option_type in self.type_mapping:
return self.type_mapping[option_type](key, default_value)
return self._get_or_set(key, default_value)
def _get_or_set(self, key,
default_value=None,
transform_from=lambda x: x,
transform_to=lambda x: x):
assert 'crash' in self.cfg.sections()
value = None
try:
value = self.cfg.get('crash', key)
except configparser.NoOptionError:
if default_value is not None:
self.cfg.set('crash', key, transform_to(default_value))
return default_value if value is None else transform_from(value)
def save(self):
with open(self.path, 'w') as fp:
self.cfg.write(fp)
|
crate/crash
|
src/crate/crash/outputs.py
|
_transform_field
|
python
|
def _transform_field(field):
if isinstance(field, bool):
return TRUE if field else FALSE
elif isinstance(field, (list, dict)):
return json.dumps(field, sort_keys=True, ensure_ascii=False)
else:
return field
|
transform field for displaying
|
train
|
https://github.com/crate/crash/blob/32d3ddc78fd2f7848ed2b99d9cd8889e322528d9/src/crate/crash/outputs.py#L42-L49
| null |
import csv
import json
import sys
from pygments import highlight
from pygments.lexers.data import JsonLexer
from pygments.formatters import TerminalFormatter
from colorama import Fore, Style
from .tabulate import TableFormat, Line as TabulateLine, DataRow, tabulate, float_format
if sys.version_info[:2] == (2, 6):
OrderedDict = dict
else:
from collections import OrderedDict
NULL = 'NULL'
TRUE = 'TRUE'
FALSE = 'FALSE'
crate_fmt = TableFormat(lineabove=TabulateLine("+", "-", "+", "+"),
linebelowheader=TabulateLine("+", "-", "+", "+"),
linebetweenrows=None,
linebelow=TabulateLine("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=None)
def _val_len(v):
if not v:
return 4 # will be displayed as NULL
if isinstance(v, (list, dict)):
return len(json.dumps(v))
if hasattr(v, '__len__'):
return len(v)
return len(str(v))
class OutputWriter(object):
def __init__(self, writer, is_tty):
self.is_tty = is_tty
self._json_lexer = JsonLexer()
self._formatter = TerminalFormatter()
self.writer = writer
self._output_format = 'tabular'
self._formats = {
'tabular': self.tabular,
'json': self.json,
'csv': self.csv,
'raw': self.raw,
'mixed': self.mixed,
'dynamic': self.dynamic,
'json_row': self.json_row
}
@property
def formats(self):
return self._formats.keys()
@property
def output_format(self):
return self._output_format
@output_format.setter
def output_format(self, fmt):
if fmt not in self.formats:
raise ValueError('format: {0} is invalid. Valid formats are: {1}')
self._output_format = fmt
def to_json_str(self, obj, **kwargs):
json_str = json.dumps(obj, indent=2, **kwargs)
if self.is_tty:
return highlight(json_str, self._json_lexer, self._formatter).rstrip('\n')
return json_str
def write(self, result):
output_f = self._formats[self.output_format]
output = output_f(result)
if output:
for line in output:
self.writer.write(line)
self.writer.write('\n')
def raw(self, result):
duration = result.duration
yield self.to_json_str(dict(
rows=result.rows,
cols=result.cols,
rowcount=result.rowcount,
duration=duration > -1 and float(duration) / 1000.0 or duration,
))
def tabular(self, result):
rows = [list(map(_transform_field, row)) for row in result.rows]
return tabulate(rows,
headers=result.cols,
tablefmt=crate_fmt,
floatfmt="",
missingval=NULL)
def mixed(self, result):
padding = max_col_len = max(len(c) for c in result.cols)
if self.is_tty:
max_col_len += len(Fore.YELLOW + Style.RESET_ALL)
tmpl = '{0:<' + str(max_col_len) + '} | {1}'
row_delimiter = '-' * result.output_width
for row in result.rows:
for i, c in enumerate(result.cols):
val = self._mixed_format(row[i], max_col_len, padding)
if self.is_tty:
c = Fore.YELLOW + c + Style.RESET_ALL
yield tmpl.format(c, val)
yield row_delimiter + '\n'
def json(self, result):
obj = [OrderedDict(zip(result.cols, x)) for x in result.rows]
yield self.to_json_str(obj)
def csv(self, result):
wr = csv.writer(self.writer, doublequote=False, escapechar='\\', quotechar="'")
wr.writerow(result.cols)
def json_dumps(r):
t = type(r)
return json.dumps(r, sort_keys=True) if t == dict or t == list else r
for row in iter(result.rows):
wr.writerow(list(map(json_dumps, row)))
def dynamic(self, result):
max_cols_required = sum(len(c) + 4 for c in result.cols) + 1
for row in result.rows:
cols_required = sum(_val_len(v) + 4 for v in row) + 1
if cols_required > max_cols_required:
max_cols_required = cols_required
if max_cols_required > result.output_width:
return self.mixed(result)
else:
return self.tabular(result)
def json_row(self, result):
rows = (json.dumps(dict(zip(result.cols, x))) for x in result.rows)
for row in rows:
if self.is_tty:
yield highlight(row, self._json_lexer, self._formatter)
else:
yield row + '\n'
def _mixed_format(self, value, max_col_len, padding):
if value is None:
value = NULL
elif isinstance(value, (list, dict)):
self.to_json_str(value, sort_keys=True)
json_str = json.dumps(value, indent=2, sort_keys=True)
lines = json_str.split('\n')
lines[-1] = ' ' + lines[-1]
lines = [lines[0]] + [' ' * padding + ' |' + l for l in lines[1:]]
value = '\n'.join(lines)
elif isinstance(value, float):
value = float_format(value)
elif isinstance(value, int):
value = str(value)
return value + '\n'
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/application_policy.py
|
ApplicationPolicy.validate
|
python
|
def validate(self):
if not self.principals:
raise InvalidApplicationPolicyError(error_message='principals not provided')
if not self.actions:
raise InvalidApplicationPolicyError(error_message='actions not provided')
if any(not self._PRINCIPAL_PATTERN.match(p) for p in self.principals):
raise InvalidApplicationPolicyError(
error_message='principal should be 12-digit AWS account ID or "*"')
unsupported_actions = sorted(set(self.actions) - set(self.SUPPORTED_ACTIONS))
if unsupported_actions:
raise InvalidApplicationPolicyError(
error_message='{} not supported'.format(', '.join(unsupported_actions)))
return True
|
Check if the formats of principals and actions are valid.
:return: True, if the policy is valid
:raises: InvalidApplicationPolicyError
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/application_policy.py#L44-L66
| null |
class ApplicationPolicy(object):
"""Class representing SAR application policy."""
# Supported actions for setting SAR application permissions
GET_APPLICATION = 'GetApplication'
LIST_APPLICATION_DEPENDENCIES = 'ListApplicationDependencies'
CREATE_CLOUD_FORMATION_CHANGE_SET = 'CreateCloudFormationChangeSet'
CREATE_CLOUD_FORMATION_TEMPLATE = 'CreateCloudFormationTemplate'
LIST_APPLICATION_VERSIONS = 'ListApplicationVersions'
SEARCH_APPLICATIONS = 'SearchApplications'
DEPLOY = 'Deploy'
SUPPORTED_ACTIONS = [
GET_APPLICATION,
LIST_APPLICATION_DEPENDENCIES,
CREATE_CLOUD_FORMATION_CHANGE_SET,
CREATE_CLOUD_FORMATION_TEMPLATE,
LIST_APPLICATION_VERSIONS,
SEARCH_APPLICATIONS,
DEPLOY
]
_PRINCIPAL_PATTERN = re.compile(r'^([0-9]{12}|\*)$')
def __init__(self, principals, actions):
"""
Initialize the object given the principals and actions.
:param principals: List of AWS account IDs, or *
:type principals: list of str
:param actions: List of actions supported by SAR
:type actions: list of str
"""
self.principals = principals
self.actions = actions
def to_statement(self):
"""
Convert to a policy statement dictionary.
:return: Dictionary containing Actions and Principals
:rtype: dict
"""
return {
'Principals': self.principals,
'Actions': self.actions
}
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/publish.py
|
publish_application
|
python
|
def publish_application(template, sar_client=None):
if not template:
raise ValueError('Require SAM template to publish the application')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
template_dict = _get_template_dict(template)
app_metadata = get_app_metadata(template_dict)
stripped_template_dict = strip_app_metadata(template_dict)
stripped_template = yaml_dump(stripped_template_dict)
try:
request = _create_application_request(app_metadata, stripped_template)
response = sar_client.create_application(**request)
application_id = response['ApplicationId']
actions = [CREATE_APPLICATION]
except ClientError as e:
if not _is_conflict_exception(e):
raise _wrap_client_error(e)
# Update the application if it already exists
error_message = e.response['Error']['Message']
application_id = parse_application_id(error_message)
try:
request = _update_application_request(app_metadata, application_id)
sar_client.update_application(**request)
actions = [UPDATE_APPLICATION]
except ClientError as e:
raise _wrap_client_error(e)
# Create application version if semantic version is specified
if app_metadata.semantic_version:
try:
request = _create_application_version_request(app_metadata, application_id, stripped_template)
sar_client.create_application_version(**request)
actions.append(CREATE_APPLICATION_VERSION)
except ClientError as e:
if not _is_conflict_exception(e):
raise _wrap_client_error(e)
return {
'application_id': application_id,
'actions': actions,
'details': _get_publish_details(actions, app_metadata.template_dict)
}
|
Create a new application or new application version in SAR.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:return: Dictionary containing application id, actions taken, and updated details
:rtype: dict
:raises ValueError
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/publish.py#L21-L76
|
[
"def yaml_dump(dict_to_dump):\n \"\"\"\n Dump the dictionary as a YAML document.\n\n :param dict_to_dump: Data to be serialized as YAML\n :type dict_to_dump: dict\n :return: YAML document\n :rtype: str\n \"\"\"\n yaml.SafeDumper.add_representer(OrderedDict, _dict_representer)\n return yaml.safe_dump(dict_to_dump, default_flow_style=False)\n",
"def get_app_metadata(template_dict):\n \"\"\"\n Get the application metadata from a SAM template.\n\n :param template_dict: SAM template as a dictionary\n :type template_dict: dict\n :return: Application metadata as defined in the template\n :rtype: ApplicationMetadata\n :raises ApplicationMetadataNotFoundError\n \"\"\"\n if SERVERLESS_REPO_APPLICATION in template_dict.get(METADATA, {}):\n app_metadata_dict = template_dict.get(METADATA).get(SERVERLESS_REPO_APPLICATION)\n return ApplicationMetadata(app_metadata_dict)\n\n raise ApplicationMetadataNotFoundError(\n error_message='missing {} section in template Metadata'.format(SERVERLESS_REPO_APPLICATION))\n",
"def parse_application_id(text):\n \"\"\"\n Extract the application id from input text.\n\n :param text: text to parse\n :type text: str\n :return: application id if found in the input\n :rtype: str\n \"\"\"\n result = re.search(APPLICATION_ID_PATTERN, text)\n return result.group(0) if result else None\n",
"def strip_app_metadata(template_dict):\n \"\"\"\n Strip the \"AWS::ServerlessRepo::Application\" metadata section from template.\n\n :param template_dict: SAM template as a dictionary\n :type template_dict: dict\n :return: stripped template content\n :rtype: str\n \"\"\"\n if SERVERLESS_REPO_APPLICATION not in template_dict.get(METADATA, {}):\n return template_dict\n\n template_dict_copy = copy.deepcopy(template_dict)\n\n # strip the whole metadata section if SERVERLESS_REPO_APPLICATION is the only key in it\n if not [k for k in template_dict_copy.get(METADATA) if k != SERVERLESS_REPO_APPLICATION]:\n template_dict_copy.pop(METADATA, None)\n else:\n template_dict_copy.get(METADATA).pop(SERVERLESS_REPO_APPLICATION, None)\n\n return template_dict_copy\n",
"def _get_template_dict(template):\n \"\"\"\n Parse string template and or copy dictionary template.\n\n :param template: Content of a packaged YAML or JSON SAM template\n :type template: str_or_dict\n :return: Template as a dictionary\n :rtype: dict\n :raises ValueError\n \"\"\"\n if isinstance(template, str):\n return parse_template(template)\n\n if isinstance(template, dict):\n return copy.deepcopy(template)\n\n raise ValueError('Input template should be a string or dictionary')\n",
"def _create_application_request(app_metadata, template):\n \"\"\"\n Construct the request body to create application.\n\n :param app_metadata: Object containing app metadata\n :type app_metadata: ApplicationMetadata\n :param template: A packaged YAML or JSON SAM template\n :type template: str\n :return: SAR CreateApplication request body\n :rtype: dict\n \"\"\"\n app_metadata.validate(['author', 'description', 'name'])\n request = {\n 'Author': app_metadata.author,\n 'Description': app_metadata.description,\n 'HomePageUrl': app_metadata.home_page_url,\n 'Labels': app_metadata.labels,\n 'LicenseUrl': app_metadata.license_url,\n 'Name': app_metadata.name,\n 'ReadmeUrl': app_metadata.readme_url,\n 'SemanticVersion': app_metadata.semantic_version,\n 'SourceCodeUrl': app_metadata.source_code_url,\n 'SpdxLicenseId': app_metadata.spdx_license_id,\n 'TemplateBody': template\n }\n # Remove None values\n return {k: v for k, v in request.items() if v}\n",
"def _is_conflict_exception(e):\n \"\"\"\n Check whether the botocore ClientError is ConflictException.\n\n :param e: botocore exception\n :type e: ClientError\n :return: True if e is ConflictException\n \"\"\"\n error_code = e.response['Error']['Code']\n return error_code == 'ConflictException'\n",
"def _wrap_client_error(e):\n \"\"\"\n Wrap botocore ClientError exception into ServerlessRepoClientError.\n\n :param e: botocore exception\n :type e: ClientError\n :return: S3PermissionsRequired or InvalidS3UriError or general ServerlessRepoClientError\n \"\"\"\n error_code = e.response['Error']['Code']\n message = e.response['Error']['Message']\n\n if error_code == 'BadRequestException':\n if \"Failed to copy S3 object. Access denied:\" in message:\n match = re.search('bucket=(.+?), key=(.+?)$', message)\n if match:\n return S3PermissionsRequired(bucket=match.group(1), key=match.group(2))\n if \"Invalid S3 URI\" in message:\n return InvalidS3UriError(message=message)\n\n return ServerlessRepoClientError(message=message)\n",
"def _update_application_request(app_metadata, application_id):\n \"\"\"\n Construct the request body to update application.\n\n :param app_metadata: Object containing app metadata\n :type app_metadata: ApplicationMetadata\n :param application_id: The Amazon Resource Name (ARN) of the application\n :type application_id: str\n :return: SAR UpdateApplication request body\n :rtype: dict\n \"\"\"\n request = {\n 'ApplicationId': application_id,\n 'Author': app_metadata.author,\n 'Description': app_metadata.description,\n 'HomePageUrl': app_metadata.home_page_url,\n 'Labels': app_metadata.labels,\n 'ReadmeUrl': app_metadata.readme_url\n }\n return {k: v for k, v in request.items() if v}\n",
"def _create_application_version_request(app_metadata, application_id, template):\n \"\"\"\n Construct the request body to create application version.\n\n :param app_metadata: Object containing app metadata\n :type app_metadata: ApplicationMetadata\n :param application_id: The Amazon Resource Name (ARN) of the application\n :type application_id: str\n :param template: A packaged YAML or JSON SAM template\n :type template: str\n :return: SAR CreateApplicationVersion request body\n :rtype: dict\n \"\"\"\n app_metadata.validate(['semantic_version'])\n request = {\n 'ApplicationId': application_id,\n 'SemanticVersion': app_metadata.semantic_version,\n 'SourceCodeUrl': app_metadata.source_code_url,\n 'TemplateBody': template\n }\n return {k: v for k, v in request.items() if v}\n",
"def _get_publish_details(actions, app_metadata_template):\n \"\"\"\n Get the changed application details after publishing.\n\n :param actions: Actions taken during publishing\n :type actions: list of str\n :param app_metadata_template: Original template definitions of app metadata\n :type app_metadata_template: dict\n :return: Updated fields and values of the application\n :rtype: dict\n \"\"\"\n if actions == [CREATE_APPLICATION]:\n return {k: v for k, v in app_metadata_template.items() if v}\n\n include_keys = [\n ApplicationMetadata.AUTHOR,\n ApplicationMetadata.DESCRIPTION,\n ApplicationMetadata.HOME_PAGE_URL,\n ApplicationMetadata.LABELS,\n ApplicationMetadata.README_URL\n ]\n\n if CREATE_APPLICATION_VERSION in actions:\n # SemanticVersion and SourceCodeUrl can only be updated by creating a new version\n additional_keys = [ApplicationMetadata.SEMANTIC_VERSION, ApplicationMetadata.SOURCE_CODE_URL]\n include_keys.extend(additional_keys)\n return {k: v for k, v in app_metadata_template.items() if k in include_keys and v}\n"
] |
"""Module containing functions to publish or update application."""
import re
import copy
import boto3
from botocore.exceptions import ClientError
from .application_metadata import ApplicationMetadata
from .parser import (
yaml_dump, parse_template, get_app_metadata,
parse_application_id, strip_app_metadata
)
from .exceptions import ServerlessRepoClientError, S3PermissionsRequired, InvalidS3UriError
CREATE_APPLICATION = 'CREATE_APPLICATION'
UPDATE_APPLICATION = 'UPDATE_APPLICATION'
CREATE_APPLICATION_VERSION = 'CREATE_APPLICATION_VERSION'
def update_application_metadata(template, application_id, sar_client=None):
"""
Update the application metadata.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
"""
if not template or not application_id:
raise ValueError('Require SAM template and application ID to update application metadata')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
template_dict = _get_template_dict(template)
app_metadata = get_app_metadata(template_dict)
request = _update_application_request(app_metadata, application_id)
sar_client.update_application(**request)
def _get_template_dict(template):
"""
Parse string template and or copy dictionary template.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:return: Template as a dictionary
:rtype: dict
:raises ValueError
"""
if isinstance(template, str):
return parse_template(template)
if isinstance(template, dict):
return copy.deepcopy(template)
raise ValueError('Input template should be a string or dictionary')
def _create_application_request(app_metadata, template):
"""
Construct the request body to create application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplication request body
:rtype: dict
"""
app_metadata.validate(['author', 'description', 'name'])
request = {
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'LicenseUrl': app_metadata.license_url,
'Name': app_metadata.name,
'ReadmeUrl': app_metadata.readme_url,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'SpdxLicenseId': app_metadata.spdx_license_id,
'TemplateBody': template
}
# Remove None values
return {k: v for k, v in request.items() if v}
def _update_application_request(app_metadata, application_id):
"""
Construct the request body to update application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:return: SAR UpdateApplication request body
:rtype: dict
"""
request = {
'ApplicationId': application_id,
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'ReadmeUrl': app_metadata.readme_url
}
return {k: v for k, v in request.items() if v}
def _create_application_version_request(app_metadata, application_id, template):
"""
Construct the request body to create application version.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplicationVersion request body
:rtype: dict
"""
app_metadata.validate(['semantic_version'])
request = {
'ApplicationId': application_id,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'TemplateBody': template
}
return {k: v for k, v in request.items() if v}
def _is_conflict_exception(e):
"""
Check whether the botocore ClientError is ConflictException.
:param e: botocore exception
:type e: ClientError
:return: True if e is ConflictException
"""
error_code = e.response['Error']['Code']
return error_code == 'ConflictException'
def _wrap_client_error(e):
"""
Wrap botocore ClientError exception into ServerlessRepoClientError.
:param e: botocore exception
:type e: ClientError
:return: S3PermissionsRequired or InvalidS3UriError or general ServerlessRepoClientError
"""
error_code = e.response['Error']['Code']
message = e.response['Error']['Message']
if error_code == 'BadRequestException':
if "Failed to copy S3 object. Access denied:" in message:
match = re.search('bucket=(.+?), key=(.+?)$', message)
if match:
return S3PermissionsRequired(bucket=match.group(1), key=match.group(2))
if "Invalid S3 URI" in message:
return InvalidS3UriError(message=message)
return ServerlessRepoClientError(message=message)
def _get_publish_details(actions, app_metadata_template):
"""
Get the changed application details after publishing.
:param actions: Actions taken during publishing
:type actions: list of str
:param app_metadata_template: Original template definitions of app metadata
:type app_metadata_template: dict
:return: Updated fields and values of the application
:rtype: dict
"""
if actions == [CREATE_APPLICATION]:
return {k: v for k, v in app_metadata_template.items() if v}
include_keys = [
ApplicationMetadata.AUTHOR,
ApplicationMetadata.DESCRIPTION,
ApplicationMetadata.HOME_PAGE_URL,
ApplicationMetadata.LABELS,
ApplicationMetadata.README_URL
]
if CREATE_APPLICATION_VERSION in actions:
# SemanticVersion and SourceCodeUrl can only be updated by creating a new version
additional_keys = [ApplicationMetadata.SEMANTIC_VERSION, ApplicationMetadata.SOURCE_CODE_URL]
include_keys.extend(additional_keys)
return {k: v for k, v in app_metadata_template.items() if k in include_keys and v}
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/publish.py
|
update_application_metadata
|
python
|
def update_application_metadata(template, application_id, sar_client=None):
if not template or not application_id:
raise ValueError('Require SAM template and application ID to update application metadata')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
template_dict = _get_template_dict(template)
app_metadata = get_app_metadata(template_dict)
request = _update_application_request(app_metadata, application_id)
sar_client.update_application(**request)
|
Update the application metadata.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/publish.py#L79-L100
|
[
"def get_app_metadata(template_dict):\n \"\"\"\n Get the application metadata from a SAM template.\n\n :param template_dict: SAM template as a dictionary\n :type template_dict: dict\n :return: Application metadata as defined in the template\n :rtype: ApplicationMetadata\n :raises ApplicationMetadataNotFoundError\n \"\"\"\n if SERVERLESS_REPO_APPLICATION in template_dict.get(METADATA, {}):\n app_metadata_dict = template_dict.get(METADATA).get(SERVERLESS_REPO_APPLICATION)\n return ApplicationMetadata(app_metadata_dict)\n\n raise ApplicationMetadataNotFoundError(\n error_message='missing {} section in template Metadata'.format(SERVERLESS_REPO_APPLICATION))\n",
"def _get_template_dict(template):\n \"\"\"\n Parse string template and or copy dictionary template.\n\n :param template: Content of a packaged YAML or JSON SAM template\n :type template: str_or_dict\n :return: Template as a dictionary\n :rtype: dict\n :raises ValueError\n \"\"\"\n if isinstance(template, str):\n return parse_template(template)\n\n if isinstance(template, dict):\n return copy.deepcopy(template)\n\n raise ValueError('Input template should be a string or dictionary')\n",
"def _update_application_request(app_metadata, application_id):\n \"\"\"\n Construct the request body to update application.\n\n :param app_metadata: Object containing app metadata\n :type app_metadata: ApplicationMetadata\n :param application_id: The Amazon Resource Name (ARN) of the application\n :type application_id: str\n :return: SAR UpdateApplication request body\n :rtype: dict\n \"\"\"\n request = {\n 'ApplicationId': application_id,\n 'Author': app_metadata.author,\n 'Description': app_metadata.description,\n 'HomePageUrl': app_metadata.home_page_url,\n 'Labels': app_metadata.labels,\n 'ReadmeUrl': app_metadata.readme_url\n }\n return {k: v for k, v in request.items() if v}\n"
] |
"""Module containing functions to publish or update application."""
import re
import copy
import boto3
from botocore.exceptions import ClientError
from .application_metadata import ApplicationMetadata
from .parser import (
yaml_dump, parse_template, get_app_metadata,
parse_application_id, strip_app_metadata
)
from .exceptions import ServerlessRepoClientError, S3PermissionsRequired, InvalidS3UriError
CREATE_APPLICATION = 'CREATE_APPLICATION'
UPDATE_APPLICATION = 'UPDATE_APPLICATION'
CREATE_APPLICATION_VERSION = 'CREATE_APPLICATION_VERSION'
def publish_application(template, sar_client=None):
"""
Create a new application or new application version in SAR.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:return: Dictionary containing application id, actions taken, and updated details
:rtype: dict
:raises ValueError
"""
if not template:
raise ValueError('Require SAM template to publish the application')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
template_dict = _get_template_dict(template)
app_metadata = get_app_metadata(template_dict)
stripped_template_dict = strip_app_metadata(template_dict)
stripped_template = yaml_dump(stripped_template_dict)
try:
request = _create_application_request(app_metadata, stripped_template)
response = sar_client.create_application(**request)
application_id = response['ApplicationId']
actions = [CREATE_APPLICATION]
except ClientError as e:
if not _is_conflict_exception(e):
raise _wrap_client_error(e)
# Update the application if it already exists
error_message = e.response['Error']['Message']
application_id = parse_application_id(error_message)
try:
request = _update_application_request(app_metadata, application_id)
sar_client.update_application(**request)
actions = [UPDATE_APPLICATION]
except ClientError as e:
raise _wrap_client_error(e)
# Create application version if semantic version is specified
if app_metadata.semantic_version:
try:
request = _create_application_version_request(app_metadata, application_id, stripped_template)
sar_client.create_application_version(**request)
actions.append(CREATE_APPLICATION_VERSION)
except ClientError as e:
if not _is_conflict_exception(e):
raise _wrap_client_error(e)
return {
'application_id': application_id,
'actions': actions,
'details': _get_publish_details(actions, app_metadata.template_dict)
}
def _get_template_dict(template):
"""
Parse string template and or copy dictionary template.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:return: Template as a dictionary
:rtype: dict
:raises ValueError
"""
if isinstance(template, str):
return parse_template(template)
if isinstance(template, dict):
return copy.deepcopy(template)
raise ValueError('Input template should be a string or dictionary')
def _create_application_request(app_metadata, template):
"""
Construct the request body to create application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplication request body
:rtype: dict
"""
app_metadata.validate(['author', 'description', 'name'])
request = {
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'LicenseUrl': app_metadata.license_url,
'Name': app_metadata.name,
'ReadmeUrl': app_metadata.readme_url,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'SpdxLicenseId': app_metadata.spdx_license_id,
'TemplateBody': template
}
# Remove None values
return {k: v for k, v in request.items() if v}
def _update_application_request(app_metadata, application_id):
"""
Construct the request body to update application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:return: SAR UpdateApplication request body
:rtype: dict
"""
request = {
'ApplicationId': application_id,
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'ReadmeUrl': app_metadata.readme_url
}
return {k: v for k, v in request.items() if v}
def _create_application_version_request(app_metadata, application_id, template):
"""
Construct the request body to create application version.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplicationVersion request body
:rtype: dict
"""
app_metadata.validate(['semantic_version'])
request = {
'ApplicationId': application_id,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'TemplateBody': template
}
return {k: v for k, v in request.items() if v}
def _is_conflict_exception(e):
"""
Check whether the botocore ClientError is ConflictException.
:param e: botocore exception
:type e: ClientError
:return: True if e is ConflictException
"""
error_code = e.response['Error']['Code']
return error_code == 'ConflictException'
def _wrap_client_error(e):
"""
Wrap botocore ClientError exception into ServerlessRepoClientError.
:param e: botocore exception
:type e: ClientError
:return: S3PermissionsRequired or InvalidS3UriError or general ServerlessRepoClientError
"""
error_code = e.response['Error']['Code']
message = e.response['Error']['Message']
if error_code == 'BadRequestException':
if "Failed to copy S3 object. Access denied:" in message:
match = re.search('bucket=(.+?), key=(.+?)$', message)
if match:
return S3PermissionsRequired(bucket=match.group(1), key=match.group(2))
if "Invalid S3 URI" in message:
return InvalidS3UriError(message=message)
return ServerlessRepoClientError(message=message)
def _get_publish_details(actions, app_metadata_template):
"""
Get the changed application details after publishing.
:param actions: Actions taken during publishing
:type actions: list of str
:param app_metadata_template: Original template definitions of app metadata
:type app_metadata_template: dict
:return: Updated fields and values of the application
:rtype: dict
"""
if actions == [CREATE_APPLICATION]:
return {k: v for k, v in app_metadata_template.items() if v}
include_keys = [
ApplicationMetadata.AUTHOR,
ApplicationMetadata.DESCRIPTION,
ApplicationMetadata.HOME_PAGE_URL,
ApplicationMetadata.LABELS,
ApplicationMetadata.README_URL
]
if CREATE_APPLICATION_VERSION in actions:
# SemanticVersion and SourceCodeUrl can only be updated by creating a new version
additional_keys = [ApplicationMetadata.SEMANTIC_VERSION, ApplicationMetadata.SOURCE_CODE_URL]
include_keys.extend(additional_keys)
return {k: v for k, v in app_metadata_template.items() if k in include_keys and v}
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/publish.py
|
_get_template_dict
|
python
|
def _get_template_dict(template):
if isinstance(template, str):
return parse_template(template)
if isinstance(template, dict):
return copy.deepcopy(template)
raise ValueError('Input template should be a string or dictionary')
|
Parse string template and or copy dictionary template.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:return: Template as a dictionary
:rtype: dict
:raises ValueError
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/publish.py#L103-L119
|
[
"def parse_template(template_str):\n \"\"\"\n Parse the SAM template.\n\n :param template_str: A packaged YAML or json CloudFormation template\n :type template_str: str\n :return: Dictionary with keys defined in the template\n :rtype: dict\n \"\"\"\n try:\n # PyYAML doesn't support json as well as it should, so if the input\n # is actually just json it is better to parse it with the standard\n # json parser.\n return json.loads(template_str, object_pairs_hook=OrderedDict)\n except ValueError:\n yaml.SafeLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _dict_constructor)\n yaml.SafeLoader.add_multi_constructor('!', intrinsics_multi_constructor)\n return yaml.safe_load(template_str)\n"
] |
"""Module containing functions to publish or update application."""
import re
import copy
import boto3
from botocore.exceptions import ClientError
from .application_metadata import ApplicationMetadata
from .parser import (
yaml_dump, parse_template, get_app_metadata,
parse_application_id, strip_app_metadata
)
from .exceptions import ServerlessRepoClientError, S3PermissionsRequired, InvalidS3UriError
CREATE_APPLICATION = 'CREATE_APPLICATION'
UPDATE_APPLICATION = 'UPDATE_APPLICATION'
CREATE_APPLICATION_VERSION = 'CREATE_APPLICATION_VERSION'
def publish_application(template, sar_client=None):
    """
    Create a new application or new application version in SAR.
    :param template: Content of a packaged YAML or JSON SAM template
    :type template: str_or_dict
    :param sar_client: The boto3 client used to access SAR
    :type sar_client: boto3.client
    :return: Dictionary containing application id, actions taken, and updated details
    :rtype: dict
    :raises ValueError
    """
    if not template:
        raise ValueError('Require SAM template to publish the application')
    if not sar_client:
        # Fall back to a default client when the caller did not inject one.
        sar_client = boto3.client('serverlessrepo')
    template_dict = _get_template_dict(template)
    app_metadata = get_app_metadata(template_dict)
    # The app-metadata section is stripped from the template body before upload.
    stripped_template_dict = strip_app_metadata(template_dict)
    stripped_template = yaml_dump(stripped_template_dict)
    try:
        # Optimistically attempt to create the application first.
        request = _create_application_request(app_metadata, stripped_template)
        response = sar_client.create_application(**request)
        application_id = response['ApplicationId']
        actions = [CREATE_APPLICATION]
    except ClientError as e:
        if not _is_conflict_exception(e):
            raise _wrap_client_error(e)
        # Update the application if it already exists
        # (ConflictException: the application ARN is parsed out of the message).
        error_message = e.response['Error']['Message']
        application_id = parse_application_id(error_message)
        try:
            request = _update_application_request(app_metadata, application_id)
            sar_client.update_application(**request)
            actions = [UPDATE_APPLICATION]
        except ClientError as e:
            raise _wrap_client_error(e)
    # Create application version if semantic version is specified
    if app_metadata.semantic_version:
        try:
            request = _create_application_version_request(app_metadata, application_id, stripped_template)
            sar_client.create_application_version(**request)
            actions.append(CREATE_APPLICATION_VERSION)
        except ClientError as e:
            # A conflict here means this semantic version already exists;
            # that is deliberately ignored rather than treated as a failure.
            if not _is_conflict_exception(e):
                raise _wrap_client_error(e)
    return {
        'application_id': application_id,
        'actions': actions,
        'details': _get_publish_details(actions, app_metadata.template_dict)
    }
def update_application_metadata(template, application_id, sar_client=None):
    """
    Push the metadata fields of a SAM template to an existing SAR application.

    :param template: Content of a packaged YAML or JSON SAM template
    :type template: str_or_dict
    :param application_id: The Amazon Resource Name (ARN) of the application
    :type application_id: str
    :param sar_client: The boto3 client used to access SAR
    :type sar_client: boto3.client
    :raises ValueError
    """
    if not (template and application_id):
        raise ValueError('Require SAM template and application ID to update application metadata')
    client = sar_client or boto3.client('serverlessrepo')
    metadata = get_app_metadata(_get_template_dict(template))
    client.update_application(**_update_application_request(metadata, application_id))
def _create_application_request(app_metadata, template):
"""
Construct the request body to create application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplication request body
:rtype: dict
"""
app_metadata.validate(['author', 'description', 'name'])
request = {
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'LicenseUrl': app_metadata.license_url,
'Name': app_metadata.name,
'ReadmeUrl': app_metadata.readme_url,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'SpdxLicenseId': app_metadata.spdx_license_id,
'TemplateBody': template
}
# Remove None values
return {k: v for k, v in request.items() if v}
def _update_application_request(app_metadata, application_id):
"""
Construct the request body to update application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:return: SAR UpdateApplication request body
:rtype: dict
"""
request = {
'ApplicationId': application_id,
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'ReadmeUrl': app_metadata.readme_url
}
return {k: v for k, v in request.items() if v}
def _create_application_version_request(app_metadata, application_id, template):
"""
Construct the request body to create application version.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplicationVersion request body
:rtype: dict
"""
app_metadata.validate(['semantic_version'])
request = {
'ApplicationId': application_id,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'TemplateBody': template
}
return {k: v for k, v in request.items() if v}
def _is_conflict_exception(e):
"""
Check whether the botocore ClientError is ConflictException.
:param e: botocore exception
:type e: ClientError
:return: True if e is ConflictException
"""
error_code = e.response['Error']['Code']
return error_code == 'ConflictException'
def _wrap_client_error(e):
    """
    Wrap botocore ClientError exception into ServerlessRepoClientError.
    :param e: botocore exception
    :type e: ClientError
    :return: S3PermissionsRequired or InvalidS3UriError or general ServerlessRepoClientError
    """
    error_code = e.response['Error']['Code']
    message = e.response['Error']['Message']
    if error_code == 'BadRequestException':
        # SAR reports S3 access problems as BadRequestException; recognize the
        # two known message shapes and surface more specific error types.
        if "Failed to copy S3 object. Access denied:" in message:
            match = re.search('bucket=(.+?), key=(.+?)$', message)
            if match:
                # Extract the bucket/key so callers can point users at the
                # exact S3 object that needs a policy change.
                return S3PermissionsRequired(bucket=match.group(1), key=match.group(2))
        if "Invalid S3 URI" in message:
            return InvalidS3UriError(message=message)
    # Anything unrecognized is wrapped in the generic client error.
    return ServerlessRepoClientError(message=message)
def _get_publish_details(actions, app_metadata_template):
    """
    Get the changed application details after publishing.
    :param actions: Actions taken during publishing
    :type actions: list of str
    :param app_metadata_template: Original template definitions of app metadata
    :type app_metadata_template: dict
    :return: Updated fields and values of the application
    :rtype: dict
    """
    if actions == [CREATE_APPLICATION]:
        # Fresh application: every populated metadata field was published.
        return {k: v for k, v in app_metadata_template.items() if v}
    include_keys = [
        ApplicationMetadata.AUTHOR,
        ApplicationMetadata.DESCRIPTION,
        ApplicationMetadata.HOME_PAGE_URL,
        ApplicationMetadata.LABELS,
        ApplicationMetadata.README_URL
    ]
    if CREATE_APPLICATION_VERSION in actions:
        # SemanticVersion and SourceCodeUrl can only be updated by creating a new version
        additional_keys = [ApplicationMetadata.SEMANTIC_VERSION, ApplicationMetadata.SOURCE_CODE_URL]
        include_keys.extend(additional_keys)
    # Report only fields that are both updatable for these actions and populated.
    return {k: v for k, v in app_metadata_template.items() if k in include_keys and v}
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/publish.py
|
_create_application_request
|
python
|
def _create_application_request(app_metadata, template):
app_metadata.validate(['author', 'description', 'name'])
request = {
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'LicenseUrl': app_metadata.license_url,
'Name': app_metadata.name,
'ReadmeUrl': app_metadata.readme_url,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'SpdxLicenseId': app_metadata.spdx_license_id,
'TemplateBody': template
}
# Remove None values
return {k: v for k, v in request.items() if v}
|
Construct the request body to create application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplication request body
:rtype: dict
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/publish.py#L122-L148
| null |
"""Module containing functions to publish or update application."""
import re
import copy
import boto3
from botocore.exceptions import ClientError
from .application_metadata import ApplicationMetadata
from .parser import (
yaml_dump, parse_template, get_app_metadata,
parse_application_id, strip_app_metadata
)
from .exceptions import ServerlessRepoClientError, S3PermissionsRequired, InvalidS3UriError
CREATE_APPLICATION = 'CREATE_APPLICATION'
UPDATE_APPLICATION = 'UPDATE_APPLICATION'
CREATE_APPLICATION_VERSION = 'CREATE_APPLICATION_VERSION'
def publish_application(template, sar_client=None):
"""
Create a new application or new application version in SAR.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:return: Dictionary containing application id, actions taken, and updated details
:rtype: dict
:raises ValueError
"""
if not template:
raise ValueError('Require SAM template to publish the application')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
template_dict = _get_template_dict(template)
app_metadata = get_app_metadata(template_dict)
stripped_template_dict = strip_app_metadata(template_dict)
stripped_template = yaml_dump(stripped_template_dict)
try:
request = _create_application_request(app_metadata, stripped_template)
response = sar_client.create_application(**request)
application_id = response['ApplicationId']
actions = [CREATE_APPLICATION]
except ClientError as e:
if not _is_conflict_exception(e):
raise _wrap_client_error(e)
# Update the application if it already exists
error_message = e.response['Error']['Message']
application_id = parse_application_id(error_message)
try:
request = _update_application_request(app_metadata, application_id)
sar_client.update_application(**request)
actions = [UPDATE_APPLICATION]
except ClientError as e:
raise _wrap_client_error(e)
# Create application version if semantic version is specified
if app_metadata.semantic_version:
try:
request = _create_application_version_request(app_metadata, application_id, stripped_template)
sar_client.create_application_version(**request)
actions.append(CREATE_APPLICATION_VERSION)
except ClientError as e:
if not _is_conflict_exception(e):
raise _wrap_client_error(e)
return {
'application_id': application_id,
'actions': actions,
'details': _get_publish_details(actions, app_metadata.template_dict)
}
def update_application_metadata(template, application_id, sar_client=None):
"""
Update the application metadata.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
"""
if not template or not application_id:
raise ValueError('Require SAM template and application ID to update application metadata')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
template_dict = _get_template_dict(template)
app_metadata = get_app_metadata(template_dict)
request = _update_application_request(app_metadata, application_id)
sar_client.update_application(**request)
def _get_template_dict(template):
"""
Parse string template and or copy dictionary template.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:return: Template as a dictionary
:rtype: dict
:raises ValueError
"""
if isinstance(template, str):
return parse_template(template)
if isinstance(template, dict):
return copy.deepcopy(template)
raise ValueError('Input template should be a string or dictionary')
def _update_application_request(app_metadata, application_id):
"""
Construct the request body to update application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:return: SAR UpdateApplication request body
:rtype: dict
"""
request = {
'ApplicationId': application_id,
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'ReadmeUrl': app_metadata.readme_url
}
return {k: v for k, v in request.items() if v}
def _create_application_version_request(app_metadata, application_id, template):
"""
Construct the request body to create application version.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplicationVersion request body
:rtype: dict
"""
app_metadata.validate(['semantic_version'])
request = {
'ApplicationId': application_id,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'TemplateBody': template
}
return {k: v for k, v in request.items() if v}
def _is_conflict_exception(e):
"""
Check whether the botocore ClientError is ConflictException.
:param e: botocore exception
:type e: ClientError
:return: True if e is ConflictException
"""
error_code = e.response['Error']['Code']
return error_code == 'ConflictException'
def _wrap_client_error(e):
"""
Wrap botocore ClientError exception into ServerlessRepoClientError.
:param e: botocore exception
:type e: ClientError
:return: S3PermissionsRequired or InvalidS3UriError or general ServerlessRepoClientError
"""
error_code = e.response['Error']['Code']
message = e.response['Error']['Message']
if error_code == 'BadRequestException':
if "Failed to copy S3 object. Access denied:" in message:
match = re.search('bucket=(.+?), key=(.+?)$', message)
if match:
return S3PermissionsRequired(bucket=match.group(1), key=match.group(2))
if "Invalid S3 URI" in message:
return InvalidS3UriError(message=message)
return ServerlessRepoClientError(message=message)
def _get_publish_details(actions, app_metadata_template):
"""
Get the changed application details after publishing.
:param actions: Actions taken during publishing
:type actions: list of str
:param app_metadata_template: Original template definitions of app metadata
:type app_metadata_template: dict
:return: Updated fields and values of the application
:rtype: dict
"""
if actions == [CREATE_APPLICATION]:
return {k: v for k, v in app_metadata_template.items() if v}
include_keys = [
ApplicationMetadata.AUTHOR,
ApplicationMetadata.DESCRIPTION,
ApplicationMetadata.HOME_PAGE_URL,
ApplicationMetadata.LABELS,
ApplicationMetadata.README_URL
]
if CREATE_APPLICATION_VERSION in actions:
# SemanticVersion and SourceCodeUrl can only be updated by creating a new version
additional_keys = [ApplicationMetadata.SEMANTIC_VERSION, ApplicationMetadata.SOURCE_CODE_URL]
include_keys.extend(additional_keys)
return {k: v for k, v in app_metadata_template.items() if k in include_keys and v}
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/publish.py
|
_update_application_request
|
python
|
def _update_application_request(app_metadata, application_id):
request = {
'ApplicationId': application_id,
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'ReadmeUrl': app_metadata.readme_url
}
return {k: v for k, v in request.items() if v}
|
Construct the request body to update application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:return: SAR UpdateApplication request body
:rtype: dict
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/publish.py#L151-L170
| null |
"""Module containing functions to publish or update application."""
import re
import copy
import boto3
from botocore.exceptions import ClientError
from .application_metadata import ApplicationMetadata
from .parser import (
yaml_dump, parse_template, get_app_metadata,
parse_application_id, strip_app_metadata
)
from .exceptions import ServerlessRepoClientError, S3PermissionsRequired, InvalidS3UriError
CREATE_APPLICATION = 'CREATE_APPLICATION'
UPDATE_APPLICATION = 'UPDATE_APPLICATION'
CREATE_APPLICATION_VERSION = 'CREATE_APPLICATION_VERSION'
def publish_application(template, sar_client=None):
"""
Create a new application or new application version in SAR.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:return: Dictionary containing application id, actions taken, and updated details
:rtype: dict
:raises ValueError
"""
if not template:
raise ValueError('Require SAM template to publish the application')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
template_dict = _get_template_dict(template)
app_metadata = get_app_metadata(template_dict)
stripped_template_dict = strip_app_metadata(template_dict)
stripped_template = yaml_dump(stripped_template_dict)
try:
request = _create_application_request(app_metadata, stripped_template)
response = sar_client.create_application(**request)
application_id = response['ApplicationId']
actions = [CREATE_APPLICATION]
except ClientError as e:
if not _is_conflict_exception(e):
raise _wrap_client_error(e)
# Update the application if it already exists
error_message = e.response['Error']['Message']
application_id = parse_application_id(error_message)
try:
request = _update_application_request(app_metadata, application_id)
sar_client.update_application(**request)
actions = [UPDATE_APPLICATION]
except ClientError as e:
raise _wrap_client_error(e)
# Create application version if semantic version is specified
if app_metadata.semantic_version:
try:
request = _create_application_version_request(app_metadata, application_id, stripped_template)
sar_client.create_application_version(**request)
actions.append(CREATE_APPLICATION_VERSION)
except ClientError as e:
if not _is_conflict_exception(e):
raise _wrap_client_error(e)
return {
'application_id': application_id,
'actions': actions,
'details': _get_publish_details(actions, app_metadata.template_dict)
}
def update_application_metadata(template, application_id, sar_client=None):
"""
Update the application metadata.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
"""
if not template or not application_id:
raise ValueError('Require SAM template and application ID to update application metadata')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
template_dict = _get_template_dict(template)
app_metadata = get_app_metadata(template_dict)
request = _update_application_request(app_metadata, application_id)
sar_client.update_application(**request)
def _get_template_dict(template):
"""
Parse string template and or copy dictionary template.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:return: Template as a dictionary
:rtype: dict
:raises ValueError
"""
if isinstance(template, str):
return parse_template(template)
if isinstance(template, dict):
return copy.deepcopy(template)
raise ValueError('Input template should be a string or dictionary')
def _create_application_request(app_metadata, template):
"""
Construct the request body to create application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplication request body
:rtype: dict
"""
app_metadata.validate(['author', 'description', 'name'])
request = {
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'LicenseUrl': app_metadata.license_url,
'Name': app_metadata.name,
'ReadmeUrl': app_metadata.readme_url,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'SpdxLicenseId': app_metadata.spdx_license_id,
'TemplateBody': template
}
# Remove None values
return {k: v for k, v in request.items() if v}
def _create_application_version_request(app_metadata, application_id, template):
"""
Construct the request body to create application version.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplicationVersion request body
:rtype: dict
"""
app_metadata.validate(['semantic_version'])
request = {
'ApplicationId': application_id,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'TemplateBody': template
}
return {k: v for k, v in request.items() if v}
def _is_conflict_exception(e):
"""
Check whether the botocore ClientError is ConflictException.
:param e: botocore exception
:type e: ClientError
:return: True if e is ConflictException
"""
error_code = e.response['Error']['Code']
return error_code == 'ConflictException'
def _wrap_client_error(e):
"""
Wrap botocore ClientError exception into ServerlessRepoClientError.
:param e: botocore exception
:type e: ClientError
:return: S3PermissionsRequired or InvalidS3UriError or general ServerlessRepoClientError
"""
error_code = e.response['Error']['Code']
message = e.response['Error']['Message']
if error_code == 'BadRequestException':
if "Failed to copy S3 object. Access denied:" in message:
match = re.search('bucket=(.+?), key=(.+?)$', message)
if match:
return S3PermissionsRequired(bucket=match.group(1), key=match.group(2))
if "Invalid S3 URI" in message:
return InvalidS3UriError(message=message)
return ServerlessRepoClientError(message=message)
def _get_publish_details(actions, app_metadata_template):
"""
Get the changed application details after publishing.
:param actions: Actions taken during publishing
:type actions: list of str
:param app_metadata_template: Original template definitions of app metadata
:type app_metadata_template: dict
:return: Updated fields and values of the application
:rtype: dict
"""
if actions == [CREATE_APPLICATION]:
return {k: v for k, v in app_metadata_template.items() if v}
include_keys = [
ApplicationMetadata.AUTHOR,
ApplicationMetadata.DESCRIPTION,
ApplicationMetadata.HOME_PAGE_URL,
ApplicationMetadata.LABELS,
ApplicationMetadata.README_URL
]
if CREATE_APPLICATION_VERSION in actions:
# SemanticVersion and SourceCodeUrl can only be updated by creating a new version
additional_keys = [ApplicationMetadata.SEMANTIC_VERSION, ApplicationMetadata.SOURCE_CODE_URL]
include_keys.extend(additional_keys)
return {k: v for k, v in app_metadata_template.items() if k in include_keys and v}
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/publish.py
|
_create_application_version_request
|
python
|
def _create_application_version_request(app_metadata, application_id, template):
app_metadata.validate(['semantic_version'])
request = {
'ApplicationId': application_id,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'TemplateBody': template
}
return {k: v for k, v in request.items() if v}
|
Construct the request body to create application version.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplicationVersion request body
:rtype: dict
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/publish.py#L173-L193
| null |
"""Module containing functions to publish or update application."""
import re
import copy
import boto3
from botocore.exceptions import ClientError
from .application_metadata import ApplicationMetadata
from .parser import (
yaml_dump, parse_template, get_app_metadata,
parse_application_id, strip_app_metadata
)
from .exceptions import ServerlessRepoClientError, S3PermissionsRequired, InvalidS3UriError
CREATE_APPLICATION = 'CREATE_APPLICATION'
UPDATE_APPLICATION = 'UPDATE_APPLICATION'
CREATE_APPLICATION_VERSION = 'CREATE_APPLICATION_VERSION'
def publish_application(template, sar_client=None):
"""
Create a new application or new application version in SAR.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:return: Dictionary containing application id, actions taken, and updated details
:rtype: dict
:raises ValueError
"""
if not template:
raise ValueError('Require SAM template to publish the application')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
template_dict = _get_template_dict(template)
app_metadata = get_app_metadata(template_dict)
stripped_template_dict = strip_app_metadata(template_dict)
stripped_template = yaml_dump(stripped_template_dict)
try:
request = _create_application_request(app_metadata, stripped_template)
response = sar_client.create_application(**request)
application_id = response['ApplicationId']
actions = [CREATE_APPLICATION]
except ClientError as e:
if not _is_conflict_exception(e):
raise _wrap_client_error(e)
# Update the application if it already exists
error_message = e.response['Error']['Message']
application_id = parse_application_id(error_message)
try:
request = _update_application_request(app_metadata, application_id)
sar_client.update_application(**request)
actions = [UPDATE_APPLICATION]
except ClientError as e:
raise _wrap_client_error(e)
# Create application version if semantic version is specified
if app_metadata.semantic_version:
try:
request = _create_application_version_request(app_metadata, application_id, stripped_template)
sar_client.create_application_version(**request)
actions.append(CREATE_APPLICATION_VERSION)
except ClientError as e:
if not _is_conflict_exception(e):
raise _wrap_client_error(e)
return {
'application_id': application_id,
'actions': actions,
'details': _get_publish_details(actions, app_metadata.template_dict)
}
def update_application_metadata(template, application_id, sar_client=None):
"""
Update the application metadata.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
"""
if not template or not application_id:
raise ValueError('Require SAM template and application ID to update application metadata')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
template_dict = _get_template_dict(template)
app_metadata = get_app_metadata(template_dict)
request = _update_application_request(app_metadata, application_id)
sar_client.update_application(**request)
def _get_template_dict(template):
"""
Parse string template and or copy dictionary template.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:return: Template as a dictionary
:rtype: dict
:raises ValueError
"""
if isinstance(template, str):
return parse_template(template)
if isinstance(template, dict):
return copy.deepcopy(template)
raise ValueError('Input template should be a string or dictionary')
def _create_application_request(app_metadata, template):
"""
Construct the request body to create application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplication request body
:rtype: dict
"""
app_metadata.validate(['author', 'description', 'name'])
request = {
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'LicenseUrl': app_metadata.license_url,
'Name': app_metadata.name,
'ReadmeUrl': app_metadata.readme_url,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'SpdxLicenseId': app_metadata.spdx_license_id,
'TemplateBody': template
}
# Remove None values
return {k: v for k, v in request.items() if v}
def _update_application_request(app_metadata, application_id):
"""
Construct the request body to update application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:return: SAR UpdateApplication request body
:rtype: dict
"""
request = {
'ApplicationId': application_id,
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'ReadmeUrl': app_metadata.readme_url
}
return {k: v for k, v in request.items() if v}
def _is_conflict_exception(e):
"""
Check whether the botocore ClientError is ConflictException.
:param e: botocore exception
:type e: ClientError
:return: True if e is ConflictException
"""
error_code = e.response['Error']['Code']
return error_code == 'ConflictException'
def _wrap_client_error(e):
"""
Wrap botocore ClientError exception into ServerlessRepoClientError.
:param e: botocore exception
:type e: ClientError
:return: S3PermissionsRequired or InvalidS3UriError or general ServerlessRepoClientError
"""
error_code = e.response['Error']['Code']
message = e.response['Error']['Message']
if error_code == 'BadRequestException':
if "Failed to copy S3 object. Access denied:" in message:
match = re.search('bucket=(.+?), key=(.+?)$', message)
if match:
return S3PermissionsRequired(bucket=match.group(1), key=match.group(2))
if "Invalid S3 URI" in message:
return InvalidS3UriError(message=message)
return ServerlessRepoClientError(message=message)
def _get_publish_details(actions, app_metadata_template):
"""
Get the changed application details after publishing.
:param actions: Actions taken during publishing
:type actions: list of str
:param app_metadata_template: Original template definitions of app metadata
:type app_metadata_template: dict
:return: Updated fields and values of the application
:rtype: dict
"""
if actions == [CREATE_APPLICATION]:
return {k: v for k, v in app_metadata_template.items() if v}
include_keys = [
ApplicationMetadata.AUTHOR,
ApplicationMetadata.DESCRIPTION,
ApplicationMetadata.HOME_PAGE_URL,
ApplicationMetadata.LABELS,
ApplicationMetadata.README_URL
]
if CREATE_APPLICATION_VERSION in actions:
# SemanticVersion and SourceCodeUrl can only be updated by creating a new version
additional_keys = [ApplicationMetadata.SEMANTIC_VERSION, ApplicationMetadata.SOURCE_CODE_URL]
include_keys.extend(additional_keys)
return {k: v for k, v in app_metadata_template.items() if k in include_keys and v}
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/publish.py
|
_wrap_client_error
|
python
|
def _wrap_client_error(e):
error_code = e.response['Error']['Code']
message = e.response['Error']['Message']
if error_code == 'BadRequestException':
if "Failed to copy S3 object. Access denied:" in message:
match = re.search('bucket=(.+?), key=(.+?)$', message)
if match:
return S3PermissionsRequired(bucket=match.group(1), key=match.group(2))
if "Invalid S3 URI" in message:
return InvalidS3UriError(message=message)
return ServerlessRepoClientError(message=message)
|
Wrap botocore ClientError exception into ServerlessRepoClientError.
:param e: botocore exception
:type e: ClientError
:return: S3PermissionsRequired or InvalidS3UriError or general ServerlessRepoClientError
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/publish.py#L208-L227
| null |
"""Module containing functions to publish or update application."""
import re
import copy
import boto3
from botocore.exceptions import ClientError
from .application_metadata import ApplicationMetadata
from .parser import (
yaml_dump, parse_template, get_app_metadata,
parse_application_id, strip_app_metadata
)
from .exceptions import ServerlessRepoClientError, S3PermissionsRequired, InvalidS3UriError
CREATE_APPLICATION = 'CREATE_APPLICATION'
UPDATE_APPLICATION = 'UPDATE_APPLICATION'
CREATE_APPLICATION_VERSION = 'CREATE_APPLICATION_VERSION'
def publish_application(template, sar_client=None):
"""
Create a new application or new application version in SAR.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:return: Dictionary containing application id, actions taken, and updated details
:rtype: dict
:raises ValueError
"""
if not template:
raise ValueError('Require SAM template to publish the application')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
template_dict = _get_template_dict(template)
app_metadata = get_app_metadata(template_dict)
stripped_template_dict = strip_app_metadata(template_dict)
stripped_template = yaml_dump(stripped_template_dict)
try:
request = _create_application_request(app_metadata, stripped_template)
response = sar_client.create_application(**request)
application_id = response['ApplicationId']
actions = [CREATE_APPLICATION]
except ClientError as e:
if not _is_conflict_exception(e):
raise _wrap_client_error(e)
# Update the application if it already exists
error_message = e.response['Error']['Message']
application_id = parse_application_id(error_message)
try:
request = _update_application_request(app_metadata, application_id)
sar_client.update_application(**request)
actions = [UPDATE_APPLICATION]
except ClientError as e:
raise _wrap_client_error(e)
# Create application version if semantic version is specified
if app_metadata.semantic_version:
try:
request = _create_application_version_request(app_metadata, application_id, stripped_template)
sar_client.create_application_version(**request)
actions.append(CREATE_APPLICATION_VERSION)
except ClientError as e:
if not _is_conflict_exception(e):
raise _wrap_client_error(e)
return {
'application_id': application_id,
'actions': actions,
'details': _get_publish_details(actions, app_metadata.template_dict)
}
def update_application_metadata(template, application_id, sar_client=None):
"""
Update the application metadata.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
"""
if not template or not application_id:
raise ValueError('Require SAM template and application ID to update application metadata')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
template_dict = _get_template_dict(template)
app_metadata = get_app_metadata(template_dict)
request = _update_application_request(app_metadata, application_id)
sar_client.update_application(**request)
def _get_template_dict(template):
"""
Parse string template and or copy dictionary template.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:return: Template as a dictionary
:rtype: dict
:raises ValueError
"""
if isinstance(template, str):
return parse_template(template)
if isinstance(template, dict):
return copy.deepcopy(template)
raise ValueError('Input template should be a string or dictionary')
def _create_application_request(app_metadata, template):
"""
Construct the request body to create application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplication request body
:rtype: dict
"""
app_metadata.validate(['author', 'description', 'name'])
request = {
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'LicenseUrl': app_metadata.license_url,
'Name': app_metadata.name,
'ReadmeUrl': app_metadata.readme_url,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'SpdxLicenseId': app_metadata.spdx_license_id,
'TemplateBody': template
}
# Remove None values
return {k: v for k, v in request.items() if v}
def _update_application_request(app_metadata, application_id):
"""
Construct the request body to update application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:return: SAR UpdateApplication request body
:rtype: dict
"""
request = {
'ApplicationId': application_id,
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'ReadmeUrl': app_metadata.readme_url
}
return {k: v for k, v in request.items() if v}
def _create_application_version_request(app_metadata, application_id, template):
"""
Construct the request body to create application version.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplicationVersion request body
:rtype: dict
"""
app_metadata.validate(['semantic_version'])
request = {
'ApplicationId': application_id,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'TemplateBody': template
}
return {k: v for k, v in request.items() if v}
def _is_conflict_exception(e):
"""
Check whether the botocore ClientError is ConflictException.
:param e: botocore exception
:type e: ClientError
:return: True if e is ConflictException
"""
error_code = e.response['Error']['Code']
return error_code == 'ConflictException'
def _get_publish_details(actions, app_metadata_template):
"""
Get the changed application details after publishing.
:param actions: Actions taken during publishing
:type actions: list of str
:param app_metadata_template: Original template definitions of app metadata
:type app_metadata_template: dict
:return: Updated fields and values of the application
:rtype: dict
"""
if actions == [CREATE_APPLICATION]:
return {k: v for k, v in app_metadata_template.items() if v}
include_keys = [
ApplicationMetadata.AUTHOR,
ApplicationMetadata.DESCRIPTION,
ApplicationMetadata.HOME_PAGE_URL,
ApplicationMetadata.LABELS,
ApplicationMetadata.README_URL
]
if CREATE_APPLICATION_VERSION in actions:
# SemanticVersion and SourceCodeUrl can only be updated by creating a new version
additional_keys = [ApplicationMetadata.SEMANTIC_VERSION, ApplicationMetadata.SOURCE_CODE_URL]
include_keys.extend(additional_keys)
return {k: v for k, v in app_metadata_template.items() if k in include_keys and v}
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/publish.py
|
_get_publish_details
|
python
|
def _get_publish_details(actions, app_metadata_template):
if actions == [CREATE_APPLICATION]:
return {k: v for k, v in app_metadata_template.items() if v}
include_keys = [
ApplicationMetadata.AUTHOR,
ApplicationMetadata.DESCRIPTION,
ApplicationMetadata.HOME_PAGE_URL,
ApplicationMetadata.LABELS,
ApplicationMetadata.README_URL
]
if CREATE_APPLICATION_VERSION in actions:
# SemanticVersion and SourceCodeUrl can only be updated by creating a new version
additional_keys = [ApplicationMetadata.SEMANTIC_VERSION, ApplicationMetadata.SOURCE_CODE_URL]
include_keys.extend(additional_keys)
return {k: v for k, v in app_metadata_template.items() if k in include_keys and v}
|
Get the changed application details after publishing.
:param actions: Actions taken during publishing
:type actions: list of str
:param app_metadata_template: Original template definitions of app metadata
:type app_metadata_template: dict
:return: Updated fields and values of the application
:rtype: dict
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/publish.py#L230-L256
| null |
"""Module containing functions to publish or update application."""
import re
import copy
import boto3
from botocore.exceptions import ClientError
from .application_metadata import ApplicationMetadata
from .parser import (
yaml_dump, parse_template, get_app_metadata,
parse_application_id, strip_app_metadata
)
from .exceptions import ServerlessRepoClientError, S3PermissionsRequired, InvalidS3UriError
CREATE_APPLICATION = 'CREATE_APPLICATION'
UPDATE_APPLICATION = 'UPDATE_APPLICATION'
CREATE_APPLICATION_VERSION = 'CREATE_APPLICATION_VERSION'
def publish_application(template, sar_client=None):
"""
Create a new application or new application version in SAR.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:return: Dictionary containing application id, actions taken, and updated details
:rtype: dict
:raises ValueError
"""
if not template:
raise ValueError('Require SAM template to publish the application')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
template_dict = _get_template_dict(template)
app_metadata = get_app_metadata(template_dict)
stripped_template_dict = strip_app_metadata(template_dict)
stripped_template = yaml_dump(stripped_template_dict)
try:
request = _create_application_request(app_metadata, stripped_template)
response = sar_client.create_application(**request)
application_id = response['ApplicationId']
actions = [CREATE_APPLICATION]
except ClientError as e:
if not _is_conflict_exception(e):
raise _wrap_client_error(e)
# Update the application if it already exists
error_message = e.response['Error']['Message']
application_id = parse_application_id(error_message)
try:
request = _update_application_request(app_metadata, application_id)
sar_client.update_application(**request)
actions = [UPDATE_APPLICATION]
except ClientError as e:
raise _wrap_client_error(e)
# Create application version if semantic version is specified
if app_metadata.semantic_version:
try:
request = _create_application_version_request(app_metadata, application_id, stripped_template)
sar_client.create_application_version(**request)
actions.append(CREATE_APPLICATION_VERSION)
except ClientError as e:
if not _is_conflict_exception(e):
raise _wrap_client_error(e)
return {
'application_id': application_id,
'actions': actions,
'details': _get_publish_details(actions, app_metadata.template_dict)
}
def update_application_metadata(template, application_id, sar_client=None):
"""
Update the application metadata.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
"""
if not template or not application_id:
raise ValueError('Require SAM template and application ID to update application metadata')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
template_dict = _get_template_dict(template)
app_metadata = get_app_metadata(template_dict)
request = _update_application_request(app_metadata, application_id)
sar_client.update_application(**request)
def _get_template_dict(template):
"""
Parse string template and or copy dictionary template.
:param template: Content of a packaged YAML or JSON SAM template
:type template: str_or_dict
:return: Template as a dictionary
:rtype: dict
:raises ValueError
"""
if isinstance(template, str):
return parse_template(template)
if isinstance(template, dict):
return copy.deepcopy(template)
raise ValueError('Input template should be a string or dictionary')
def _create_application_request(app_metadata, template):
"""
Construct the request body to create application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplication request body
:rtype: dict
"""
app_metadata.validate(['author', 'description', 'name'])
request = {
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'LicenseUrl': app_metadata.license_url,
'Name': app_metadata.name,
'ReadmeUrl': app_metadata.readme_url,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'SpdxLicenseId': app_metadata.spdx_license_id,
'TemplateBody': template
}
# Remove None values
return {k: v for k, v in request.items() if v}
def _update_application_request(app_metadata, application_id):
"""
Construct the request body to update application.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:return: SAR UpdateApplication request body
:rtype: dict
"""
request = {
'ApplicationId': application_id,
'Author': app_metadata.author,
'Description': app_metadata.description,
'HomePageUrl': app_metadata.home_page_url,
'Labels': app_metadata.labels,
'ReadmeUrl': app_metadata.readme_url
}
return {k: v for k, v in request.items() if v}
def _create_application_version_request(app_metadata, application_id, template):
"""
Construct the request body to create application version.
:param app_metadata: Object containing app metadata
:type app_metadata: ApplicationMetadata
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param template: A packaged YAML or JSON SAM template
:type template: str
:return: SAR CreateApplicationVersion request body
:rtype: dict
"""
app_metadata.validate(['semantic_version'])
request = {
'ApplicationId': application_id,
'SemanticVersion': app_metadata.semantic_version,
'SourceCodeUrl': app_metadata.source_code_url,
'TemplateBody': template
}
return {k: v for k, v in request.items() if v}
def _is_conflict_exception(e):
"""
Check whether the botocore ClientError is ConflictException.
:param e: botocore exception
:type e: ClientError
:return: True if e is ConflictException
"""
error_code = e.response['Error']['Code']
return error_code == 'ConflictException'
def _wrap_client_error(e):
"""
Wrap botocore ClientError exception into ServerlessRepoClientError.
:param e: botocore exception
:type e: ClientError
:return: S3PermissionsRequired or InvalidS3UriError or general ServerlessRepoClientError
"""
error_code = e.response['Error']['Code']
message = e.response['Error']['Message']
if error_code == 'BadRequestException':
if "Failed to copy S3 object. Access denied:" in message:
match = re.search('bucket=(.+?), key=(.+?)$', message)
if match:
return S3PermissionsRequired(bucket=match.group(1), key=match.group(2))
if "Invalid S3 URI" in message:
return InvalidS3UriError(message=message)
return ServerlessRepoClientError(message=message)
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/application_metadata.py
|
ApplicationMetadata.validate
|
python
|
def validate(self, required_props):
missing_props = [p for p in required_props if not getattr(self, p)]
if missing_props:
missing_props_str = ', '.join(sorted(missing_props))
raise InvalidApplicationMetadataError(properties=missing_props_str)
return True
|
Check if the required application metadata properties have been populated.
:param required_props: List of required properties
:type required_props: list
:return: True, if the metadata is valid
:raises: InvalidApplicationMetadataError
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/application_metadata.py#L44-L57
| null |
class ApplicationMetadata(object):
"""Class representing SAR metadata."""
# SAM template SAR metadata properties
NAME = 'Name'
DESCRIPTION = 'Description'
AUTHOR = 'Author'
SPDX_LICENSE_ID = 'SpdxLicenseId'
LICENSE_URL = 'LicenseUrl'
README_URL = 'ReadmeUrl'
LABELS = 'Labels'
HOME_PAGE_URL = 'HomePageUrl'
SEMANTIC_VERSION = 'SemanticVersion'
SOURCE_CODE_URL = 'SourceCodeUrl'
def __init__(self, app_metadata):
"""
Initialize the object given SAR metadata properties.
:param app_metadata: Dictionary containing SAR metadata properties
:type app_metadata: dict
"""
self.template_dict = app_metadata # save the original template definitions
self.name = app_metadata.get(self.NAME)
self.description = app_metadata.get(self.DESCRIPTION)
self.author = app_metadata.get(self.AUTHOR)
self.spdx_license_id = app_metadata.get(self.SPDX_LICENSE_ID)
self.license_url = app_metadata.get(self.LICENSE_URL)
self.readme_url = app_metadata.get(self.README_URL)
self.labels = app_metadata.get(self.LABELS)
self.home_page_url = app_metadata.get(self.HOME_PAGE_URL)
self.semantic_version = app_metadata.get(self.SEMANTIC_VERSION)
self.source_code_url = app_metadata.get(self.SOURCE_CODE_URL)
def __eq__(self, other):
"""Return whether two ApplicationMetadata objects are equal."""
return isinstance(other, type(self)) and self.__dict__ == other.__dict__
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/parser.py
|
yaml_dump
|
python
|
def yaml_dump(dict_to_dump):
yaml.SafeDumper.add_representer(OrderedDict, _dict_representer)
return yaml.safe_dump(dict_to_dump, default_flow_style=False)
|
Dump the dictionary as a YAML document.
:param dict_to_dump: Data to be serialized as YAML
:type dict_to_dump: dict
:return: YAML document
:rtype: str
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/parser.py#L61-L71
| null |
"""Helper to parse JSON/YAML SAM template and dump YAML files."""
import re
import copy
import json
from collections import OrderedDict
import six
import yaml
from yaml.resolver import ScalarNode, SequenceNode
from .application_metadata import ApplicationMetadata
from .exceptions import ApplicationMetadataNotFoundError
METADATA = 'Metadata'
SERVERLESS_REPO_APPLICATION = 'AWS::ServerlessRepo::Application'
APPLICATION_ID_PATTERN = r'arn:[\w\-]+:serverlessrepo:[\w\-]+:[0-9]+:applications\/[\S]+'
def intrinsics_multi_constructor(loader, tag_prefix, node):
"""
YAML constructor to parse CloudFormation intrinsics.
:return: a dictionary with key being the instrinsic name
"""
# Get the actual tag name excluding the first exclamation
tag = node.tag[1:]
# Some intrinsic functions doesn't support prefix "Fn::"
prefix = 'Fn::'
if tag in ['Ref', 'Condition']:
prefix = ''
cfntag = prefix + tag
if tag == 'GetAtt' and isinstance(node.value, six.string_types):
# ShortHand notation for !GetAtt accepts Resource.Attribute format
# while the standard notation is to use an array
# [Resource, Attribute]. Convert shorthand to standard format
value = node.value.split('.', 1)
elif isinstance(node, ScalarNode):
# Value of this node is scalar
value = loader.construct_scalar(node)
elif isinstance(node, SequenceNode):
# Value of this node is an array (Ex: [1,2])
value = loader.construct_sequence(node)
else:
# Value of this node is an mapping (ex: {foo: bar})
value = loader.construct_mapping(node)
return {cfntag: value}
def _dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def _dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
def parse_template(template_str):
"""
Parse the SAM template.
:param template_str: A packaged YAML or json CloudFormation template
:type template_str: str
:return: Dictionary with keys defined in the template
:rtype: dict
"""
try:
# PyYAML doesn't support json as well as it should, so if the input
# is actually just json it is better to parse it with the standard
# json parser.
return json.loads(template_str, object_pairs_hook=OrderedDict)
except ValueError:
yaml.SafeLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _dict_constructor)
yaml.SafeLoader.add_multi_constructor('!', intrinsics_multi_constructor)
return yaml.safe_load(template_str)
def get_app_metadata(template_dict):
"""
Get the application metadata from a SAM template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: Application metadata as defined in the template
:rtype: ApplicationMetadata
:raises ApplicationMetadataNotFoundError
"""
if SERVERLESS_REPO_APPLICATION in template_dict.get(METADATA, {}):
app_metadata_dict = template_dict.get(METADATA).get(SERVERLESS_REPO_APPLICATION)
return ApplicationMetadata(app_metadata_dict)
raise ApplicationMetadataNotFoundError(
error_message='missing {} section in template Metadata'.format(SERVERLESS_REPO_APPLICATION))
def parse_application_id(text):
"""
Extract the application id from input text.
:param text: text to parse
:type text: str
:return: application id if found in the input
:rtype: str
"""
result = re.search(APPLICATION_ID_PATTERN, text)
return result.group(0) if result else None
def strip_app_metadata(template_dict):
"""
Strip the "AWS::ServerlessRepo::Application" metadata section from template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: stripped template content
:rtype: str
"""
if SERVERLESS_REPO_APPLICATION not in template_dict.get(METADATA, {}):
return template_dict
template_dict_copy = copy.deepcopy(template_dict)
# strip the whole metadata section if SERVERLESS_REPO_APPLICATION is the only key in it
if not [k for k in template_dict_copy.get(METADATA) if k != SERVERLESS_REPO_APPLICATION]:
template_dict_copy.pop(METADATA, None)
else:
template_dict_copy.get(METADATA).pop(SERVERLESS_REPO_APPLICATION, None)
return template_dict_copy
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/parser.py
|
parse_template
|
python
|
def parse_template(template_str):
try:
# PyYAML doesn't support json as well as it should, so if the input
# is actually just json it is better to parse it with the standard
# json parser.
return json.loads(template_str, object_pairs_hook=OrderedDict)
except ValueError:
yaml.SafeLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _dict_constructor)
yaml.SafeLoader.add_multi_constructor('!', intrinsics_multi_constructor)
return yaml.safe_load(template_str)
|
Parse the SAM template.
:param template_str: A packaged YAML or json CloudFormation template
:type template_str: str
:return: Dictionary with keys defined in the template
:rtype: dict
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/parser.py#L78-L95
| null |
"""Helper to parse JSON/YAML SAM template and dump YAML files."""
import re
import copy
import json
from collections import OrderedDict
import six
import yaml
from yaml.resolver import ScalarNode, SequenceNode
from .application_metadata import ApplicationMetadata
from .exceptions import ApplicationMetadataNotFoundError
METADATA = 'Metadata'
SERVERLESS_REPO_APPLICATION = 'AWS::ServerlessRepo::Application'
APPLICATION_ID_PATTERN = r'arn:[\w\-]+:serverlessrepo:[\w\-]+:[0-9]+:applications\/[\S]+'
def intrinsics_multi_constructor(loader, tag_prefix, node):
"""
YAML constructor to parse CloudFormation intrinsics.
:return: a dictionary with key being the instrinsic name
"""
# Get the actual tag name excluding the first exclamation
tag = node.tag[1:]
# Some intrinsic functions doesn't support prefix "Fn::"
prefix = 'Fn::'
if tag in ['Ref', 'Condition']:
prefix = ''
cfntag = prefix + tag
if tag == 'GetAtt' and isinstance(node.value, six.string_types):
# ShortHand notation for !GetAtt accepts Resource.Attribute format
# while the standard notation is to use an array
# [Resource, Attribute]. Convert shorthand to standard format
value = node.value.split('.', 1)
elif isinstance(node, ScalarNode):
# Value of this node is scalar
value = loader.construct_scalar(node)
elif isinstance(node, SequenceNode):
# Value of this node is an array (Ex: [1,2])
value = loader.construct_sequence(node)
else:
# Value of this node is an mapping (ex: {foo: bar})
value = loader.construct_mapping(node)
return {cfntag: value}
def _dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def yaml_dump(dict_to_dump):
"""
Dump the dictionary as a YAML document.
:param dict_to_dump: Data to be serialized as YAML
:type dict_to_dump: dict
:return: YAML document
:rtype: str
"""
yaml.SafeDumper.add_representer(OrderedDict, _dict_representer)
return yaml.safe_dump(dict_to_dump, default_flow_style=False)
def _dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
def get_app_metadata(template_dict):
"""
Get the application metadata from a SAM template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: Application metadata as defined in the template
:rtype: ApplicationMetadata
:raises ApplicationMetadataNotFoundError
"""
if SERVERLESS_REPO_APPLICATION in template_dict.get(METADATA, {}):
app_metadata_dict = template_dict.get(METADATA).get(SERVERLESS_REPO_APPLICATION)
return ApplicationMetadata(app_metadata_dict)
raise ApplicationMetadataNotFoundError(
error_message='missing {} section in template Metadata'.format(SERVERLESS_REPO_APPLICATION))
def parse_application_id(text):
"""
Extract the application id from input text.
:param text: text to parse
:type text: str
:return: application id if found in the input
:rtype: str
"""
result = re.search(APPLICATION_ID_PATTERN, text)
return result.group(0) if result else None
def strip_app_metadata(template_dict):
"""
Strip the "AWS::ServerlessRepo::Application" metadata section from template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: stripped template content
:rtype: str
"""
if SERVERLESS_REPO_APPLICATION not in template_dict.get(METADATA, {}):
return template_dict
template_dict_copy = copy.deepcopy(template_dict)
# strip the whole metadata section if SERVERLESS_REPO_APPLICATION is the only key in it
if not [k for k in template_dict_copy.get(METADATA) if k != SERVERLESS_REPO_APPLICATION]:
template_dict_copy.pop(METADATA, None)
else:
template_dict_copy.get(METADATA).pop(SERVERLESS_REPO_APPLICATION, None)
return template_dict_copy
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/parser.py
|
get_app_metadata
|
python
|
def get_app_metadata(template_dict):
if SERVERLESS_REPO_APPLICATION in template_dict.get(METADATA, {}):
app_metadata_dict = template_dict.get(METADATA).get(SERVERLESS_REPO_APPLICATION)
return ApplicationMetadata(app_metadata_dict)
raise ApplicationMetadataNotFoundError(
error_message='missing {} section in template Metadata'.format(SERVERLESS_REPO_APPLICATION))
|
Get the application metadata from a SAM template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: Application metadata as defined in the template
:rtype: ApplicationMetadata
:raises ApplicationMetadataNotFoundError
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/parser.py#L98-L113
| null |
"""Helper to parse JSON/YAML SAM template and dump YAML files."""
import re
import copy
import json
from collections import OrderedDict
import six
import yaml
from yaml.resolver import ScalarNode, SequenceNode
from .application_metadata import ApplicationMetadata
from .exceptions import ApplicationMetadataNotFoundError
METADATA = 'Metadata'
SERVERLESS_REPO_APPLICATION = 'AWS::ServerlessRepo::Application'
APPLICATION_ID_PATTERN = r'arn:[\w\-]+:serverlessrepo:[\w\-]+:[0-9]+:applications\/[\S]+'
def intrinsics_multi_constructor(loader, tag_prefix, node):
"""
YAML constructor to parse CloudFormation intrinsics.
:return: a dictionary with key being the instrinsic name
"""
# Get the actual tag name excluding the first exclamation
tag = node.tag[1:]
# Some intrinsic functions doesn't support prefix "Fn::"
prefix = 'Fn::'
if tag in ['Ref', 'Condition']:
prefix = ''
cfntag = prefix + tag
if tag == 'GetAtt' and isinstance(node.value, six.string_types):
# ShortHand notation for !GetAtt accepts Resource.Attribute format
# while the standard notation is to use an array
# [Resource, Attribute]. Convert shorthand to standard format
value = node.value.split('.', 1)
elif isinstance(node, ScalarNode):
# Value of this node is scalar
value = loader.construct_scalar(node)
elif isinstance(node, SequenceNode):
# Value of this node is an array (Ex: [1,2])
value = loader.construct_sequence(node)
else:
# Value of this node is an mapping (ex: {foo: bar})
value = loader.construct_mapping(node)
return {cfntag: value}
def _dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def yaml_dump(dict_to_dump):
"""
Dump the dictionary as a YAML document.
:param dict_to_dump: Data to be serialized as YAML
:type dict_to_dump: dict
:return: YAML document
:rtype: str
"""
yaml.SafeDumper.add_representer(OrderedDict, _dict_representer)
return yaml.safe_dump(dict_to_dump, default_flow_style=False)
def _dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
def parse_template(template_str):
"""
Parse the SAM template.
:param template_str: A packaged YAML or json CloudFormation template
:type template_str: str
:return: Dictionary with keys defined in the template
:rtype: dict
"""
try:
# PyYAML doesn't support json as well as it should, so if the input
# is actually just json it is better to parse it with the standard
# json parser.
return json.loads(template_str, object_pairs_hook=OrderedDict)
except ValueError:
yaml.SafeLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _dict_constructor)
yaml.SafeLoader.add_multi_constructor('!', intrinsics_multi_constructor)
return yaml.safe_load(template_str)
def parse_application_id(text):
"""
Extract the application id from input text.
:param text: text to parse
:type text: str
:return: application id if found in the input
:rtype: str
"""
result = re.search(APPLICATION_ID_PATTERN, text)
return result.group(0) if result else None
def strip_app_metadata(template_dict):
"""
Strip the "AWS::ServerlessRepo::Application" metadata section from template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: stripped template content
:rtype: str
"""
if SERVERLESS_REPO_APPLICATION not in template_dict.get(METADATA, {}):
return template_dict
template_dict_copy = copy.deepcopy(template_dict)
# strip the whole metadata section if SERVERLESS_REPO_APPLICATION is the only key in it
if not [k for k in template_dict_copy.get(METADATA) if k != SERVERLESS_REPO_APPLICATION]:
template_dict_copy.pop(METADATA, None)
else:
template_dict_copy.get(METADATA).pop(SERVERLESS_REPO_APPLICATION, None)
return template_dict_copy
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/parser.py
|
parse_application_id
|
python
|
def parse_application_id(text):
result = re.search(APPLICATION_ID_PATTERN, text)
return result.group(0) if result else None
|
Extract the application id from input text.
:param text: text to parse
:type text: str
:return: application id if found in the input
:rtype: str
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/parser.py#L116-L126
| null |
"""Helper to parse JSON/YAML SAM template and dump YAML files."""
import re
import copy
import json
from collections import OrderedDict
import six
import yaml
from yaml.resolver import ScalarNode, SequenceNode
from .application_metadata import ApplicationMetadata
from .exceptions import ApplicationMetadataNotFoundError
METADATA = 'Metadata'
SERVERLESS_REPO_APPLICATION = 'AWS::ServerlessRepo::Application'
APPLICATION_ID_PATTERN = r'arn:[\w\-]+:serverlessrepo:[\w\-]+:[0-9]+:applications\/[\S]+'
def intrinsics_multi_constructor(loader, tag_prefix, node):
"""
YAML constructor to parse CloudFormation intrinsics.
:return: a dictionary with key being the instrinsic name
"""
# Get the actual tag name excluding the first exclamation
tag = node.tag[1:]
# Some intrinsic functions doesn't support prefix "Fn::"
prefix = 'Fn::'
if tag in ['Ref', 'Condition']:
prefix = ''
cfntag = prefix + tag
if tag == 'GetAtt' and isinstance(node.value, six.string_types):
# ShortHand notation for !GetAtt accepts Resource.Attribute format
# while the standard notation is to use an array
# [Resource, Attribute]. Convert shorthand to standard format
value = node.value.split('.', 1)
elif isinstance(node, ScalarNode):
# Value of this node is scalar
value = loader.construct_scalar(node)
elif isinstance(node, SequenceNode):
# Value of this node is an array (Ex: [1,2])
value = loader.construct_sequence(node)
else:
# Value of this node is an mapping (ex: {foo: bar})
value = loader.construct_mapping(node)
return {cfntag: value}
def _dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def yaml_dump(dict_to_dump):
"""
Dump the dictionary as a YAML document.
:param dict_to_dump: Data to be serialized as YAML
:type dict_to_dump: dict
:return: YAML document
:rtype: str
"""
yaml.SafeDumper.add_representer(OrderedDict, _dict_representer)
return yaml.safe_dump(dict_to_dump, default_flow_style=False)
def _dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
def parse_template(template_str):
"""
Parse the SAM template.
:param template_str: A packaged YAML or json CloudFormation template
:type template_str: str
:return: Dictionary with keys defined in the template
:rtype: dict
"""
try:
# PyYAML doesn't support json as well as it should, so if the input
# is actually just json it is better to parse it with the standard
# json parser.
return json.loads(template_str, object_pairs_hook=OrderedDict)
except ValueError:
yaml.SafeLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _dict_constructor)
yaml.SafeLoader.add_multi_constructor('!', intrinsics_multi_constructor)
return yaml.safe_load(template_str)
def get_app_metadata(template_dict):
"""
Get the application metadata from a SAM template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: Application metadata as defined in the template
:rtype: ApplicationMetadata
:raises ApplicationMetadataNotFoundError
"""
if SERVERLESS_REPO_APPLICATION in template_dict.get(METADATA, {}):
app_metadata_dict = template_dict.get(METADATA).get(SERVERLESS_REPO_APPLICATION)
return ApplicationMetadata(app_metadata_dict)
raise ApplicationMetadataNotFoundError(
error_message='missing {} section in template Metadata'.format(SERVERLESS_REPO_APPLICATION))
def strip_app_metadata(template_dict):
"""
Strip the "AWS::ServerlessRepo::Application" metadata section from template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: stripped template content
:rtype: str
"""
if SERVERLESS_REPO_APPLICATION not in template_dict.get(METADATA, {}):
return template_dict
template_dict_copy = copy.deepcopy(template_dict)
# strip the whole metadata section if SERVERLESS_REPO_APPLICATION is the only key in it
if not [k for k in template_dict_copy.get(METADATA) if k != SERVERLESS_REPO_APPLICATION]:
template_dict_copy.pop(METADATA, None)
else:
template_dict_copy.get(METADATA).pop(SERVERLESS_REPO_APPLICATION, None)
return template_dict_copy
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/parser.py
|
strip_app_metadata
|
python
|
def strip_app_metadata(template_dict):
if SERVERLESS_REPO_APPLICATION not in template_dict.get(METADATA, {}):
return template_dict
template_dict_copy = copy.deepcopy(template_dict)
# strip the whole metadata section if SERVERLESS_REPO_APPLICATION is the only key in it
if not [k for k in template_dict_copy.get(METADATA) if k != SERVERLESS_REPO_APPLICATION]:
template_dict_copy.pop(METADATA, None)
else:
template_dict_copy.get(METADATA).pop(SERVERLESS_REPO_APPLICATION, None)
return template_dict_copy
|
Strip the "AWS::ServerlessRepo::Application" metadata section from template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: stripped template content
:rtype: str
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/parser.py#L129-L149
| null |
"""Helper to parse JSON/YAML SAM template and dump YAML files."""
import re
import copy
import json
from collections import OrderedDict
import six
import yaml
from yaml.resolver import ScalarNode, SequenceNode
from .application_metadata import ApplicationMetadata
from .exceptions import ApplicationMetadataNotFoundError
METADATA = 'Metadata'
SERVERLESS_REPO_APPLICATION = 'AWS::ServerlessRepo::Application'
APPLICATION_ID_PATTERN = r'arn:[\w\-]+:serverlessrepo:[\w\-]+:[0-9]+:applications\/[\S]+'
def intrinsics_multi_constructor(loader, tag_prefix, node):
"""
YAML constructor to parse CloudFormation intrinsics.
:return: a dictionary with key being the instrinsic name
"""
# Get the actual tag name excluding the first exclamation
tag = node.tag[1:]
# Some intrinsic functions doesn't support prefix "Fn::"
prefix = 'Fn::'
if tag in ['Ref', 'Condition']:
prefix = ''
cfntag = prefix + tag
if tag == 'GetAtt' and isinstance(node.value, six.string_types):
# ShortHand notation for !GetAtt accepts Resource.Attribute format
# while the standard notation is to use an array
# [Resource, Attribute]. Convert shorthand to standard format
value = node.value.split('.', 1)
elif isinstance(node, ScalarNode):
# Value of this node is scalar
value = loader.construct_scalar(node)
elif isinstance(node, SequenceNode):
# Value of this node is an array (Ex: [1,2])
value = loader.construct_sequence(node)
else:
# Value of this node is an mapping (ex: {foo: bar})
value = loader.construct_mapping(node)
return {cfntag: value}
def _dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def yaml_dump(dict_to_dump):
"""
Dump the dictionary as a YAML document.
:param dict_to_dump: Data to be serialized as YAML
:type dict_to_dump: dict
:return: YAML document
:rtype: str
"""
yaml.SafeDumper.add_representer(OrderedDict, _dict_representer)
return yaml.safe_dump(dict_to_dump, default_flow_style=False)
def _dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
def parse_template(template_str):
"""
Parse the SAM template.
:param template_str: A packaged YAML or json CloudFormation template
:type template_str: str
:return: Dictionary with keys defined in the template
:rtype: dict
"""
try:
# PyYAML doesn't support json as well as it should, so if the input
# is actually just json it is better to parse it with the standard
# json parser.
return json.loads(template_str, object_pairs_hook=OrderedDict)
except ValueError:
yaml.SafeLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _dict_constructor)
yaml.SafeLoader.add_multi_constructor('!', intrinsics_multi_constructor)
return yaml.safe_load(template_str)
def get_app_metadata(template_dict):
"""
Get the application metadata from a SAM template.
:param template_dict: SAM template as a dictionary
:type template_dict: dict
:return: Application metadata as defined in the template
:rtype: ApplicationMetadata
:raises ApplicationMetadataNotFoundError
"""
if SERVERLESS_REPO_APPLICATION in template_dict.get(METADATA, {}):
app_metadata_dict = template_dict.get(METADATA).get(SERVERLESS_REPO_APPLICATION)
return ApplicationMetadata(app_metadata_dict)
raise ApplicationMetadataNotFoundError(
error_message='missing {} section in template Metadata'.format(SERVERLESS_REPO_APPLICATION))
def parse_application_id(text):
"""
Extract the application id from input text.
:param text: text to parse
:type text: str
:return: application id if found in the input
:rtype: str
"""
result = re.search(APPLICATION_ID_PATTERN, text)
return result.group(0) if result else None
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/permission_helper.py
|
make_application_private
|
python
|
def make_application_private(application_id, sar_client=None):
if not application_id:
raise ValueError('Require application id to make the app private')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
sar_client.put_application_policy(
ApplicationId=application_id,
Statements=[]
)
|
Set the application to be private.
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/permission_helper.py#L32-L51
| null |
"""Module containing methods to manage application permissions."""
import boto3
from .application_policy import ApplicationPolicy
def make_application_public(application_id, sar_client=None):
"""
Set the application to be public.
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
"""
if not application_id:
raise ValueError('Require application id to make the app public')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
application_policy = ApplicationPolicy(['*'], [ApplicationPolicy.DEPLOY])
application_policy.validate()
sar_client.put_application_policy(
ApplicationId=application_id,
Statements=[application_policy.to_statement()]
)
def share_application_with_accounts(application_id, account_ids, sar_client=None):
"""
Share the application privately with given AWS account IDs.
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param account_ids: List of AWS account IDs, or *
:type account_ids: list of str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
"""
if not application_id or not account_ids:
raise ValueError('Require application id and list of AWS account IDs to share the app')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
application_policy = ApplicationPolicy(account_ids, [ApplicationPolicy.DEPLOY])
application_policy.validate()
sar_client.put_application_policy(
ApplicationId=application_id,
Statements=[application_policy.to_statement()]
)
|
awslabs/aws-serverlessrepo-python
|
serverlessrepo/permission_helper.py
|
share_application_with_accounts
|
python
|
def share_application_with_accounts(application_id, account_ids, sar_client=None):
if not application_id or not account_ids:
raise ValueError('Require application id and list of AWS account IDs to share the app')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
application_policy = ApplicationPolicy(account_ids, [ApplicationPolicy.DEPLOY])
application_policy.validate()
sar_client.put_application_policy(
ApplicationId=application_id,
Statements=[application_policy.to_statement()]
)
|
Share the application privately with given AWS account IDs.
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param account_ids: List of AWS account IDs, or *
:type account_ids: list of str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
|
train
|
https://github.com/awslabs/aws-serverlessrepo-python/blob/e2126cee0191266cfb8a3a2bc3270bf50330907c/serverlessrepo/permission_helper.py#L54-L77
|
[
"def validate(self):\n \"\"\"\n Check if the formats of principals and actions are valid.\n\n :return: True, if the policy is valid\n :raises: InvalidApplicationPolicyError\n \"\"\"\n if not self.principals:\n raise InvalidApplicationPolicyError(error_message='principals not provided')\n\n if not self.actions:\n raise InvalidApplicationPolicyError(error_message='actions not provided')\n\n if any(not self._PRINCIPAL_PATTERN.match(p) for p in self.principals):\n raise InvalidApplicationPolicyError(\n error_message='principal should be 12-digit AWS account ID or \"*\"')\n\n unsupported_actions = sorted(set(self.actions) - set(self.SUPPORTED_ACTIONS))\n if unsupported_actions:\n raise InvalidApplicationPolicyError(\n error_message='{} not supported'.format(', '.join(unsupported_actions)))\n\n return True\n",
"def to_statement(self):\n \"\"\"\n Convert to a policy statement dictionary.\n\n :return: Dictionary containing Actions and Principals\n :rtype: dict\n \"\"\"\n return {\n 'Principals': self.principals,\n 'Actions': self.actions\n }\n"
] |
"""Module containing methods to manage application permissions."""
import boto3
from .application_policy import ApplicationPolicy
def make_application_public(application_id, sar_client=None):
"""
Set the application to be public.
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
"""
if not application_id:
raise ValueError('Require application id to make the app public')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
application_policy = ApplicationPolicy(['*'], [ApplicationPolicy.DEPLOY])
application_policy.validate()
sar_client.put_application_policy(
ApplicationId=application_id,
Statements=[application_policy.to_statement()]
)
def make_application_private(application_id, sar_client=None):
"""
Set the application to be private.
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
"""
if not application_id:
raise ValueError('Require application id to make the app private')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
sar_client.put_application_policy(
ApplicationId=application_id,
Statements=[]
)
|
asifpy/django-crudbuilder
|
crudbuilder/registry.py
|
CrudBuilderRegistry.extract_args
|
python
|
def extract_args(cls, *args):
model = None
crudbuilder = None
for arg in args:
if issubclass(arg, models.Model):
model = arg
else:
crudbuilder = arg
return [model, crudbuilder]
|
Takes any arguments like a model and crud, or just one of
those, in any order, and return a model and crud.
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/registry.py#L18-L32
| null |
class CrudBuilderRegistry(dict):
"""Dictionary like object representing a collection of objects."""
@classmethod
def register(self, *args, **kwargs):
"""
Register a crud.
Two unordered arguments are accepted, at least one should be passed:
- a model,
- a crudbuilder class
"""
assert len(args) <= 2, 'register takes at most 2 args'
assert len(args) > 0, 'register takes at least 1 arg'
model, crudbuilder = self.__class__.extract_args(*args)
if not issubclass(model, models.Model):
msg = "First argument should be Django Model"
raise NotModelException(msg)
key = self._model_key(model, crudbuilder)
if key in self:
msg = "Key '{key}' has already been registered.".format(
key=key
)
raise AlreadyRegistered(msg)
self.__setitem__(key, crudbuilder)
return crudbuilder
def _model_key(self, model, crudbuilder):
app_label = model._meta.app_label
model_name = model.__name__.lower()
postfix_url = helpers.custom_postfix_url(crudbuilder(), model_name)
return '{}-{}-{}'.format(app_label, model_name, postfix_url)
def unregister(self, model):
key = self._model_key(model)
if key in self:
self.__delitem__(key)
def __getitem__(self, name):
"""
Return the CrudBuilder class registered for this name. If none is
registered, raise NotRegistered.
"""
try:
return super(CrudBuilderRegistry, self).__getitem__(name)
except KeyError:
raise NotRegistered(name, self)
|
asifpy/django-crudbuilder
|
crudbuilder/registry.py
|
CrudBuilderRegistry.register
|
python
|
def register(self, *args, **kwargs):
assert len(args) <= 2, 'register takes at most 2 args'
assert len(args) > 0, 'register takes at least 1 arg'
model, crudbuilder = self.__class__.extract_args(*args)
if not issubclass(model, models.Model):
msg = "First argument should be Django Model"
raise NotModelException(msg)
key = self._model_key(model, crudbuilder)
if key in self:
msg = "Key '{key}' has already been registered.".format(
key=key
)
raise AlreadyRegistered(msg)
self.__setitem__(key, crudbuilder)
return crudbuilder
|
Register a crud.
Two unordered arguments are accepted, at least one should be passed:
- a model,
- a crudbuilder class
|
train
|
https://github.com/asifpy/django-crudbuilder/blob/9de1c6fa555086673dd7ccc351d4b771c6192489/crudbuilder/registry.py#L34-L61
|
[
"def _model_key(self, model, crudbuilder):\n app_label = model._meta.app_label\n model_name = model.__name__.lower()\n postfix_url = helpers.custom_postfix_url(crudbuilder(), model_name)\n return '{}-{}-{}'.format(app_label, model_name, postfix_url)\n"
] |
class CrudBuilderRegistry(dict):
"""Dictionary like object representing a collection of objects."""
@classmethod
def extract_args(cls, *args):
"""
Takes any arguments like a model and crud, or just one of
those, in any order, and return a model and crud.
"""
model = None
crudbuilder = None
for arg in args:
if issubclass(arg, models.Model):
model = arg
else:
crudbuilder = arg
return [model, crudbuilder]
def _model_key(self, model, crudbuilder):
app_label = model._meta.app_label
model_name = model.__name__.lower()
postfix_url = helpers.custom_postfix_url(crudbuilder(), model_name)
return '{}-{}-{}'.format(app_label, model_name, postfix_url)
def unregister(self, model):
key = self._model_key(model)
if key in self:
self.__delitem__(key)
def __getitem__(self, name):
"""
Return the CrudBuilder class registered for this name. If none is
registered, raise NotRegistered.
"""
try:
return super(CrudBuilderRegistry, self).__getitem__(name)
except KeyError:
raise NotRegistered(name, self)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.