repository_name
stringclasses 316
values | func_path_in_repository
stringlengths 6
223
| func_name
stringlengths 1
134
| language
stringclasses 1
value | func_code_string
stringlengths 57
65.5k
| func_documentation_string
stringlengths 1
46.3k
| split_name
stringclasses 1
value | func_code_url
stringlengths 91
315
| called_functions
listlengths 1
156
⌀ | enclosing_scope
stringlengths 2
1.48M
|
|---|---|---|---|---|---|---|---|---|---|
hover2pi/svo_filters
|
svo_filters/svo.py
|
Filter.load_xml
|
python
|
def load_xml(self, filepath):
    """Load the filter from an SVO XML (VOTable) file.

    Parameters
    ----------
    filepath: str
        The filepath for the filter
    """
    # Parse the VOTable and stack the data columns as raw rows
    vot = vo.parse_single_table(filepath)
    self.raw = np.array([list(i) for i in vot.array]).T

    # Parse the filter metadata from the PARAM elements
    for p in [str(p).split() for p in vot.params]:

        # Extract the key/value pair from the quoted fields
        key = p[1].split('"')[1]
        val = p[-1].split('"')[1]

        # Cast numeric values to float
        flt1 = p[2].split('"')[1] == 'float'
        flt2 = p[3].split('"')[1] == 'float'
        if flt1 or flt2:
            val = float(val)

        else:
            # Strip the bytes-repr prefix and un-escape XML entities
            # (the original '.replace('b'', '')' was a syntax error and
            # '.replace('&', '&')' a no-op: they should strip "b'" and
            # decode '&amp;' respectively)
            val = val.replace("b'", '')\
                     .replace('&apos;', '')\
                     .replace('&amp;', '&')\
                     .strip(';')

        # Set the attribute; the long Description is not needed
        if key != 'Description':
            setattr(self, key, val)

    # Create some attributes
    self.path = filepath
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
|
Load the filter from a txt file
Parameters
----------
filepath: str
The filepath for the filter
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L548-L586
| null |
class Filter:
    """
    Creates a Filter object to store a photometric filter profile
    and metadata

    NOTE: the raw numeric SVO attributes below (WavelengthMin, FWHM,
    ZeroPoint, ...) are consumed by ``__init__``, which replaces them
    with unitful ``wave_*``, ``fwhm`` and ``zp`` quantities and then
    deletes the originals.

    Attributes
    ----------
    path: str
        The absolute filepath for the bandpass data, an ASCII file with
        a wavelength column in Angstroms and a response column of values
        ranging from 0 to 1
    refs: list, str
        The references for the bandpass data
    rsr: np.ndarray
        The wavelength and relative spectral response (RSR) arrays
    Band: str
        The band name
    CalibrationReference: str
        The paper detailing the calibration
    FWHM: float
        The FWHM for the filter
    Facility: str
        The telescope facility
    FilterProfileService: str
        The SVO source
    MagSys: str
        The magnitude system
    PhotCalID: str
        The calibration standard
    PhotSystem: str
        The photometric system
    ProfileReference: str
        The SVO reference
    WavelengthCen: float
        The center wavelength
    WavelengthEff: float
        The effective wavelength
    WavelengthMax: float
        The maximum wavelength
    WavelengthMean: float
        The mean wavelength
    WavelengthMin: float
        The minimum wavelength
    WavelengthPeak: float
        The peak wavelength
    WavelengthPhot: float
        The photon distribution based effective wavelength
    WavelengthPivot: float
        The wavelength pivot
    WavelengthUCD: str
        The SVO wavelength unit
    WavelengthUnit: str
        The wavelength unit
    WidthEff: float
        The effective width
    ZeroPoint: float
        The value of the zero point flux
    ZeroPointType: str
        The system of the zero point
    ZeroPointUnit: str
        The units of the zero point
    filterID: str
        The SVO filter ID
    """
def __init__(self, band, filter_directory=None,
             wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
             **kwargs):
    """
    Loads the bandpass data into the Filter object

    Parameters
    ----------
    band: str
        The bandpass filename (e.g. 2MASS.J)
    filter_directory: str
        The directory containing the filter files
    wave_units: str, astropy.units.core.PrefixUnit (optional)
        The wavelength units
    flux_units: str, astropy.units.core.PrefixUnit (optional)
        The zeropoint flux units
    """
    if filter_directory is None:
        filter_directory = resource_filename('svo_filters', 'data/filters/')

    # Check if TopHat
    if band.lower().replace('-', '').replace(' ', '') == 'tophat':

        # Check kwargs for the wavelength limits
        wave_min = kwargs.get('wave_min')
        wave_max = kwargs.get('wave_max')
        filepath = ''
        if wave_min is None or wave_max is None:
            raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")

        # Generate the synthetic top hat profile
        n_pix = kwargs.get('n_pixels', 100)
        self.load_TopHat(wave_min, wave_max, n_pix)

    else:

        # Get list of filters, indexed without the .txt extension
        files = glob(filter_directory + '*')
        no_ext = {f.replace('.txt', ''): f for f in files}
        bands = [os.path.basename(b) for b in no_ext]
        fp = os.path.join(filter_directory, band)
        filepath = no_ext.get(fp, fp)

        # If the filter is missing, tell the user where to get one
        # (URL fixed: the message previously contained 'http: //')
        if band not in bands:
            err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp://svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
            raise IOError(err)

        # Inspect the first line to determine the file format
        with open(filepath) as f:
            top = f.readline()

        # Read in XML file
        if top.startswith('<?xml'):
            self.load_xml(filepath)

        # Read in txt file
        elif filepath.endswith('.txt'):
            self.load_txt(filepath)

        else:
            raise TypeError("File must be XML or ascii format.")

    # Set the wavelength and throughput (raw data is in Angstroms)
    self._wave_units = q.AA
    self._wave = np.array([self.raw[0]]) * self.wave_units
    self._throughput = np.array([self.raw[1]])

    # Set n_bins and pixels_per_bin
    self.n_bins = 1
    self.pixels_per_bin = self.raw.shape[-1]

    # Rename some values and apply units
    self.wave_min = self.WavelengthMin * self.wave_units
    self.wave_max = self.WavelengthMax * self.wave_units
    self.wave_eff = self.WavelengthEff * self.wave_units
    self.wave_center = self.WavelengthCen * self.wave_units
    self.wave_mean = self.WavelengthMean * self.wave_units
    self.wave_peak = self.WavelengthPeak * self.wave_units
    self.wave_phot = self.WavelengthPhot * self.wave_units
    self.wave_pivot = self.WavelengthPivot * self.wave_units
    self.width_eff = self.WidthEff * self.wave_units
    self.fwhm = self.FWHM * self.wave_units
    self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)

    # Delete the now-redundant unitless attributes
    del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
    del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
    del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
    del self.ZeroPointUnit, self.ZeroPoint
    try:
        del self.WavelengthUnit
    except AttributeError:
        pass

    # Set the wavelength units
    if wave_units is not None:
        self.wave_units = wave_units

    # Set zeropoint flux units
    if flux_units is not None:
        self._flux_units = self.zp.unit
        self.flux_units = flux_units

    # Get references; CalibrationReference may be absent for some
    # filters, so only AttributeError is expected here (previously a
    # bare except that could mask real errors)
    self.refs = []
    try:
        if isinstance(self.CalibrationReference, str):
            self.refs = [self.CalibrationReference.split('=')[-1]]
    except AttributeError:
        self.CalibrationReference = None

    # Set a base name
    self.name = self.filterID.split('/')[-1]

    # Try to get the extinction vector R from Green et al. (2018)
    self.ext_vector = EXTINCTION.get(self.name, 0)

    # Set the systematic uncertainty (default 2 percent)
    self.systematics = SYSTEMATICS.get(self.name, 0.02)

    # Bin if any bin() keyword arguments were provided
    if kwargs:
        bwargs = {k: v for k, v in kwargs.items() if k in
                  inspect.signature(self.bin).parameters.keys()}
        self.bin(**bwargs)
def apply(self, spectrum, plot=False):
    """
    Apply the filter to the given [W, F], or [W, F, E] spectrum

    Parameters
    ----------
    spectrum: array-like
        The wavelength [um] and flux of the spectrum
        to apply the filter to
    plot: bool
        Plot the original and filtered spectrum

    Returns
    -------
    np.ndarray
        The filtered spectrum and error
    """
    # Convert to filter units if possible
    f_units = 1.
    if hasattr(spectrum[0], 'unit'):
        spectrum[0] = spectrum[0].to(self.wave_units)
    if hasattr(spectrum[1], 'unit'):
        spectrum[1] = spectrum[1].to(self.flux_units)
        f_units = self.flux_units
    if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
        spectrum[2] = spectrum[2].to(self.flux_units)

    # Split into plain arrays; the error column is optional
    wave_in, flux_in, *err_cols = [np.asarray(i) for i in spectrum]
    unc = len(err_cols) > 0
    err_in = err_cols[0] if unc else np.ones_like(flux_in) * np.nan

    # Work with 2D flux/error stacks
    if flux_in.ndim == 1:
        flux_in = np.expand_dims(flux_in, axis=0)
        err_in = np.expand_dims(err_in, axis=0)

    # One RSR curve per bin
    rsr = np.copy(self.rsr)

    # Interpolate each input spectrum onto each bin's wavelength grid
    # and weight by the bin's throughput
    filtered_flx = np.zeros((rsr.shape[0], flux_in.shape[0], rsr.shape[2]))
    filtered_err = np.zeros_like(filtered_flx)
    for i, bn in enumerate(rsr):
        for j, (flx, ee) in enumerate(zip(flux_in, err_in)):
            filtered_flx[i][j] = np.interp(bn[0], wave_in, flx, left=np.nan, right=np.nan) * bn[1]
            filtered_err[i][j] = np.interp(bn[0], wave_in, ee, left=np.nan, right=np.nan) * bn[1]

    # Fold in the filter systematic uncertainties
    if unc:
        filtered_err += filtered_flx * self.systematics

    if plot:
        # Make the figure
        palette = color_gen('Category10')
        fig = figure(title=self.filterID,
                     x_axis_label='Wavelength [{}]'.format(self.wave_units),
                     y_axis_label='Flux Density [{}]'.format(self.flux_units))

        # Plot the unfiltered spectrum
        fig.line(wave_in, flux_in[0], legend='Input spectrum', color='black')
        if unc:
            band_x = np.append(wave_in, wave_in[::-1])
            band_y = np.append(flux_in - err_in, (flux_in + err_in)[::-1])
            fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)

        # Plot each filtered bin with its uncertainty band
        for bin_wave, bn, bne in zip(self.wave, filtered_flx, filtered_err):
            color = next(palette)
            fig.line(bin_wave, bn[0], color=color)
            if unc:
                band_x = np.append(bin_wave, bin_wave[::-1])
                band_y = np.append(bn[0] - bne[0], (bn[0] + bne[0])[::-1])
                fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
        show(fig)

    return filtered_flx.squeeze() * f_units, filtered_err.squeeze() * f_units
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
    """
    Break the filter up into bins and apply a throughput to each bin,
    useful for G141, G102, and other grisms

    Parameters
    ----------
    n_bins: int
        The number of bins to dice the throughput curve into
    pixels_per_bin: int (optional)
        The number of channels per bin; takes precedence over n_bins
    wave_min: astropy.units.quantity (optional)
        The minimum wavelength to use
    wave_max: astropy.units.quantity (optional)
        The maximum wavelength to use
    """
    # Update the stored wavelength limits if new ones were given
    if wave_min is not None:
        self.wave_min = wave_min
    if wave_max is not None:
        self.wave_max = wave_max

    # Keep only the raw data between the limits (raw data is in Angstroms)
    raw_wave = self.raw[0]
    in_range = np.logical_and(raw_wave * q.AA >= self.wave_min,
                              raw_wave * q.AA <= self.wave_max)
    self.wave = (raw_wave[in_range] * q.AA).to(self.wave_units)
    self.throughput = self.raw[1][in_range]
    print('Bandpass trimmed to',
          '{} - {}'.format(self.wave_min, self.wave_max))

    # Work out the binning: pixels_per_bin wins over n_bins
    n_pts = len(self.wave)
    if isinstance(pixels_per_bin, int):
        self.pixels_per_bin = pixels_per_bin
        self.n_bins = int(n_pts / self.pixels_per_bin)
    elif isinstance(n_bins, int):
        self.n_bins = n_bins
        self.pixels_per_bin = int(n_pts / self.n_bins)
    else:
        raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
    print('{} bins of {} pixels each.'.format(self.n_bins,
                                              self.pixels_per_bin))

    # Center-trim so the curve divides into an integer number of bins
    keep = self.n_bins * self.pixels_per_bin
    first = (n_pts - keep) // 2
    shape = (self.n_bins, self.pixels_per_bin)
    self.wave = self.wave[first:first + keep].reshape(shape)
    self.throughput = self.throughput[first:first + keep].reshape(shape)
@property
def centers(self):
    """A getter for the wavelength bin centers and average fluxes"""
    # Collapse each bin to a single (wavelength, throughput) point
    return np.asarray([np.nanmean(self.wave.value, axis=1),
                       np.nanmean(self.throughput, axis=1)])
@property
def flux_units(self):
    """A getter for the flux units"""
    return self._flux_units

@flux_units.setter
def flux_units(self, units):
    """
    A setter for the flux units

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The desired units of the zeropoint flux density
    """
    # Reject anything that is not a recognized unit-like object
    valid = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
    if not isinstance(units, valid):
        raise ValueError(units, "units not understood.")

    # Only convert when the units actually change
    if units != self.flux_units:
        # Convert the zeropoint via the spectral density equivalency
        equiv = q.spectral_density(self.wave_eff)
        self.zp = self.zp.to(units, equivalencies=equiv)
        # Store new units
        self._flux_units = units
def info(self, fetch=False):
    """
    Print a table of info about the current filter

    Parameters
    ----------
    fetch: bool
        Return the table instead of printing it
    """
    # Gather the printable, public attributes
    printable = (int, bytes, bool, str, float, tuple, list, np.ndarray)
    rows = [[key, str(val)] for key, val in vars(self).items()
            if isinstance(val, printable)
            and key not in ['rsr', 'raw', 'centers'] and not key.startswith('_')]

    # Build a sorted two-column table
    table = at.Table(np.asarray(rows).reshape(len(rows), 2),
                     names=['Attributes', 'Values'])
    table.sort('Attributes')

    if fetch:
        return table
    table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
    """
    Loads a top hat filter given wavelength min and max values

    Parameters
    ----------
    wave_min: astropy.units.quantity
        The minimum wavelength to use
    wave_max: astropy.units.quantity
        The maximum wavelength to use
    pixels_per_bin: int
        The number of pixels for the filter
    """
    # Store the binning and work in Angstroms
    self.pixels_per_bin = pixels_per_bin
    self.n_bins = 1
    self._wave_units = q.AA
    wave_min = wave_min.to(self.wave_units)
    wave_max = wave_max.to(self.wave_units)

    # Create a flat RSR curve between the limits
    self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
    self._throughput = np.ones_like(self.wave)
    self.raw = np.array([self.wave.value, self.throughput])

    # The effective wavelength is the midpoint; the width the full span
    center = ((wave_min + wave_max) / 2.).value
    span = (wave_max - wave_min).value

    # Fill in SVO-style metadata for the synthetic filter
    self.path = ''
    self.refs = ''
    self.Band = 'Top Hat'
    self.CalibrationReference = ''
    self.FWHM = span
    self.Facility = '-'
    self.FilterProfileService = '-'
    self.MagSys = '-'
    self.PhotCalID = ''
    self.PhotSystem = ''
    self.ProfileReference = ''
    self.WavelengthMin = wave_min.value
    self.WavelengthMax = wave_max.value
    self.WavelengthCen = center
    self.WavelengthEff = center
    self.WavelengthMean = center
    self.WavelengthPeak = center
    self.WavelengthPhot = center
    self.WavelengthPivot = center
    self.WavelengthUCD = ''
    self.WidthEff = span
    self.ZeroPoint = 0
    self.ZeroPointType = ''
    self.ZeroPointUnit = 'Jy'
    self.filterID = 'Top Hat'
def load_txt(self, filepath):
    """Load the filter from a txt file

    Parameters
    ----------
    filepath: str
        The filepath
    """
    self.raw = np.genfromtxt(filepath, unpack=True)

    # Convert to Angstroms if the values look like microns
    if self.raw[0][-1] < 100:
        self.raw[0] = self.raw[0] * 10000
    self.WavelengthUnit = str(q.AA)
    self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
    x, f = self.raw

    # Get a spectrum of Vega (file wavelengths converted to Angstroms)
    vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
    vega = np.genfromtxt(vega_file, unpack=True)[:2]
    vega[0] = vega[0] * 10000
    vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA

    # Zero point is the throughput-weighted mean Vega flux
    flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
    thru = np.trapz(f, x=x)
    self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value

    # Calculate the filter's properties
    self.filterID = os.path.splitext(os.path.basename(filepath))[0]
    self.WavelengthPeak = np.max(self.raw[0])

    # Wavelength limits from the 1-percent-of-max points on each edge
    rise = np.where(np.diff(f) > 0)[0][-1]
    self.WavelengthMin = np.interp(max(f)/100., f[:rise], x[:rise])
    f_rev = f[::-1]
    fall = np.where(np.diff(f_rev) > 0)[0][-1]
    self.WavelengthMax = np.interp(max(f)/100., f_rev[:fall], x[::-1][:fall])

    # Throughput- and Vega-weighted wavelength statistics
    # NOTE(review): 'vega' is the rebinned 2D [wave, flux] array here,
    # not just the flux row — confirm the trapz weighting is intended
    self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
    self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
    self.WidthEff = np.trapz(f, x=x)/f.max()
    self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
    self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)

    # FWHM and center wavelength from the half-maximum crossing points
    halfmax = f.max()/2.
    hm_lo = x[f > halfmax][0]
    hm_hi = x[f > halfmax][-1]
    self.FWHM = hm_hi - hm_lo
    self.WavelengthCen = (hm_lo + hm_hi)/2.

    # Add missing attributes
    self.path = ''
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def overlap(self, spectrum):
    """Tests for overlap of this filter with a spectrum

    'full' means the filter's nonzero-throughput wavelength range lies
    entirely inside the spectrum's wavelength range; 'none' means the
    two ranges are disjoint; anything else is 'partial'.

    Parameters
    ----------
    spectrum: sequence
        The [W, F] spectrum with astropy units

    Returns
    -------
    ans : {'full', 'partial', 'none'}
        Overlap status.
    """
    # Wavelength range where the filter actually transmits
    transmitting = self.wave[np.where(self.throughput != 0)]
    filt_lo, filt_hi = transmitting.min(), transmitting.max()

    # Wavelength range covered by the spectrum
    spec_lo, spec_hi = spectrum[0].min(), spectrum[0].max()

    if filt_lo >= spec_lo and filt_hi <= spec_hi:
        return 'full'
    if filt_hi < spec_lo or spec_hi < filt_lo:
        return 'none'
    return 'partial'
def plot(self, fig=None, draw=True):
    """
    Plot the filter

    Parameters
    ----------
    fig: bokeh.plotting.figure (optional)
        A figure to plot on
    draw: bool
        Draw the figure, else return it

    Returns
    -------
    bokeh.plotting.figure
        The filter figure (only when draw is False)
    """
    palette = color_gen('Category10')

    # Make a new figure unless one was provided
    if fig is None:
        fig = figure(title=self.filterID,
                     x_axis_label='Wavelength [{}]'.format(self.wave_units),
                     y_axis_label='Throughput')

    # Plot the raw curve
    fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
             alpha=0.1, line_width=8, color='black')

    # Plot each bin in its own color, with the bin centers marked
    for bin_wave, bin_thru in self.rsr:
        fig.line(bin_wave, bin_thru, color=next(palette), line_width=2)
    fig.circle(*self.centers, size=8, color='black')

    if draw:
        show(fig)
    else:
        return fig
@property
def rsr(self):
    """A getter for the relative spectral response (rsr) curve"""
    # Stack to (2, n_bins, n_pix) then swap to (n_bins, 2, n_pix)
    return np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
@property
def throughput(self):
    """A getter for the throughput"""
    return self._throughput

@throughput.setter
def throughput(self, points):
    """A setter for the throughput

    Parameters
    ----------
    points: sequence
        The array of throughput points
    """
    # The throughput must align with the wavelength grid
    if points.shape != self.wave.shape:
        raise ValueError("Throughput and wavelength must be same shape.")
    self._throughput = points
@property
def wave(self):
    """A getter for the wavelength"""
    return self._wave

@wave.setter
def wave(self, wavelength):
    """A setter for the wavelength

    Parameters
    ----------
    wavelength: astropy.units.quantity.Quantity
        The array with units
    """
    # Only unitful arrays are accepted
    if not isinstance(wavelength, q.quantity.Quantity):
        raise ValueError("Wavelength must be in length units.")
    self._wave = wavelength
    # Keep the stored units in sync with the new array
    self.wave_units = wavelength.unit
@property
def wave_units(self):
    """A getter for the wavelength units"""
    return self._wave_units

@wave_units.setter
def wave_units(self, units):
    """
    A setter for the wavelength units

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The wavelength units
    """
    # Make sure it's length units
    if not units.is_equivalent(q.m):
        raise ValueError(units, ": New wavelength units must be a length.")

    # Update the units, then re-express every stored wavelength value
    self._wave_units = units
    self._wave = self.wave.to(self.wave_units).round(5)
    for attr in ('wave_min', 'wave_max', 'wave_eff', 'wave_center',
                 'wave_mean', 'wave_peak', 'wave_phot', 'wave_pivot',
                 'width_eff', 'fwhm'):
        setattr(self, attr, getattr(self, attr).to(self.wave_units).round(5))
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
Filter.overlap
|
python
|
def overlap(self, spectrum):
    """Check how this filter's nonzero-throughput range overlaps a spectrum.

    Parameters
    ----------
    spectrum: sequence
        The [W, F] spectrum with astropy units

    Returns
    -------
    str
        'full' when the filter lies entirely inside the spectrum's
        wavelength range, 'none' when the ranges are disjoint,
        'partial' otherwise
    """
    # Range where the filter actually transmits
    active = self.wave[np.where(self.throughput != 0)]
    lo, hi = active.min(), active.max()

    # Range covered by the spectrum
    spec_wave = spectrum[0]
    s_lo, s_hi = spec_wave.min(), spec_wave.max()

    if lo >= s_lo and hi <= s_hi:
        return 'full'
    elif hi < s_lo or s_hi < lo:
        return 'none'
    else:
        return 'partial'
|
Tests for overlap of this filter with a spectrum
Example of full overlap:
|---------- spectrum ----------|
|------ self ------|
Examples of partial overlap: :
|---------- self ----------|
|------ spectrum ------|
|---- spectrum ----|
|----- self -----|
|---- self ----|
|---- spectrum ----|
Examples of no overlap: :
|---- spectrum ----| |---- other ----|
|---- other ----| |---- spectrum ----|
Parameters
----------
spectrum: sequence
The [W, F] spectrum with astropy units
Returns
-------
ans : {'full', 'partial', 'none'}
Overlap status.
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L588-L638
| null |
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
**kwargs):
"""
Loads the bandpass data into the Filter object
Parameters
----------
band: str
The bandpass filename (e.g. 2MASS.J)
filter_directory: str
The directory containing the filter files
wave_units: str, astropy.units.core.PrefixUnit (optional)
The wavelength units
flux_units: str, astropy.units.core.PrefixUnit (optional)
The zeropoint flux units
"""
if filter_directory is None:
filter_directory = resource_filename('svo_filters', 'data/filters/')
# Check if TopHat
if band.lower().replace('-', '').replace(' ', '') == 'tophat':
# check kwargs for limits
wave_min = kwargs.get('wave_min')
wave_max = kwargs.get('wave_max')
filepath = ''
if wave_min is None or wave_max is None:
raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
else:
# Load the filter
n_pix = kwargs.get('n_pixels', 100)
self.load_TopHat(wave_min, wave_max, n_pix)
else:
# Get list of filters
files = glob(filter_directory+'*')
no_ext = {f.replace('.txt', ''): f for f in files}
bands = [os.path.basename(b) for b in no_ext]
fp = os.path.join(filter_directory, band)
filepath = no_ext.get(fp, fp)
# If the filter is missing, ask what to do
if band not in bands:
err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp: //svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
raise IOError(err)
# Get the first line to determine format
with open(filepath) as f:
top = f.readline()
# Read in XML file
if top.startswith('<?xml'):
self.load_xml(filepath)
# Read in txt file
elif filepath.endswith('.txt'):
self.load_txt(filepath)
else:
raise TypeError("File must be XML or ascii format.")
# Set the wavelength and throughput
self._wave_units = q.AA
self._wave = np.array([self.raw[0]]) * self.wave_units
self._throughput = np.array([self.raw[1]])
# Set n_bins and pixels_per_bin
self.n_bins = 1
self.pixels_per_bin = self.raw.shape[-1]
# Rename some values and apply units
self.wave_min = self.WavelengthMin * self.wave_units
self.wave_max = self.WavelengthMax * self.wave_units
self.wave_eff = self.WavelengthEff * self.wave_units
self.wave_center = self.WavelengthCen * self.wave_units
self.wave_mean = self.WavelengthMean * self.wave_units
self.wave_peak = self.WavelengthPeak * self.wave_units
self.wave_phot = self.WavelengthPhot * self.wave_units
self.wave_pivot = self.WavelengthPivot * self.wave_units
self.width_eff = self.WidthEff * self.wave_units
self.fwhm = self.FWHM * self.wave_units
self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
# Delete redundant attributes
del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
del self.ZeroPointUnit, self.ZeroPoint
try:
del self.WavelengthUnit
except AttributeError:
pass
# Set the wavelength units
if wave_units is not None:
self.wave_units = wave_units
# Set zeropoint flux units
if flux_units is not None:
self._flux_units = self.zp.unit
self.flux_units = flux_units
# Get references
self.refs = []
try:
if isinstance(self.CalibrationReference, str):
self.refs = [self.CalibrationReference.split('=')[-1]]
except:
self.CalibrationReference = None
# Set a base name
self.name = self.filterID.split('/')[-1]
# Try to get the extinction vector R from Green et al. (2018)
self.ext_vector = EXTINCTION.get(self.name, 0)
# Set the systematic uncertainty (default 2 percent)
self.systematics = SYSTEMATICS.get(self.name, 0.02)
# Bin
if kwargs:
bwargs = {k: v for k, v in kwargs.items() if k in
inspect.signature(self.bin).parameters.keys()}
self.bin(**bwargs)
def apply(self, spectrum, plot=False):
"""
Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error
"""
# Convert to filter units if possible
f_units = 1.
if hasattr(spectrum[0], 'unit'):
spectrum[0] = spectrum[0].to(self.wave_units)
if hasattr(spectrum[1], 'unit'):
spectrum[1] = spectrum[1].to(self.flux_units)
f_units = self.flux_units
if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
spectrum[2] = spectrum[2].to(self.flux_units)
# Make into iterable arrays
wav, flx, *err = [np.asarray(i) for i in spectrum]
# Check for error array
if len(err) == 0:
err = np.ones_like(flx)*np.nan
unc = False
else:
err = err[0]
unc = True
# Make flux 2D
if len(flx.shape) == 1:
flx = np.expand_dims(flx, axis=0)
err = np.expand_dims(err, axis=0)
# Make throughput 3D
rsr = np.copy(self.rsr)
# Make empty filtered arrays
filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
filtered_err = np.zeros_like(filtered_flx)
# Rebin the input spectra to the filter wavelength array
# and apply the RSR curve to the spectrum
for i, bn in enumerate(rsr):
for j, (f, e) in enumerate(zip(flx, err)):
filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
# Propagate the filter systematic uncertainties
if unc:
filtered_err += filtered_flx*self.systematics
if plot:
# Make the figure
COLORS = color_gen('Category10')
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Flux Density [{}]'.format(self.flux_units)
fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
# Plot the unfiltered spectrum
fig.line(wav, flx[0], legend='Input spectrum', color='black')
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(flx-err, (flx+err)[::-1])
fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
# Plot each spectrum bin
for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
color = next(COLORS)
fig.line(wav, bn[0], color=color)
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
show(fig)
return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
"""
Break the filter up into bins and apply a throughput to each bin,
useful for G141, G102, and other grisms
Parameters
----------
n_bins: int
The number of bins to dice the throughput curve into
pixels_per_bin: int (optional)
The number of channels per bin, which will be used
to calculate n_bins
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
"""
# Get wavelength limits
if wave_min is not None:
self.wave_min = wave_min
if wave_max is not None:
self.wave_max = wave_max
# Trim the wavelength by the given min and max
raw_wave = self.raw[0]
whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
raw_wave * q.AA <= self.wave_max)
self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
self.throughput = self.raw[1][whr]
print('Bandpass trimmed to',
'{} - {}'.format(self.wave_min, self.wave_max))
# Calculate the number of bins and channels
pts = len(self.wave)
if isinstance(pixels_per_bin, int):
self.pixels_per_bin = pixels_per_bin
self.n_bins = int(pts/self.pixels_per_bin)
elif isinstance(n_bins, int):
self.n_bins = n_bins
self.pixels_per_bin = int(pts/self.n_bins)
else:
raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
print('{} bins of {} pixels each.'.format(self.n_bins,
self.pixels_per_bin))
# Trim throughput edges so that there are an integer number of bins
new_len = self.n_bins * self.pixels_per_bin
start = (pts - new_len) // 2
self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
@property
def centers(self):
"""A getter for the wavelength bin centers and average fluxes"""
# Get the bin centers
w_cen = np.nanmean(self.wave.value, axis=1)
f_cen = np.nanmean(self.throughput, axis=1)
return np.asarray([w_cen, f_cen])
@property
def flux_units(self):
"""A getter for the flux units"""
return self._flux_units
@flux_units.setter
def flux_units(self, units):
"""
A setter for the flux units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The desired units of the zeropoint flux density
"""
# Check that the units are valid
dtypes = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
if not isinstance(units, dtypes):
raise ValueError(units, "units not understood.")
# Check that the units changed
if units != self.flux_units:
# Convert to new units
sfd = q.spectral_density(self.wave_eff)
self.zp = self.zp.to(units, equivalencies=sfd)
# Store new units
self._flux_units = units
def info(self, fetch=False):
"""
Print a table of info about the current filter
"""
# Get the info from the class
tp = (int, bytes, bool, str, float, tuple, list, np.ndarray)
info = [[k, str(v)] for k, v in vars(self).items() if isinstance(v, tp)
and k not in ['rsr', 'raw', 'centers'] and not k.startswith('_')]
# Make the table
table = at.Table(np.asarray(info).reshape(len(info), 2),
names=['Attributes', 'Values'])
# Sort and print
table.sort('Attributes')
if fetch:
return table
else:
table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
    """
    Load a uniform (top hat) throughput curve between two wavelengths.

    Parameters
    ----------
    wave_min: astropy.units.quantity
        The minimum wavelength to use
    wave_max: astropy.units.quantity
        The maximum wavelength to use
    pixels_per_bin: int
        The number of pixels for the filter
    """
    # Everything is stored internally in Angstroms
    self.pixels_per_bin = pixels_per_bin
    self.n_bins = 1
    self._wave_units = q.AA
    wave_min = wave_min.to(self.wave_units)
    wave_max = wave_max.to(self.wave_units)
    # Create the RSR curve
    self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
    # BUG FIX: np.ones_like on the Quantity would carry the Angstrom unit
    # into the throughput; the other loaders store a plain dimensionless
    # ndarray, so strip the units before building the ones array
    self._throughput = np.ones_like(self.wave.value)
    self.raw = np.array([self.wave.value, self.throughput])
    # Effective wavelength is the midpoint; width is the full span
    wave_eff = ((wave_min + wave_max) / 2.).value
    width = (wave_max - wave_min).value
    # Populate the same metadata attributes the SVO loaders provide
    self.path = ''
    self.refs = ''
    self.Band = 'Top Hat'
    self.CalibrationReference = ''
    self.FWHM = width
    self.Facility = '-'
    self.FilterProfileService = '-'
    self.MagSys = '-'
    self.PhotCalID = ''
    self.PhotSystem = ''
    self.ProfileReference = ''
    self.WavelengthMin = wave_min.value
    self.WavelengthMax = wave_max.value
    self.WavelengthCen = wave_eff
    self.WavelengthEff = wave_eff
    self.WavelengthMean = wave_eff
    self.WavelengthPeak = wave_eff
    self.WavelengthPhot = wave_eff
    self.WavelengthPivot = wave_eff
    self.WavelengthUCD = ''
    self.WidthEff = width
    self.ZeroPoint = 0
    self.ZeroPointType = ''
    self.ZeroPointUnit = 'Jy'
    self.filterID = 'Top Hat'
def load_txt(self, filepath):
    """Load the filter from a two-column ASCII file (wavelength, throughput)
    and compute its bandpass properties from scratch using a Vega spectrum.

    Parameters
    ----------
    filepath: str
        The path to the ASCII file
    """
    self.raw = np.genfromtxt(filepath, unpack=True)
    # Heuristic: values below 100 are assumed to be microns; convert to Angstroms
    if self.raw[0][-1] < 100:
        self.raw[0] = self.raw[0] * 10000
    self.WavelengthUnit = str(q.AA)
    self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
    x, f = self.raw
    # Get a spectrum of Vega to define the zero point (Vega magnitude system)
    vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
    vega = np.genfromtxt(vega_file, unpack=True)[: 2]
    vega[0] = vega[0] * 10000
    # NOTE(review): rebin_spec is defined elsewhere; this assumes it returns
    # the [wave, flux] pair resampled onto x — confirm against its definition
    vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
    # Zero point = band-averaged Vega flux density
    flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
    thru = np.trapz(f, x=x)
    self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
    # Calculate the filter's properties
    self.filterID = os.path.splitext(os.path.basename(filepath))[0]
    # NOTE(review): this is the *longest* wavelength, not the wavelength at
    # peak throughput — confirm whether argmax of the throughput was intended
    self.WavelengthPeak = np.max(self.raw[0])
    # Min/max wavelengths where the rising/falling edges cross 1% of the peak
    f0 = f[: np.where(np.diff(f) > 0)[0][-1]]
    x0 = x[: np.where(np.diff(f) > 0)[0][-1]]
    self.WavelengthMin = np.interp(max(f)/100., f0, x0)
    f1 = f[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
    x1 = x[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
    self.WavelengthMax = np.interp(max(f)/100., f1, x1)
    # NOTE(review): vega here is the full rebinned array from above; if it is
    # 2-row ([wave, flux]) these products broadcast over both rows — verify
    self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
    self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
    self.WidthEff = np.trapz(f, x=x)/f.max()
    self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
    self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)
    # FWHM and center from the half-maximum crossings
    halfmax = f.max()/2.
    hm_x1 = x[f > halfmax][0]
    hm_x2 = x[f > halfmax][-1]
    self.FWHM = hm_x2 - hm_x1
    self.WavelengthCen = (hm_x1 + hm_x2)/2.
    # Attributes the XML loader would otherwise provide
    self.path = ''
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def load_xml(self, filepath):
    """Load the filter from an SVO XML (VOTable) file.

    Parameters
    ----------
    filepath: str
        The filepath for the filter
    """
    # Parse the XML file into the raw [wavelength, throughput] array
    vot = vo.parse_single_table(filepath)
    self.raw = np.array([list(i) for i in vot.array]).T
    # Parse the filter metadata from the VOTable PARAM elements
    for p in [str(p).split() for p in vot.params]:
        # Extract the key/value pairs
        key = p[1].split('"')[1]
        val = p[-1].split('"')[1]
        # Floats are cast; everything else is cleaned up as a string
        flt1 = p[2].split('"')[1] == 'float'
        flt2 = p[3].split('"')[1] == 'float'
        if flt1 or flt2:
            val = float(val)
        else:
            # BUG FIX: the bytes-prefix marker is the two characters b'
            # and must be a single quoted string ("b'"), not the
            # unbalanced 'b'' which is a syntax error
            # BUG FIX: un-escape XML entities properly — replacing '&'
            # with '&' was a no-op; '&amp;' must become '&'
            val = val.replace("b'", '')\
                     .replace('&apos', '')\
                     .replace('&amp;', '&')\
                     .strip(';')
        # Set the attribute (the long Description field is skipped)
        if key != 'Description':
            setattr(self, key, val)
    # Create some attributes
    self.path = filepath
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def plot(self, fig=None, draw=True):
    """Plot the filter's throughput curve.

    Parameters
    ----------
    fig: bokeh.plotting.figure (optional)
        A figure to plot on
    draw: bool
        Draw the figure, else return it

    Returns
    -------
    bokeh.plotting.figure
        The filter figure
    """
    palette = color_gen('Category10')
    # Build a fresh figure when none was supplied
    if fig is None:
        fig = figure(title=self.filterID,
                     x_axis_label='Wavelength [{}]'.format(self.wave_units),
                     y_axis_label='Throughput')
    # Full-resolution curve: faint and thick, underneath the binned curves
    fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
             alpha=0.1, line_width=8, color='black')
    # One colored line per bin, plus black markers at the bin centers
    for bin_wave, bin_thru in self.rsr:
        fig.line(bin_wave, bin_thru, color=next(palette), line_width=2)
    fig.circle(*self.centers, size=8, color='black')
    if not draw:
        return fig
    show(fig)
@property
def rsr(self):
    """The relative spectral response: one (wavelength, throughput) pair per bin."""
    # Stack as (2, n_bins, pts), then move the bin axis first -> (n_bins, 2, pts)
    stacked = np.array([self.wave.value, self.throughput])
    return stacked.swapaxes(0, 1)
@property
def throughput(self):
    """The throughput array, one row per bin."""
    return self._throughput

@throughput.setter
def throughput(self, points):
    """Set the throughput, enforcing agreement with the wavelength grid.

    Parameters
    ----------
    points: sequence
        The array of throughput points
    """
    # The throughput must line up point-for-point with the wavelengths
    if points.shape != self.wave.shape:
        raise ValueError("Throughput and wavelength must be same shape.")
    self._throughput = points
@property
def wave(self):
    """The wavelength array, one row per bin."""
    return self._wave

@wave.setter
def wave(self, wavelength):
    """Set the wavelength array, which must carry astropy units.

    Parameters
    ----------
    wavelength: astropy.units.quantity.Quantity
        The array with units
    """
    # Only unit-bearing arrays can be interpreted as wavelengths
    if isinstance(wavelength, q.quantity.Quantity):
        self._wave = wavelength
        # Keep the recorded units in sync with the new array
        self.wave_units = wavelength.unit
    else:
        raise ValueError("Wavelength must be in length units.")
@property
def wave_units(self):
    """The units used for all wavelength quantities."""
    return self._wave_units

@wave_units.setter
def wave_units(self, units):
    """Set the wavelength units, converting every stored wavelength quantity.

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The wavelength units
    """
    # Only length-like units make sense for wavelengths
    if not units.is_equivalent(q.m):
        raise ValueError(units, ": New wavelength units must be a length.")
    # Record the new units
    self._wave_units = units
    # Convert every wavelength-valued attribute to the new units,
    # rounding to 5 decimal places as before
    for attr in ('_wave', 'wave_min', 'wave_max', 'wave_eff', 'wave_center',
                 'wave_mean', 'wave_peak', 'wave_phot', 'wave_pivot',
                 'width_eff', 'fwhm'):
        setattr(self, attr, getattr(self, attr).to(units).round(5))
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
Filter.plot
|
python
|
def plot(self, fig=None, draw=True):
COLORS = color_gen('Category10')
# Make the figure
if fig is None:
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Throughput'
title = self.filterID
fig = figure(title=title, x_axis_label=xlab, y_axis_label=ylab)
# Plot the raw curve
fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
alpha=0.1, line_width=8, color='black')
# Plot each with bin centers
for x, y in self.rsr:
fig.line(x, y, color=next(COLORS), line_width=2)
fig.circle(*self.centers, size=8, color='black')
if draw:
show(fig)
else:
return fig
|
Plot the filter
Parameters
----------
fig: bokeh.plotting.figure (optional)
A figure to plot on
draw: bool
Draw the figure, else return it
Returns
-------
bokeh.plotting.figure
The filter figure
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L640-L677
|
[
"def color_gen(colormap='viridis', key=None, n=15):\n \"\"\"Color generator for Bokeh plots\n\n Parameters\n ----------\n colormap: str, sequence\n The name of the color map\n\n Returns\n -------\n generator\n A generator for the color palette\n \"\"\"\n if colormap in dir(bpal):\n palette = getattr(bpal, colormap)\n\n if isinstance(palette, dict):\n if key is None:\n key = list(palette.keys())[0]\n palette = palette[key]\n\n elif callable(palette):\n palette = palette(n)\n\n else:\n raise TypeError(\"pallette must be a bokeh palette name or a sequence of color hex values.\")\n\n elif isinstance(colormap, (list, tuple)):\n palette = colormap\n\n else:\n raise TypeError(\"pallette must be a bokeh palette name or a sequence of color hex values.\")\n\n yield from itertools.cycle(palette)\n"
] |
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
**kwargs):
"""
Loads the bandpass data into the Filter object
Parameters
----------
band: str
The bandpass filename (e.g. 2MASS.J)
filter_directory: str
The directory containing the filter files
wave_units: str, astropy.units.core.PrefixUnit (optional)
The wavelength units
flux_units: str, astropy.units.core.PrefixUnit (optional)
The zeropoint flux units
"""
if filter_directory is None:
filter_directory = resource_filename('svo_filters', 'data/filters/')
# Check if TopHat
if band.lower().replace('-', '').replace(' ', '') == 'tophat':
# check kwargs for limits
wave_min = kwargs.get('wave_min')
wave_max = kwargs.get('wave_max')
filepath = ''
if wave_min is None or wave_max is None:
raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
else:
# Load the filter
n_pix = kwargs.get('n_pixels', 100)
self.load_TopHat(wave_min, wave_max, n_pix)
else:
# Get list of filters
files = glob(filter_directory+'*')
no_ext = {f.replace('.txt', ''): f for f in files}
bands = [os.path.basename(b) for b in no_ext]
fp = os.path.join(filter_directory, band)
filepath = no_ext.get(fp, fp)
# If the filter is missing, ask what to do
if band not in bands:
err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp: //svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
raise IOError(err)
# Get the first line to determine format
with open(filepath) as f:
top = f.readline()
# Read in XML file
if top.startswith('<?xml'):
self.load_xml(filepath)
# Read in txt file
elif filepath.endswith('.txt'):
self.load_txt(filepath)
else:
raise TypeError("File must be XML or ascii format.")
# Set the wavelength and throughput
self._wave_units = q.AA
self._wave = np.array([self.raw[0]]) * self.wave_units
self._throughput = np.array([self.raw[1]])
# Set n_bins and pixels_per_bin
self.n_bins = 1
self.pixels_per_bin = self.raw.shape[-1]
# Rename some values and apply units
self.wave_min = self.WavelengthMin * self.wave_units
self.wave_max = self.WavelengthMax * self.wave_units
self.wave_eff = self.WavelengthEff * self.wave_units
self.wave_center = self.WavelengthCen * self.wave_units
self.wave_mean = self.WavelengthMean * self.wave_units
self.wave_peak = self.WavelengthPeak * self.wave_units
self.wave_phot = self.WavelengthPhot * self.wave_units
self.wave_pivot = self.WavelengthPivot * self.wave_units
self.width_eff = self.WidthEff * self.wave_units
self.fwhm = self.FWHM * self.wave_units
self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
# Delete redundant attributes
del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
del self.ZeroPointUnit, self.ZeroPoint
try:
del self.WavelengthUnit
except AttributeError:
pass
# Set the wavelength units
if wave_units is not None:
self.wave_units = wave_units
# Set zeropoint flux units
if flux_units is not None:
self._flux_units = self.zp.unit
self.flux_units = flux_units
# Get references
self.refs = []
try:
if isinstance(self.CalibrationReference, str):
self.refs = [self.CalibrationReference.split('=')[-1]]
except:
self.CalibrationReference = None
# Set a base name
self.name = self.filterID.split('/')[-1]
# Try to get the extinction vector R from Green et al. (2018)
self.ext_vector = EXTINCTION.get(self.name, 0)
# Set the systematic uncertainty (default 2 percent)
self.systematics = SYSTEMATICS.get(self.name, 0.02)
# Bin
if kwargs:
bwargs = {k: v for k, v in kwargs.items() if k in
inspect.signature(self.bin).parameters.keys()}
self.bin(**bwargs)
def apply(self, spectrum, plot=False):
"""
Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error
"""
# Convert to filter units if possible
f_units = 1.
if hasattr(spectrum[0], 'unit'):
spectrum[0] = spectrum[0].to(self.wave_units)
if hasattr(spectrum[1], 'unit'):
spectrum[1] = spectrum[1].to(self.flux_units)
f_units = self.flux_units
if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
spectrum[2] = spectrum[2].to(self.flux_units)
# Make into iterable arrays
wav, flx, *err = [np.asarray(i) for i in spectrum]
# Check for error array
if len(err) == 0:
err = np.ones_like(flx)*np.nan
unc = False
else:
err = err[0]
unc = True
# Make flux 2D
if len(flx.shape) == 1:
flx = np.expand_dims(flx, axis=0)
err = np.expand_dims(err, axis=0)
# Make throughput 3D
rsr = np.copy(self.rsr)
# Make empty filtered arrays
filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
filtered_err = np.zeros_like(filtered_flx)
# Rebin the input spectra to the filter wavelength array
# and apply the RSR curve to the spectrum
for i, bn in enumerate(rsr):
for j, (f, e) in enumerate(zip(flx, err)):
filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
# Propagate the filter systematic uncertainties
if unc:
filtered_err += filtered_flx*self.systematics
if plot:
# Make the figure
COLORS = color_gen('Category10')
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Flux Density [{}]'.format(self.flux_units)
fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
# Plot the unfiltered spectrum
fig.line(wav, flx[0], legend='Input spectrum', color='black')
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(flx-err, (flx+err)[::-1])
fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
# Plot each spectrum bin
for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
color = next(COLORS)
fig.line(wav, bn[0], color=color)
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
show(fig)
return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
"""
Break the filter up into bins and apply a throughput to each bin,
useful for G141, G102, and other grisms
Parameters
----------
n_bins: int
The number of bins to dice the throughput curve into
pixels_per_bin: int (optional)
The number of channels per bin, which will be used
to calculate n_bins
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
"""
# Get wavelength limits
if wave_min is not None:
self.wave_min = wave_min
if wave_max is not None:
self.wave_max = wave_max
# Trim the wavelength by the given min and max
raw_wave = self.raw[0]
whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
raw_wave * q.AA <= self.wave_max)
self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
self.throughput = self.raw[1][whr]
print('Bandpass trimmed to',
'{} - {}'.format(self.wave_min, self.wave_max))
# Calculate the number of bins and channels
pts = len(self.wave)
if isinstance(pixels_per_bin, int):
self.pixels_per_bin = pixels_per_bin
self.n_bins = int(pts/self.pixels_per_bin)
elif isinstance(n_bins, int):
self.n_bins = n_bins
self.pixels_per_bin = int(pts/self.n_bins)
else:
raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
print('{} bins of {} pixels each.'.format(self.n_bins,
self.pixels_per_bin))
# Trim throughput edges so that there are an integer number of bins
new_len = self.n_bins * self.pixels_per_bin
start = (pts - new_len) // 2
self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
@property
def centers(self):
"""A getter for the wavelength bin centers and average fluxes"""
# Get the bin centers
w_cen = np.nanmean(self.wave.value, axis=1)
f_cen = np.nanmean(self.throughput, axis=1)
return np.asarray([w_cen, f_cen])
@property
def flux_units(self):
"""A getter for the flux units"""
return self._flux_units
@flux_units.setter
def flux_units(self, units):
"""
A setter for the flux units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The desired units of the zeropoint flux density
"""
# Check that the units are valid
dtypes = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
if not isinstance(units, dtypes):
raise ValueError(units, "units not understood.")
# Check that the units changed
if units != self.flux_units:
# Convert to new units
sfd = q.spectral_density(self.wave_eff)
self.zp = self.zp.to(units, equivalencies=sfd)
# Store new units
self._flux_units = units
def info(self, fetch=False):
"""
Print a table of info about the current filter
"""
# Get the info from the class
tp = (int, bytes, bool, str, float, tuple, list, np.ndarray)
info = [[k, str(v)] for k, v in vars(self).items() if isinstance(v, tp)
and k not in ['rsr', 'raw', 'centers'] and not k.startswith('_')]
# Make the table
table = at.Table(np.asarray(info).reshape(len(info), 2),
names=['Attributes', 'Values'])
# Sort and print
table.sort('Attributes')
if fetch:
return table
else:
table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
"""
Loads a top hat filter given wavelength min and max values
Parameters
----------
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
n_pixels: int
The number of pixels for the filter
"""
# Get min, max, effective wavelengths and width
self.pixels_per_bin = pixels_per_bin
self.n_bins = 1
self._wave_units = q.AA
wave_min = wave_min.to(self.wave_units)
wave_max = wave_max.to(self.wave_units)
# Create the RSR curve
self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
self._throughput = np.ones_like(self.wave)
self.raw = np.array([self.wave.value, self.throughput])
# Calculate the effective wavelength
wave_eff = ((wave_min + wave_max) / 2.).value
width = (wave_max - wave_min).value
# Add the attributes
self.path = ''
self.refs = ''
self.Band = 'Top Hat'
self.CalibrationReference = ''
self.FWHM = width
self.Facility = '-'
self.FilterProfileService = '-'
self.MagSys = '-'
self.PhotCalID = ''
self.PhotSystem = ''
self.ProfileReference = ''
self.WavelengthMin = wave_min.value
self.WavelengthMax = wave_max.value
self.WavelengthCen = wave_eff
self.WavelengthEff = wave_eff
self.WavelengthMean = wave_eff
self.WavelengthPeak = wave_eff
self.WavelengthPhot = wave_eff
self.WavelengthPivot = wave_eff
self.WavelengthUCD = ''
self.WidthEff = width
self.ZeroPoint = 0
self.ZeroPointType = ''
self.ZeroPointUnit = 'Jy'
self.filterID = 'Top Hat'
def load_txt(self, filepath):
"""Load the filter from a txt file
Parameters
----------
file: str
The filepath
"""
self.raw = np.genfromtxt(filepath, unpack=True)
# Convert to Angstroms if microns
if self.raw[0][-1] < 100:
self.raw[0] = self.raw[0] * 10000
self.WavelengthUnit = str(q.AA)
self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
x, f = self.raw
# Get a spectrum of Vega
vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
vega = np.genfromtxt(vega_file, unpack=True)[: 2]
vega[0] = vega[0] * 10000
vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
thru = np.trapz(f, x=x)
self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
# Calculate the filter's properties
self.filterID = os.path.splitext(os.path.basename(filepath))[0]
self.WavelengthPeak = np.max(self.raw[0])
f0 = f[: np.where(np.diff(f) > 0)[0][-1]]
x0 = x[: np.where(np.diff(f) > 0)[0][-1]]
self.WavelengthMin = np.interp(max(f)/100., f0, x0)
f1 = f[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
x1 = x[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
self.WavelengthMax = np.interp(max(f)/100., f1, x1)
self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
self.WidthEff = np.trapz(f, x=x)/f.max()
self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)
# Half max stuff
halfmax = f.max()/2.
hm_x1 = x[f > halfmax][0]
hm_x2 = x[f > halfmax][-1]
self.FWHM = hm_x2 - hm_x1
self.WavelengthCen = (hm_x1 + hm_x2)/2.
# Add missing attributes
self.path = ''
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def load_xml(self, filepath):
"""Load the filter from a txt file
Parameters
----------
filepath: str
The filepath for the filter
"""
# Parse the XML file
vot = vo.parse_single_table(filepath)
self.raw = np.array([list(i) for i in vot.array]).T
# Parse the filter metadata
for p in [str(p).split() for p in vot.params]:
# Extract the key/value pairs
key = p[1].split('"')[1]
val = p[-1].split('"')[1]
# Do some formatting
flt1 = p[2].split('"')[1] == 'float'
flt2 = p[3].split('"')[1] == 'float'
if flt1 or flt2:
val = float(val)
else:
val = val.replace('b'', '')\
.replace('&apos', '')\
.replace('&', '&')\
.strip(';')
# Set the attribute
if key != 'Description':
setattr(self, key, val)
# Create some attributes
self.path = filepath
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def overlap(self, spectrum):
"""Tests for overlap of this filter with a spectrum
Example of full overlap:
|---------- spectrum ----------|
|------ self ------|
Examples of partial overlap: :
|---------- self ----------|
|------ spectrum ------|
|---- spectrum ----|
|----- self -----|
|---- self ----|
|---- spectrum ----|
Examples of no overlap: :
|---- spectrum ----| |---- other ----|
|---- other ----| |---- spectrum ----|
Parameters
----------
spectrum: sequence
The [W, F] spectrum with astropy units
Returns
-------
ans : {'full', 'partial', 'none'}
Overlap status.
"""
swave = self.wave[np.where(self.throughput != 0)]
s1, s2 = swave.min(), swave.max()
owave = spectrum[0]
o1, o2 = owave.min(), owave.max()
if (s1 >= o1 and s2 <= o2):
ans = 'full'
elif (s2 < o1) or (o2 < s1):
ans = 'none'
else:
ans = 'partial'
return ans
@property
def rsr(self):
"""A getter for the relative spectral response (rsr) curve"""
arr = np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
return arr
@property
def throughput(self):
"""A getter for the throughput"""
return self._throughput
@throughput.setter
def throughput(self, points):
"""A setter for the throughput
Parameters
----------
throughput: sequence
The array of throughput points
"""
# Test shape
if not points.shape == self.wave.shape:
raise ValueError("Throughput and wavelength must be same shape.")
self._throughput = points
@property
def wave(self):
"""A getter for the wavelength"""
return self._wave
@wave.setter
def wave(self, wavelength):
"""A setter for the wavelength
Parameters
----------
wavelength: astropy.units.quantity.Quantity
The array with units
"""
# Test units
if not isinstance(wavelength, q.quantity.Quantity):
raise ValueError("Wavelength must be in length units.")
self._wave = wavelength
self.wave_units = wavelength.unit
@property
def wave_units(self):
"""A getter for the wavelength units"""
return self._wave_units
@wave_units.setter
def wave_units(self, units):
"""
A setter for the wavelength units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The wavelength units
"""
# Make sure it's length units
if not units.is_equivalent(q.m):
raise ValueError(units, ": New wavelength units must be a length.")
# Update the units
self._wave_units = units
# Update all the wavelength values
self._wave = self.wave.to(self.wave_units).round(5)
self.wave_min = self.wave_min.to(self.wave_units).round(5)
self.wave_max = self.wave_max.to(self.wave_units).round(5)
self.wave_eff = self.wave_eff.to(self.wave_units).round(5)
self.wave_center = self.wave_center.to(self.wave_units).round(5)
self.wave_mean = self.wave_mean.to(self.wave_units).round(5)
self.wave_peak = self.wave_peak.to(self.wave_units).round(5)
self.wave_phot = self.wave_phot.to(self.wave_units).round(5)
self.wave_pivot = self.wave_pivot.to(self.wave_units).round(5)
self.width_eff = self.width_eff.to(self.wave_units).round(5)
self.fwhm = self.fwhm.to(self.wave_units).round(5)
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
Filter.rsr
|
python
|
def rsr(self):
arr = np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
return arr
|
A getter for the relative spectral response (rsr) curve
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L680-L684
| null |
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
**kwargs):
"""
Loads the bandpass data into the Filter object
Parameters
----------
band: str
The bandpass filename (e.g. 2MASS.J)
filter_directory: str
The directory containing the filter files
wave_units: str, astropy.units.core.PrefixUnit (optional)
The wavelength units
flux_units: str, astropy.units.core.PrefixUnit (optional)
The zeropoint flux units
"""
if filter_directory is None:
filter_directory = resource_filename('svo_filters', 'data/filters/')
# Check if TopHat
if band.lower().replace('-', '').replace(' ', '') == 'tophat':
# check kwargs for limits
wave_min = kwargs.get('wave_min')
wave_max = kwargs.get('wave_max')
filepath = ''
if wave_min is None or wave_max is None:
raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
else:
# Load the filter
n_pix = kwargs.get('n_pixels', 100)
self.load_TopHat(wave_min, wave_max, n_pix)
else:
# Get list of filters
files = glob(filter_directory+'*')
no_ext = {f.replace('.txt', ''): f for f in files}
bands = [os.path.basename(b) for b in no_ext]
fp = os.path.join(filter_directory, band)
filepath = no_ext.get(fp, fp)
# If the filter is missing, ask what to do
if band not in bands:
err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp: //svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
raise IOError(err)
# Get the first line to determine format
with open(filepath) as f:
top = f.readline()
# Read in XML file
if top.startswith('<?xml'):
self.load_xml(filepath)
# Read in txt file
elif filepath.endswith('.txt'):
self.load_txt(filepath)
else:
raise TypeError("File must be XML or ascii format.")
# Set the wavelength and throughput
self._wave_units = q.AA
self._wave = np.array([self.raw[0]]) * self.wave_units
self._throughput = np.array([self.raw[1]])
# Set n_bins and pixels_per_bin
self.n_bins = 1
self.pixels_per_bin = self.raw.shape[-1]
# Rename some values and apply units
self.wave_min = self.WavelengthMin * self.wave_units
self.wave_max = self.WavelengthMax * self.wave_units
self.wave_eff = self.WavelengthEff * self.wave_units
self.wave_center = self.WavelengthCen * self.wave_units
self.wave_mean = self.WavelengthMean * self.wave_units
self.wave_peak = self.WavelengthPeak * self.wave_units
self.wave_phot = self.WavelengthPhot * self.wave_units
self.wave_pivot = self.WavelengthPivot * self.wave_units
self.width_eff = self.WidthEff * self.wave_units
self.fwhm = self.FWHM * self.wave_units
self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
# Delete redundant attributes
del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
del self.ZeroPointUnit, self.ZeroPoint
try:
del self.WavelengthUnit
except AttributeError:
pass
# Set the wavelength units
if wave_units is not None:
self.wave_units = wave_units
# Set zeropoint flux units
if flux_units is not None:
self._flux_units = self.zp.unit
self.flux_units = flux_units
# Get references
self.refs = []
try:
if isinstance(self.CalibrationReference, str):
self.refs = [self.CalibrationReference.split('=')[-1]]
except:
self.CalibrationReference = None
# Set a base name
self.name = self.filterID.split('/')[-1]
# Try to get the extinction vector R from Green et al. (2018)
self.ext_vector = EXTINCTION.get(self.name, 0)
# Set the systematic uncertainty (default 2 percent)
self.systematics = SYSTEMATICS.get(self.name, 0.02)
# Bin
if kwargs:
bwargs = {k: v for k, v in kwargs.items() if k in
inspect.signature(self.bin).parameters.keys()}
self.bin(**bwargs)
def apply(self, spectrum, plot=False):
    """
    Apply the filter to the given [W, F] or [W, F, E] spectrum.

    Parameters
    ----------
    spectrum: array-like
        The wavelength [um] and flux of the spectrum
        to apply the filter to, optionally with an error array
    plot: bool
        Plot the original and filtered spectrum

    Returns
    -------
    tuple
        The filtered flux and error arrays (squeezed), each multiplied
        by the filter's flux units when the input flux carried units
    """
    # Convert to filter units if possible; f_units stays 1.0 unless
    # the input flux carried astropy units
    f_units = 1.
    if hasattr(spectrum[0], 'unit'):
        spectrum[0] = spectrum[0].to(self.wave_units)
    if hasattr(spectrum[1], 'unit'):
        spectrum[1] = spectrum[1].to(self.flux_units)
        f_units = self.flux_units
    if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
        spectrum[2] = spectrum[2].to(self.flux_units)
    # Strip units into plain arrays: wavelength, flux, optional error
    wav, flx, *err = [np.asarray(i) for i in spectrum]
    # Check for error array; NaNs act as a placeholder when absent
    if len(err) == 0:
        err = np.ones_like(flx)*np.nan
        unc = False
    else:
        err = err[0]
        unc = True
    # Make flux 2D so single spectra and stacks of spectra share
    # the same loop below
    if len(flx.shape) == 1:
        flx = np.expand_dims(flx, axis=0)
        err = np.expand_dims(err, axis=0)
    # Copy the response curve; shape is (n_bins, 2, pixels_per_bin)
    rsr = np.copy(self.rsr)
    # Make empty filtered arrays indexed as (bin, spectrum, pixel)
    filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
    filtered_err = np.zeros_like(filtered_flx)
    # Rebin the input spectra to the filter wavelength array
    # and apply the RSR curve (NaN outside the spectrum's coverage)
    for i, bn in enumerate(rsr):
        for j, (f, e) in enumerate(zip(flx, err)):
            filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
            filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
    # Propagate the filter systematic uncertainties
    if unc:
        filtered_err += filtered_flx*self.systematics
    if plot:
        # Make the figure
        COLORS = color_gen('Category10')
        xlab = 'Wavelength [{}]'.format(self.wave_units)
        ylab = 'Flux Density [{}]'.format(self.flux_units)
        fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
        # Plot the unfiltered spectrum
        fig.line(wav, flx[0], legend='Input spectrum', color='black')
        # Plot the uncertainties as a shaded band
        if unc:
            band_x = np.append(wav, wav[::-1])
            band_y = np.append(flx-err, (flx+err)[::-1])
            fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
        # Plot each spectrum bin
        # NOTE: 'wav' is deliberately rebound here to each bin's
        # wavelength array; the original input 'wav' is no longer needed
        for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
            color = next(COLORS)
            fig.line(wav, bn[0], color=color)
            # Plot the uncertainties
            if unc:
                band_x = np.append(wav, wav[::-1])
                band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
                fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
        show(fig)
    return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
    """
    Break the filter up into bins and apply a throughput to each bin,
    useful for G141, G102, and other grisms.

    Parameters
    ----------
    n_bins: int
        The number of bins to dice the throughput curve into
    pixels_per_bin: int (optional)
        The number of channels per bin; when given it takes
        precedence and n_bins is derived from it
    wave_min: astropy.units.quantity (optional)
        The minimum wavelength to use
    wave_max: astropy.units.quantity (optional)
        The maximum wavelength to use
    """
    # Get wavelength limits
    if wave_min is not None:
        self.wave_min = wave_min
    if wave_max is not None:
        self.wave_max = wave_max
    # Trim the wavelength by the given min and max
    # (self.raw is stored in Angstroms, hence the q.AA factor)
    raw_wave = self.raw[0]
    whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
                         raw_wave * q.AA <= self.wave_max)
    self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
    self.throughput = self.raw[1][whr]
    print('Bandpass trimmed to',
          '{} - {}'.format(self.wave_min, self.wave_max))
    # Calculate the number of bins and channels;
    # pixels_per_bin wins when both are supplied
    pts = len(self.wave)
    if isinstance(pixels_per_bin, int):
        self.pixels_per_bin = pixels_per_bin
        self.n_bins = int(pts/self.pixels_per_bin)
    elif isinstance(n_bins, int):
        self.n_bins = n_bins
        self.pixels_per_bin = int(pts/self.n_bins)
    else:
        raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
    print('{} bins of {} pixels each.'.format(self.n_bins,
                                              self.pixels_per_bin))
    # Trim throughput edges symmetrically so there is an integer
    # number of bins, then reshape to (n_bins, pixels_per_bin).
    # wave must be assigned before throughput: the throughput setter
    # validates its shape against self.wave
    new_len = self.n_bins * self.pixels_per_bin
    start = (pts - new_len) // 2
    self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
    self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
@property
def centers(self):
    """Wavelength bin centers paired with the mean throughput of each bin.

    Returns
    -------
    np.ndarray
        A (2, n_bins) array of [bin center wavelengths, mean throughputs]
    """
    # Average along each bin (axis 1), ignoring NaNs
    return np.asarray([
        np.nanmean(self.wave.value, axis=1),
        np.nanmean(self.throughput, axis=1),
    ])
@property
def flux_units(self):
    """A getter for the flux units"""
    return self._flux_units

@flux_units.setter
def flux_units(self, units):
    """
    A setter for the flux units. Converting the units also converts
    the stored zeropoint flux density.

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The desired units of the zeropoint flux density

    Raises
    ------
    ValueError
        If the given units are not a recognized astropy unit type
    """
    # Check that the units are valid
    dtypes = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
    if not isinstance(units, dtypes):
        raise ValueError(units, "units not understood.")
    # Check that the units changed; skip the conversion when identical
    if units != self.flux_units:
        # Convert the zeropoint to the new units, using the
        # spectral-density equivalency at the effective wavelength
        sfd = q.spectral_density(self.wave_eff)
        self.zp = self.zp.to(units, equivalencies=sfd)
    # Store new units
    self._flux_units = units
def info(self, fetch=False):
    """
    Tabulate the filter's displayable attributes.

    Parameters
    ----------
    fetch: bool
        Return the table instead of printing it

    Returns
    -------
    astropy.table.Table or None
        The info table when fetch is True, otherwise None
    """
    # Attribute types worth displaying, and internals to hide
    shown_types = (int, bytes, bool, str, float, tuple, list, np.ndarray)
    hidden = ['rsr', 'raw', 'centers']
    rows = []
    for attr, value in vars(self).items():
        if not isinstance(value, shown_types):
            continue
        if attr in hidden or attr.startswith('_'):
            continue
        rows.append([attr, str(value)])
    # Build a two-column table, sorted by attribute name
    table = at.Table(np.asarray(rows).reshape(len(rows), 2),
                     names=['Attributes', 'Values'])
    table.sort('Attributes')
    if fetch:
        return table
    table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
    """
    Loads a top hat filter given wavelength min and max values.

    Parameters
    ----------
    wave_min: astropy.units.quantity
        The minimum wavelength to use
    wave_max: astropy.units.quantity
        The maximum wavelength to use
    pixels_per_bin: int
        The number of pixels for the filter
    """
    # Get min, max, effective wavelengths and width
    self.pixels_per_bin = pixels_per_bin
    self.n_bins = 1
    self._wave_units = q.AA
    wave_min = wave_min.to(self.wave_units)
    wave_max = wave_max.to(self.wave_units)
    # Create the RSR curve: unit throughput across the whole range
    # NOTE(review): np.ones_like on a Quantity — presumably yields a
    # plain array matching self.wave's shape; confirm against usage
    self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
    self._throughput = np.ones_like(self.wave)
    self.raw = np.array([self.wave.value, self.throughput])
    # Calculate the effective wavelength (midpoint) and width
    wave_eff = ((wave_min + wave_max) / 2.).value
    width = (wave_max - wave_min).value
    # Add the attributes expected by __init__ for an SVO filter,
    # with placeholder values where no metadata exists
    self.path = ''
    self.refs = ''
    self.Band = 'Top Hat'
    self.CalibrationReference = ''
    self.FWHM = width
    self.Facility = '-'
    self.FilterProfileService = '-'
    self.MagSys = '-'
    self.PhotCalID = ''
    self.PhotSystem = ''
    self.ProfileReference = ''
    self.WavelengthMin = wave_min.value
    self.WavelengthMax = wave_max.value
    # All characteristic wavelengths collapse to the midpoint
    self.WavelengthCen = wave_eff
    self.WavelengthEff = wave_eff
    self.WavelengthMean = wave_eff
    self.WavelengthPeak = wave_eff
    self.WavelengthPhot = wave_eff
    self.WavelengthPivot = wave_eff
    self.WavelengthUCD = ''
    self.WidthEff = width
    self.ZeroPoint = 0
    self.ZeroPointType = ''
    self.ZeroPointUnit = 'Jy'
    self.filterID = 'Top Hat'
def load_txt(self, filepath):
    """Load the filter from an ascii file of wavelength and throughput
    columns, deriving the filter properties numerically.

    Parameters
    ----------
    filepath: str
        The filepath
    """
    self.raw = np.genfromtxt(filepath, unpack=True)
    # Convert to Angstroms if microns (heuristic: values under 100)
    if self.raw[0][-1] < 100:
        self.raw[0] = self.raw[0] * 10000
    self.WavelengthUnit = str(q.AA)
    self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
    x, f = self.raw
    # Get a spectrum of Vega to compute the zeropoint
    vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
    vega = np.genfromtxt(vega_file, unpack=True)[: 2]
    vega[0] = vega[0] * 10000
    vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
    # Zeropoint = throughput-weighted mean Vega flux density
    flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
    thru = np.trapz(f, x=x)
    self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
    # Calculate the filter's properties
    self.filterID = os.path.splitext(os.path.basename(filepath))[0]
    # NOTE(review): this is the maximum *wavelength*, not the
    # wavelength of peak throughput — confirm intent
    self.WavelengthPeak = np.max(self.raw[0])
    # Rising edge: 1% of the maximum throughput on the blue side
    f0 = f[: np.where(np.diff(f) > 0)[0][-1]]
    x0 = x[: np.where(np.diff(f) > 0)[0][-1]]
    self.WavelengthMin = np.interp(max(f)/100., f0, x0)
    # Falling edge: 1% of the maximum throughput on the red side
    f1 = f[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
    x1 = x[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
    self.WavelengthMax = np.interp(max(f)/100., f1, x1)
    # NOTE(review): 'vega' below is still the full 2-row
    # [wave, flux] array (only vega[1] is a flux density), so the
    # f*x*vega products broadcast over both rows — verify intent
    self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
    self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
    self.WidthEff = np.trapz(f, x=x)/f.max()
    self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
    self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)
    # Half max stuff: FWHM and center from the half-maximum crossings
    halfmax = f.max()/2.
    hm_x1 = x[f > halfmax][0]
    hm_x2 = x[f > halfmax][-1]
    self.FWHM = hm_x2 - hm_x1
    self.WavelengthCen = (hm_x1 + hm_x2)/2.
    # Add missing attributes
    self.path = ''
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def load_xml(self, filepath):
    """Load the filter from an SVO VOTable XML file.

    Parameters
    ----------
    filepath: str
        The filepath for the filter
    """
    # Parse the XML file into the raw (wavelength, throughput) arrays
    vot = vo.parse_single_table(filepath)
    self.raw = np.array([list(i) for i in vot.array]).T
    # Parse the filter metadata from the VOTable PARAM elements
    for p in [str(p).split() for p in vot.params]:
        # Extract the key/value pairs
        key = p[1].split('"')[1]
        val = p[-1].split('"')[1]
        # Cast numeric values; clean up string values
        flt1 = p[2].split('"')[1] == 'float'
        flt2 = p[3].split('"')[1] == 'float'
        if flt1 or flt2:
            val = float(val)
        else:
            # Strip bytes-literal residue and unescape XML entities
            # (the original "b'" pattern had been mangled into a
            # syntax error, and '&amp;' had been collapsed to '&',
            # making the replace a no-op)
            val = val.replace("b'", '')\
                     .replace('&apos', '')\
                     .replace('&amp;', '&')\
                     .strip(';')
        # Set the attribute (the long Description is deliberately skipped)
        if key != 'Description':
            setattr(self, key, val)
    # Create some attributes
    self.path = filepath
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def overlap(self, spectrum):
    """Report how the filter's nonzero-throughput range relates to the
    wavelength coverage of *spectrum*.

    Example of full overlap:
    |---------- spectrum ----------|
        |------ self ------|

    Examples of partial overlap:
    |---------- self ----------|
        |------ spectrum ------|

    Examples of no overlap:
    |---- spectrum ----|  |---- other ----|

    Parameters
    ----------
    spectrum: sequence
        The [W, F] spectrum with astropy units

    Returns
    -------
    ans : {'full', 'partial', 'none'}
        Overlap status.
    """
    # Wavelengths where the filter actually transmits
    active = self.wave[np.where(self.throughput != 0)]
    filt_lo, filt_hi = active.min(), active.max()
    spec_wave = spectrum[0]
    spec_lo, spec_hi = spec_wave.min(), spec_wave.max()
    # Filter range fully inside the spectrum's coverage
    if filt_lo >= spec_lo and filt_hi <= spec_hi:
        return 'full'
    # Disjoint ranges
    if filt_hi < spec_lo or spec_hi < filt_lo:
        return 'none'
    # Anything else overlaps partially
    return 'partial'
def plot(self, fig=None, draw=True):
    """
    Plot the filter's raw and binned response curves.

    Parameters
    ----------
    fig: bokeh.plotting.figure (optional)
        A figure to plot on; a new one is created when omitted
    draw: bool
        Draw the figure, else return it

    Returns
    -------
    bokeh.plotting.figure or None
        The filter figure when draw is False, otherwise None
    """
    COLORS = color_gen('Category10')
    # Make the figure
    if fig is None:
        xlab = 'Wavelength [{}]'.format(self.wave_units)
        ylab = 'Throughput'
        title = self.filterID
        fig = figure(title=title, x_axis_label=xlab, y_axis_label=ylab)
    # Plot the raw curve as a wide translucent background line
    # (raw wavelengths are stored in Angstroms, hence the conversion)
    fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
             alpha=0.1, line_width=8, color='black')
    # Plot each bin's curve with its bin center marked
    for x, y in self.rsr:
        fig.line(x, y, color=next(COLORS), line_width=2)
    fig.circle(*self.centers, size=8, color='black')
    if draw:
        show(fig)
    else:
        return fig
@property
def rsr(self):
    """A getter for the relative spectral response (RSR) curve.

    Restored here: a stray duplicate ``@property`` decorator had
    replaced this property's body (its intact form appears in the
    duplicated copy of this class later in the file).

    Returns
    -------
    np.ndarray
        A (n_bins, 2, pixels_per_bin) array pairing each bin's
        wavelengths with its throughput values
    """
    arr = np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
    return arr

@property
def throughput(self):
    """A getter for the throughput"""
    return self._throughput

@throughput.setter
def throughput(self, points):
    """A setter for the throughput.

    Parameters
    ----------
    points: sequence
        The array of throughput points

    Raises
    ------
    ValueError
        If the points do not match the wavelength array's shape
    """
    # Throughput must align element-for-element with the wavelengths
    if not points.shape == self.wave.shape:
        raise ValueError("Throughput and wavelength must be same shape.")
    self._throughput = points
@property
def wave(self):
    """A getter for the wavelength"""
    return self._wave

@wave.setter
def wave(self, wavelength):
    """A setter for the wavelength.

    Parameters
    ----------
    wavelength: astropy.units.quantity.Quantity
        The wavelength array with units

    Raises
    ------
    ValueError
        If the value is not an astropy Quantity
    """
    # Test units: only Quantity objects are accepted
    if not isinstance(wavelength, q.quantity.Quantity):
        raise ValueError("Wavelength must be in length units.")
    self._wave = wavelength
    # Keep the stored units in sync with the new array's units
    self.wave_units = wavelength.unit
@property
def wave_units(self):
    """A getter for the wavelength units"""
    return self._wave_units

@wave_units.setter
def wave_units(self, units):
    """
    A setter for the wavelength units. Changing the units converts
    every stored wavelength-valued attribute in place.

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The wavelength units

    Raises
    ------
    ValueError
        If the units are not equivalent to a length
    """
    # Make sure it's length units
    if not units.is_equivalent(q.m):
        raise ValueError(units, ": New wavelength units must be a length.")
    # Update the units
    self._wave_units = units
    # Update all the wavelength values, rounded to 5 decimal places
    self._wave = self.wave.to(self.wave_units).round(5)
    self.wave_min = self.wave_min.to(self.wave_units).round(5)
    self.wave_max = self.wave_max.to(self.wave_units).round(5)
    self.wave_eff = self.wave_eff.to(self.wave_units).round(5)
    self.wave_center = self.wave_center.to(self.wave_units).round(5)
    self.wave_mean = self.wave_mean.to(self.wave_units).round(5)
    self.wave_peak = self.wave_peak.to(self.wave_units).round(5)
    self.wave_phot = self.wave_phot.to(self.wave_units).round(5)
    self.wave_pivot = self.wave_pivot.to(self.wave_units).round(5)
    self.width_eff = self.width_eff.to(self.wave_units).round(5)
    self.fwhm = self.fwhm.to(self.wave_units).round(5)
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
Filter.throughput
|
python
|
def throughput(self, points):
# Test shape
if not points.shape == self.wave.shape:
raise ValueError("Throughput and wavelength must be same shape.")
self._throughput = points
|
A setter for the throughput
Parameters
----------
throughput: sequence
The array of throughput points
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L692-L704
| null |
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
             wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
             **kwargs):
    """
    Loads the bandpass data into the Filter object.

    Parameters
    ----------
    band: str
        The bandpass filename (e.g. 2MASS.J), or 'tophat' to build
        a top hat filter from wave_min/wave_max kwargs
    filter_directory: str
        The directory containing the filter files
    wave_units: str, astropy.units.core.PrefixUnit (optional)
        The wavelength units
    flux_units: str, astropy.units.core.PrefixUnit (optional)
        The zeropoint flux units
    """
    if filter_directory is None:
        filter_directory = resource_filename('svo_filters', 'data/filters/')
    # Check if TopHat (name is normalized: dashes/spaces stripped)
    if band.lower().replace('-', '').replace(' ', '') == 'tophat':
        # check kwargs for limits
        wave_min = kwargs.get('wave_min')
        wave_max = kwargs.get('wave_max')
        filepath = ''
        if wave_min is None or wave_max is None:
            raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
        else:
            # Load the filter
            n_pix = kwargs.get('n_pixels', 100)
            self.load_TopHat(wave_min, wave_max, n_pix)
    else:
        # Get list of filters; the band may be given with or
        # without a '.txt' extension
        files = glob(filter_directory+'*')
        no_ext = {f.replace('.txt', ''): f for f in files}
        bands = [os.path.basename(b) for b in no_ext]
        fp = os.path.join(filter_directory, band)
        filepath = no_ext.get(fp, fp)
        # If the filter is missing, ask what to do
        # NOTE(review): the URL inside this message reads
        # 'http: //...' — looks garbled; verify against upstream
        if band not in bands:
            err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp: //svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
            raise IOError(err)
        # Get the first line to determine format
        with open(filepath) as f:
            top = f.readline()
        # Read in XML file
        if top.startswith('<?xml'):
            self.load_xml(filepath)
        # Read in txt file
        elif filepath.endswith('.txt'):
            self.load_txt(filepath)
        else:
            raise TypeError("File must be XML or ascii format.")
    # Set the wavelength and throughput from the raw data
    # (raw wavelengths are in Angstroms at this point)
    self._wave_units = q.AA
    self._wave = np.array([self.raw[0]]) * self.wave_units
    self._throughput = np.array([self.raw[1]])
    # Set n_bins and pixels_per_bin
    self.n_bins = 1
    self.pixels_per_bin = self.raw.shape[-1]
    # Rename some values and apply units
    self.wave_min = self.WavelengthMin * self.wave_units
    self.wave_max = self.WavelengthMax * self.wave_units
    self.wave_eff = self.WavelengthEff * self.wave_units
    self.wave_center = self.WavelengthCen * self.wave_units
    self.wave_mean = self.WavelengthMean * self.wave_units
    self.wave_peak = self.WavelengthPeak * self.wave_units
    self.wave_phot = self.WavelengthPhot * self.wave_units
    self.wave_pivot = self.WavelengthPivot * self.wave_units
    self.width_eff = self.WidthEff * self.wave_units
    self.fwhm = self.FWHM * self.wave_units
    self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
    # Delete redundant attributes now folded into the unit-bearing ones
    del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
    del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
    del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
    del self.ZeroPointUnit, self.ZeroPoint
    try:
        del self.WavelengthUnit
    except AttributeError:
        pass
    # Set the wavelength units (converts every stored wavelength)
    if wave_units is not None:
        self.wave_units = wave_units
    # Set zeropoint flux units
    if flux_units is not None:
        self._flux_units = self.zp.unit
        self.flux_units = flux_units
    # Get references from the calibration metadata
    # NOTE(review): bare except — presumably guards a missing
    # CalibrationReference attribute; confirm and narrow if so
    self.refs = []
    try:
        if isinstance(self.CalibrationReference, str):
            self.refs = [self.CalibrationReference.split('=')[-1]]
    except:
        self.CalibrationReference = None
    # Set a base name (last path component of the SVO filter ID)
    self.name = self.filterID.split('/')[-1]
    # Try to get the extinction vector R from Green et al. (2018)
    self.ext_vector = EXTINCTION.get(self.name, 0)
    # Set the systematic uncertainty (default 2 percent)
    self.systematics = SYSTEMATICS.get(self.name, 0.02)
    # Bin, passing through only the kwargs that bin() accepts
    if kwargs:
        bwargs = {k: v for k, v in kwargs.items() if k in
                  inspect.signature(self.bin).parameters.keys()}
        self.bin(**bwargs)
def apply(self, spectrum, plot=False):
"""
Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error
"""
# Convert to filter units if possible
f_units = 1.
if hasattr(spectrum[0], 'unit'):
spectrum[0] = spectrum[0].to(self.wave_units)
if hasattr(spectrum[1], 'unit'):
spectrum[1] = spectrum[1].to(self.flux_units)
f_units = self.flux_units
if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
spectrum[2] = spectrum[2].to(self.flux_units)
# Make into iterable arrays
wav, flx, *err = [np.asarray(i) for i in spectrum]
# Check for error array
if len(err) == 0:
err = np.ones_like(flx)*np.nan
unc = False
else:
err = err[0]
unc = True
# Make flux 2D
if len(flx.shape) == 1:
flx = np.expand_dims(flx, axis=0)
err = np.expand_dims(err, axis=0)
# Make throughput 3D
rsr = np.copy(self.rsr)
# Make empty filtered arrays
filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
filtered_err = np.zeros_like(filtered_flx)
# Rebin the input spectra to the filter wavelength array
# and apply the RSR curve to the spectrum
for i, bn in enumerate(rsr):
for j, (f, e) in enumerate(zip(flx, err)):
filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
# Propagate the filter systematic uncertainties
if unc:
filtered_err += filtered_flx*self.systematics
if plot:
# Make the figure
COLORS = color_gen('Category10')
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Flux Density [{}]'.format(self.flux_units)
fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
# Plot the unfiltered spectrum
fig.line(wav, flx[0], legend='Input spectrum', color='black')
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(flx-err, (flx+err)[::-1])
fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
# Plot each spectrum bin
for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
color = next(COLORS)
fig.line(wav, bn[0], color=color)
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
show(fig)
return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
"""
Break the filter up into bins and apply a throughput to each bin,
useful for G141, G102, and other grisms
Parameters
----------
n_bins: int
The number of bins to dice the throughput curve into
pixels_per_bin: int (optional)
The number of channels per bin, which will be used
to calculate n_bins
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
"""
# Get wavelength limits
if wave_min is not None:
self.wave_min = wave_min
if wave_max is not None:
self.wave_max = wave_max
# Trim the wavelength by the given min and max
raw_wave = self.raw[0]
whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
raw_wave * q.AA <= self.wave_max)
self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
self.throughput = self.raw[1][whr]
print('Bandpass trimmed to',
'{} - {}'.format(self.wave_min, self.wave_max))
# Calculate the number of bins and channels
pts = len(self.wave)
if isinstance(pixels_per_bin, int):
self.pixels_per_bin = pixels_per_bin
self.n_bins = int(pts/self.pixels_per_bin)
elif isinstance(n_bins, int):
self.n_bins = n_bins
self.pixels_per_bin = int(pts/self.n_bins)
else:
raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
print('{} bins of {} pixels each.'.format(self.n_bins,
self.pixels_per_bin))
# Trim throughput edges so that there are an integer number of bins
new_len = self.n_bins * self.pixels_per_bin
start = (pts - new_len) // 2
self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
@property
def centers(self):
"""A getter for the wavelength bin centers and average fluxes"""
# Get the bin centers
w_cen = np.nanmean(self.wave.value, axis=1)
f_cen = np.nanmean(self.throughput, axis=1)
return np.asarray([w_cen, f_cen])
@property
def flux_units(self):
"""A getter for the flux units"""
return self._flux_units
@flux_units.setter
def flux_units(self, units):
"""
A setter for the flux units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The desired units of the zeropoint flux density
"""
# Check that the units are valid
dtypes = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
if not isinstance(units, dtypes):
raise ValueError(units, "units not understood.")
# Check that the units changed
if units != self.flux_units:
# Convert to new units
sfd = q.spectral_density(self.wave_eff)
self.zp = self.zp.to(units, equivalencies=sfd)
# Store new units
self._flux_units = units
def info(self, fetch=False):
"""
Print a table of info about the current filter
"""
# Get the info from the class
tp = (int, bytes, bool, str, float, tuple, list, np.ndarray)
info = [[k, str(v)] for k, v in vars(self).items() if isinstance(v, tp)
and k not in ['rsr', 'raw', 'centers'] and not k.startswith('_')]
# Make the table
table = at.Table(np.asarray(info).reshape(len(info), 2),
names=['Attributes', 'Values'])
# Sort and print
table.sort('Attributes')
if fetch:
return table
else:
table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
"""
Loads a top hat filter given wavelength min and max values
Parameters
----------
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
n_pixels: int
The number of pixels for the filter
"""
# Get min, max, effective wavelengths and width
self.pixels_per_bin = pixels_per_bin
self.n_bins = 1
self._wave_units = q.AA
wave_min = wave_min.to(self.wave_units)
wave_max = wave_max.to(self.wave_units)
# Create the RSR curve
self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
self._throughput = np.ones_like(self.wave)
self.raw = np.array([self.wave.value, self.throughput])
# Calculate the effective wavelength
wave_eff = ((wave_min + wave_max) / 2.).value
width = (wave_max - wave_min).value
# Add the attributes
self.path = ''
self.refs = ''
self.Band = 'Top Hat'
self.CalibrationReference = ''
self.FWHM = width
self.Facility = '-'
self.FilterProfileService = '-'
self.MagSys = '-'
self.PhotCalID = ''
self.PhotSystem = ''
self.ProfileReference = ''
self.WavelengthMin = wave_min.value
self.WavelengthMax = wave_max.value
self.WavelengthCen = wave_eff
self.WavelengthEff = wave_eff
self.WavelengthMean = wave_eff
self.WavelengthPeak = wave_eff
self.WavelengthPhot = wave_eff
self.WavelengthPivot = wave_eff
self.WavelengthUCD = ''
self.WidthEff = width
self.ZeroPoint = 0
self.ZeroPointType = ''
self.ZeroPointUnit = 'Jy'
self.filterID = 'Top Hat'
def load_txt(self, filepath):
"""Load the filter from a txt file
Parameters
----------
file: str
The filepath
"""
self.raw = np.genfromtxt(filepath, unpack=True)
# Convert to Angstroms if microns
if self.raw[0][-1] < 100:
self.raw[0] = self.raw[0] * 10000
self.WavelengthUnit = str(q.AA)
self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
x, f = self.raw
# Get a spectrum of Vega
vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
vega = np.genfromtxt(vega_file, unpack=True)[: 2]
vega[0] = vega[0] * 10000
vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
thru = np.trapz(f, x=x)
self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
# Calculate the filter's properties
self.filterID = os.path.splitext(os.path.basename(filepath))[0]
self.WavelengthPeak = np.max(self.raw[0])
f0 = f[: np.where(np.diff(f) > 0)[0][-1]]
x0 = x[: np.where(np.diff(f) > 0)[0][-1]]
self.WavelengthMin = np.interp(max(f)/100., f0, x0)
f1 = f[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
x1 = x[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
self.WavelengthMax = np.interp(max(f)/100., f1, x1)
self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
self.WidthEff = np.trapz(f, x=x)/f.max()
self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)
# Half max stuff
halfmax = f.max()/2.
hm_x1 = x[f > halfmax][0]
hm_x2 = x[f > halfmax][-1]
self.FWHM = hm_x2 - hm_x1
self.WavelengthCen = (hm_x1 + hm_x2)/2.
# Add missing attributes
self.path = ''
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def load_xml(self, filepath):
"""Load the filter from a txt file
Parameters
----------
filepath: str
The filepath for the filter
"""
# Parse the XML file
vot = vo.parse_single_table(filepath)
self.raw = np.array([list(i) for i in vot.array]).T
# Parse the filter metadata
for p in [str(p).split() for p in vot.params]:
# Extract the key/value pairs
key = p[1].split('"')[1]
val = p[-1].split('"')[1]
# Do some formatting
flt1 = p[2].split('"')[1] == 'float'
flt2 = p[3].split('"')[1] == 'float'
if flt1 or flt2:
val = float(val)
else:
val = val.replace('b'', '')\
.replace('&apos', '')\
.replace('&', '&')\
.strip(';')
# Set the attribute
if key != 'Description':
setattr(self, key, val)
# Create some attributes
self.path = filepath
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def overlap(self, spectrum):
"""Tests for overlap of this filter with a spectrum
Example of full overlap:
|---------- spectrum ----------|
|------ self ------|
Examples of partial overlap: :
|---------- self ----------|
|------ spectrum ------|
|---- spectrum ----|
|----- self -----|
|---- self ----|
|---- spectrum ----|
Examples of no overlap: :
|---- spectrum ----| |---- other ----|
|---- other ----| |---- spectrum ----|
Parameters
----------
spectrum: sequence
The [W, F] spectrum with astropy units
Returns
-------
ans : {'full', 'partial', 'none'}
Overlap status.
"""
swave = self.wave[np.where(self.throughput != 0)]
s1, s2 = swave.min(), swave.max()
owave = spectrum[0]
o1, o2 = owave.min(), owave.max()
if (s1 >= o1 and s2 <= o2):
ans = 'full'
elif (s2 < o1) or (o2 < s1):
ans = 'none'
else:
ans = 'partial'
return ans
def plot(self, fig=None, draw=True):
"""
Plot the filter
Parameters
----------
fig: bokeh.plotting.figure (optional)
A figure to plot on
draw: bool
Draw the figure, else return it
Returns
-------
bokeh.plotting.figure
The filter figure
"""
COLORS = color_gen('Category10')
# Make the figure
if fig is None:
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Throughput'
title = self.filterID
fig = figure(title=title, x_axis_label=xlab, y_axis_label=ylab)
# Plot the raw curve
fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
alpha=0.1, line_width=8, color='black')
# Plot each with bin centers
for x, y in self.rsr:
fig.line(x, y, color=next(COLORS), line_width=2)
fig.circle(*self.centers, size=8, color='black')
if draw:
show(fig)
else:
return fig
@property
def rsr(self):
"""A getter for the relative spectral response (rsr) curve"""
arr = np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
return arr
@property
def throughput(self):
"""A getter for the throughput"""
return self._throughput
@throughput.setter
@property
def wave(self):
"""A getter for the wavelength"""
return self._wave
@wave.setter
def wave(self, wavelength):
"""A setter for the wavelength
Parameters
----------
wavelength: astropy.units.quantity.Quantity
The array with units
"""
# Test units
if not isinstance(wavelength, q.quantity.Quantity):
raise ValueError("Wavelength must be in length units.")
self._wave = wavelength
self.wave_units = wavelength.unit
@property
def wave_units(self):
"""A getter for the wavelength units"""
return self._wave_units
@wave_units.setter
def wave_units(self, units):
"""
A setter for the wavelength units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The wavelength units
"""
# Make sure it's length units
if not units.is_equivalent(q.m):
raise ValueError(units, ": New wavelength units must be a length.")
# Update the units
self._wave_units = units
# Update all the wavelength values
self._wave = self.wave.to(self.wave_units).round(5)
self.wave_min = self.wave_min.to(self.wave_units).round(5)
self.wave_max = self.wave_max.to(self.wave_units).round(5)
self.wave_eff = self.wave_eff.to(self.wave_units).round(5)
self.wave_center = self.wave_center.to(self.wave_units).round(5)
self.wave_mean = self.wave_mean.to(self.wave_units).round(5)
self.wave_peak = self.wave_peak.to(self.wave_units).round(5)
self.wave_phot = self.wave_phot.to(self.wave_units).round(5)
self.wave_pivot = self.wave_pivot.to(self.wave_units).round(5)
self.width_eff = self.width_eff.to(self.wave_units).round(5)
self.fwhm = self.fwhm.to(self.wave_units).round(5)
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
Filter.wave
|
python
|
def wave(self, wavelength):
# Test units
if not isinstance(wavelength, q.quantity.Quantity):
raise ValueError("Wavelength must be in length units.")
self._wave = wavelength
self.wave_units = wavelength.unit
|
A setter for the wavelength
Parameters
----------
wavelength: astropy.units.quantity.Quantity
The array with units
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L712-L725
| null |
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
             wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
             **kwargs):
    """
    Loads the bandpass data into the Filter object

    Parameters
    ----------
    band: str
        The bandpass filename (e.g. 2MASS.J), or 'tophat' for a
        synthetic top hat (requires wave_min/wave_max in kwargs)
    filter_directory: str
        The directory containing the filter files
    wave_units: str, astropy.units.core.PrefixUnit (optional)
        The wavelength units
    flux_units: str, astropy.units.core.PrefixUnit (optional)
        The zeropoint flux units
    """
    if filter_directory is None:
        filter_directory = resource_filename('svo_filters', 'data/filters/')
    # Check if TopHat
    if band.lower().replace('-', '').replace(' ', '') == 'tophat':
        # check kwargs for limits
        wave_min = kwargs.get('wave_min')
        wave_max = kwargs.get('wave_max')
        filepath = ''
        if wave_min is None or wave_max is None:
            raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
        else:
            # Load the filter
            n_pix = kwargs.get('n_pixels', 100)
            self.load_TopHat(wave_min, wave_max, n_pix)
    else:
        # Get list of filters (filenames with and without '.txt')
        files = glob(filter_directory+'*')
        no_ext = {f.replace('.txt', ''): f for f in files}
        bands = [os.path.basename(b) for b in no_ext]
        fp = os.path.join(filter_directory, band)
        filepath = no_ext.get(fp, fp)
        # If the filter is missing, ask what to do
        if band not in bands:
            err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp: //svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
            raise IOError(err)
        # Get the first line to determine format
        with open(filepath) as f:
            top = f.readline()
        # Read in XML file
        if top.startswith('<?xml'):
            self.load_xml(filepath)
        # Read in txt file
        elif filepath.endswith('.txt'):
            self.load_txt(filepath)
        else:
            raise TypeError("File must be XML or ascii format.")
    # Set the wavelength and throughput (raw profile is in Angstroms)
    self._wave_units = q.AA
    self._wave = np.array([self.raw[0]]) * self.wave_units
    self._throughput = np.array([self.raw[1]])
    # Set n_bins and pixels_per_bin
    self.n_bins = 1
    self.pixels_per_bin = self.raw.shape[-1]
    # Rename some values and apply units
    self.wave_min = self.WavelengthMin * self.wave_units
    self.wave_max = self.WavelengthMax * self.wave_units
    self.wave_eff = self.WavelengthEff * self.wave_units
    self.wave_center = self.WavelengthCen * self.wave_units
    self.wave_mean = self.WavelengthMean * self.wave_units
    self.wave_peak = self.WavelengthPeak * self.wave_units
    self.wave_phot = self.WavelengthPhot * self.wave_units
    self.wave_pivot = self.WavelengthPivot * self.wave_units
    self.width_eff = self.WidthEff * self.wave_units
    self.fwhm = self.FWHM * self.wave_units
    self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
    # Delete redundant attributes
    del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
    del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
    del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
    del self.ZeroPointUnit, self.ZeroPoint
    try:
        del self.WavelengthUnit
    except AttributeError:
        pass
    # Set the wavelength units
    if wave_units is not None:
        self.wave_units = wave_units
    # Set zeropoint flux units
    if flux_units is not None:
        self._flux_units = self.zp.unit
        self.flux_units = flux_units
    # Get references
    self.refs = []
    try:
        if isinstance(self.CalibrationReference, str):
            self.refs = [self.CalibrationReference.split('=')[-1]]
    except AttributeError:
        # Narrowed from a bare ``except``: only a missing
        # CalibrationReference attribute is expected here
        self.CalibrationReference = None
    # Set a base name
    self.name = self.filterID.split('/')[-1]
    # Try to get the extinction vector R from Green et al. (2018)
    self.ext_vector = EXTINCTION.get(self.name, 0)
    # Set the systematic uncertainty (default 2 percent)
    self.systematics = SYSTEMATICS.get(self.name, 0.02)
    # Bin
    if kwargs:
        bwargs = {k: v for k, v in kwargs.items() if k in
                  inspect.signature(self.bin).parameters.keys()}
        self.bin(**bwargs)
def apply(self, spectrum, plot=False):
    """
    Apply the filter to the given [W, F], or [W, F, E] spectrum

    Note: elements of *spectrum* that carry astropy units are converted
    (and therefore mutated) in place before the filter is applied.

    Parameters
    ----------
    spectrum: array-like
        The wavelength [um] and flux of the spectrum
        to apply the filter to
    plot: bool
        Plot the original and filtered spectrum

    Returns
    -------
    tuple
        The filtered flux and error arrays (scaled by the flux units
        when the input flux carried units)
    """
    # Convert to filter units if possible
    f_units = 1.
    if hasattr(spectrum[0], 'unit'):
        spectrum[0] = spectrum[0].to(self.wave_units)
    if hasattr(spectrum[1], 'unit'):
        spectrum[1] = spectrum[1].to(self.flux_units)
        f_units = self.flux_units
    if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
        spectrum[2] = spectrum[2].to(self.flux_units)
    # Make into iterable arrays (units are stripped here)
    wav, flx, *err = [np.asarray(i) for i in spectrum]
    # Check for error array; fabricate NaNs when none is given
    if len(err) == 0:
        err = np.ones_like(flx)*np.nan
        unc = False
    else:
        err = err[0]
        unc = True
    # Make flux 2D so single spectra and stacks share one code path
    if len(flx.shape) == 1:
        flx = np.expand_dims(flx, axis=0)
        err = np.expand_dims(err, axis=0)
    # Make throughput 3D
    rsr = np.copy(self.rsr)
    # Make empty filtered arrays: (n_bins, n_spectra, pixels_per_bin)
    filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
    filtered_err = np.zeros_like(filtered_flx)
    # Rebin the input spectra to the filter wavelength array
    # and apply the RSR curve to the spectrum; NaNs mark regions
    # outside the input spectrum's coverage
    for i, bn in enumerate(rsr):
        for j, (f, e) in enumerate(zip(flx, err)):
            filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
            filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
    # Propagate the filter systematic uncertainties
    if unc:
        filtered_err += filtered_flx*self.systematics
    if plot:
        # Make the figure
        COLORS = color_gen('Category10')
        xlab = 'Wavelength [{}]'.format(self.wave_units)
        ylab = 'Flux Density [{}]'.format(self.flux_units)
        fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
        # Plot the unfiltered spectrum
        fig.line(wav, flx[0], legend='Input spectrum', color='black')
        # Plot the uncertainties
        if unc:
            band_x = np.append(wav, wav[::-1])
            band_y = np.append(flx-err, (flx+err)[::-1])
            fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
        # Plot each spectrum bin
        # NOTE(review): this loop rebinds `wav`, clobbering the input
        # wavelength array for the remainder of the method
        for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
            color = next(COLORS)
            fig.line(wav, bn[0], color=color)
            # Plot the uncertainties
            if unc:
                band_x = np.append(wav, wav[::-1])
                band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
                fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
        show(fig)
    return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
    """
    Break the filter up into bins and apply a throughput to each bin,
    useful for G141, G102, and other grisms

    Parameters
    ----------
    n_bins: int
        The number of bins to dice the throughput curve into
    pixels_per_bin: int (optional)
        The number of channels per bin, which will be used
        to calculate n_bins (takes precedence over n_bins)
    wave_min: astropy.units.quantity (optional)
        The minimum wavelength to use
    wave_max: astropy.units.quantity (optional)
        The maximum wavelength to use
    """
    # Get wavelength limits
    if wave_min is not None:
        self.wave_min = wave_min
    if wave_max is not None:
        self.wave_max = wave_max
    # Trim the wavelength by the given min and max
    # (the raw profile is stored in Angstroms, hence the q.AA factor)
    raw_wave = self.raw[0]
    whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
                         raw_wave * q.AA <= self.wave_max)
    self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
    self.throughput = self.raw[1][whr]
    print('Bandpass trimmed to',
          '{} - {}'.format(self.wave_min, self.wave_max))
    # Calculate the number of bins and channels
    pts = len(self.wave)
    if isinstance(pixels_per_bin, int):
        self.pixels_per_bin = pixels_per_bin
        self.n_bins = int(pts/self.pixels_per_bin)
    elif isinstance(n_bins, int):
        self.n_bins = n_bins
        self.pixels_per_bin = int(pts/self.n_bins)
    else:
        raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
    print('{} bins of {} pixels each.'.format(self.n_bins,
                                              self.pixels_per_bin))
    # Trim throughput edges so that there are an integer number of bins,
    # dropping an equal number of channels from each end
    new_len = self.n_bins * self.pixels_per_bin
    start = (pts - new_len) // 2
    self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
    self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
@property
def centers(self):
    """A getter for the per-bin center wavelengths and mean throughputs"""
    # Collapse each bin (axis 1) to its NaN-ignoring mean
    means = [np.nanmean(arr, axis=1)
             for arr in (self.wave.value, self.throughput)]
    return np.asarray(means)
@property
def flux_units(self):
    """The units of the zeropoint flux density"""
    return self._flux_units
@flux_units.setter
def flux_units(self, units):
    """Set the zeropoint flux density units, converting the zeropoint

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The desired units of the zeropoint flux density
    """
    # Reject anything that is not an astropy unit-like object
    valid = (q.core.PrefixUnit, q.quantity.Quantity, q.core.CompositeUnit)
    if not isinstance(units, valid):
        raise ValueError(units, "units not understood.")
    # Convert the zeropoint only when the units actually change, using
    # spectral density equivalencies at the effective wavelength
    if units != self.flux_units:
        equiv = q.spectral_density(self.wave_eff)
        self.zp = self.zp.to(units, equivalencies=equiv)
    self._flux_units = units
def info(self, fetch=False):
    """
    Tabulate the public scalar/array attributes of this filter

    Parameters
    ----------
    fetch: bool
        Return the table instead of printing it
    """
    # Keep only displayable attributes, skipping private names
    # and the large array properties
    keep = (int, bytes, bool, str, float, tuple, list, np.ndarray)
    rows = [[key, str(val)] for key, val in vars(self).items()
            if isinstance(val, keep)
            and key not in ['rsr', 'raw', 'centers']
            and not key.startswith('_')]
    table = at.Table(np.asarray(rows).reshape(len(rows), 2),
                     names=['Attributes', 'Values'])
    table.sort('Attributes')
    if fetch:
        return table
    table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
    """
    Loads a top hat filter given wavelength min and max values

    Parameters
    ----------
    wave_min: astropy.units.quantity
        The minimum wavelength to use
    wave_max: astropy.units.quantity
        The maximum wavelength to use
    pixels_per_bin: int
        The number of pixels for the filter
    """
    # Get min, max, effective wavelengths and width
    self.pixels_per_bin = pixels_per_bin
    self.n_bins = 1
    self._wave_units = q.AA
    wave_min = wave_min.to(self.wave_units)
    wave_max = wave_max.to(self.wave_units)
    # Create the RSR curve: constant unit throughput across the band
    self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
    self._throughput = np.ones_like(self.wave)
    self.raw = np.array([self.wave.value, self.throughput])
    # Calculate the effective wavelength (band midpoint) and width
    wave_eff = ((wave_min + wave_max) / 2.).value
    width = (wave_max - wave_min).value
    # Add the attributes expected of an SVO-loaded filter, with
    # placeholders for metadata a synthetic top hat does not have
    self.path = ''
    self.refs = ''
    self.Band = 'Top Hat'
    self.CalibrationReference = ''
    self.FWHM = width
    self.Facility = '-'
    self.FilterProfileService = '-'
    self.MagSys = '-'
    self.PhotCalID = ''
    self.PhotSystem = ''
    self.ProfileReference = ''
    self.WavelengthMin = wave_min.value
    self.WavelengthMax = wave_max.value
    self.WavelengthCen = wave_eff
    self.WavelengthEff = wave_eff
    self.WavelengthMean = wave_eff
    self.WavelengthPeak = wave_eff
    self.WavelengthPhot = wave_eff
    self.WavelengthPivot = wave_eff
    self.WavelengthUCD = ''
    self.WidthEff = width
    self.ZeroPoint = 0
    self.ZeroPointType = ''
    self.ZeroPointUnit = 'Jy'
    self.filterID = 'Top Hat'
def load_txt(self, filepath):
    """Load the filter from an ASCII file of wavelength/throughput columns

    Parameters
    ----------
    filepath: str
        The filepath
    """
    self.raw = np.genfromtxt(filepath, unpack=True)
    # Convert to Angstroms if microns
    # (heuristic: a final wavelength < 100 is assumed to be microns)
    if self.raw[0][-1] < 100:
        self.raw[0] = self.raw[0] * 10000
    self.WavelengthUnit = str(q.AA)
    self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
    x, f = self.raw
    # Get a spectrum of Vega to compute the zeropoint
    vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
    vega = np.genfromtxt(vega_file, unpack=True)[:2]
    vega[0] = vega[0] * 10000
    # NOTE(review): `vega` below is the rebinned Vega array with flux
    # units attached; `vega[1]` assumes rebin_spec returns a 2D
    # [wave, flux] array — confirm against rebin_spec's contract
    vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
    flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
    thru = np.trapz(f, x=x)
    self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
    # Calculate the filter's properties
    self.filterID = os.path.splitext(os.path.basename(filepath))[0]
    # NOTE(review): this stores the longest wavelength, not the
    # wavelength of peak throughput — verify against SVO conventions
    self.WavelengthPeak = np.max(self.raw[0])
    # Rising edge: interpolate where the response reaches 1% of maximum
    f0 = f[:np.where(np.diff(f) > 0)[0][-1]]
    x0 = x[:np.where(np.diff(f) > 0)[0][-1]]
    self.WavelengthMin = np.interp(max(f)/100., f0, x0)
    # Falling edge, treated the same way on the reversed arrays
    f1 = f[::-1][:np.where(np.diff(f[::-1]) > 0)[0][-1]]
    x1 = x[::-1][:np.where(np.diff(f[::-1]) > 0)[0][-1]]
    self.WavelengthMax = np.interp(max(f)/100., f1, x1)
    # Standard bandpass statistics (Vega-weighted where applicable)
    self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
    self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
    self.WidthEff = np.trapz(f, x=x)/f.max()
    self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
    self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)
    # Half max stuff: FWHM and center from the half-maximum crossings
    halfmax = f.max()/2.
    hm_x1 = x[f > halfmax][0]
    hm_x2 = x[f > halfmax][-1]
    self.FWHM = hm_x2 - hm_x1
    self.WavelengthCen = (hm_x1 + hm_x2)/2.
    # Add missing attributes
    self.path = ''
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def load_xml(self, filepath):
    """Load the filter from an SVO VOTable XML file

    Parameters
    ----------
    filepath: str
        The filepath for the filter
    """
    # Parse the XML file
    vot = vo.parse_single_table(filepath)
    self.raw = np.array([list(i) for i in vot.array]).T
    # Parse the filter metadata from the VOTable PARAM elements,
    # relying on their stringified 'name="..."'/'datatype="..."'
    # token positions
    for p in [str(p).split() for p in vot.params]:
        # Extract the key/value pairs
        key = p[1].split('"')[1]
        val = p[-1].split('"')[1]
        # Do some formatting
        flt1 = p[2].split('"')[1] == 'float'
        flt2 = p[3].split('"')[1] == 'float'
        if flt1 or flt2:
            val = float(val)
        else:
            # NOTE(review): these replacements appear meant to strip a
            # bytes-literal prefix and stray HTML entities from the
            # value; the literals look garbled in extraction (the last
            # replace is a no-op) — confirm against the upstream source
            val = val.replace('b'', '')\
                     .replace('&apos', '')\
                     .replace('&', '&')\
                     .strip(';')
        # Set the attribute
        if key != 'Description':
            setattr(self, key, val)
    # Create some attributes
    self.path = filepath
    self.pixels_per_bin = self.raw.shape[-1]
    self.n_bins = 1
def overlap(self, spectrum):
    """Classify how this filter's nonzero-throughput range overlaps
    the wavelength range of a spectrum.

    Full overlap::

        |---------- spectrum ----------|
            |------ self ------|

    Partial overlap: either range sticks out past the other.

    No overlap: the ranges are disjoint.

    Parameters
    ----------
    spectrum: sequence
        The [W, F] spectrum with astropy units

    Returns
    -------
    ans : {'full', 'partial', 'none'}
        Overlap status.
    """
    # Wavelengths where the filter actually transmits
    active = self.wave[np.where(self.throughput != 0)]
    filt_lo, filt_hi = active.min(), active.max()
    spec = spectrum[0]
    spec_lo, spec_hi = spec.min(), spec.max()
    # Filter fully contained within the spectrum
    if filt_lo >= spec_lo and filt_hi <= spec_hi:
        return 'full'
    # Disjoint ranges
    if filt_hi < spec_lo or spec_hi < filt_lo:
        return 'none'
    return 'partial'
def plot(self, fig=None, draw=True):
    """
    Plot the filter

    Parameters
    ----------
    fig: bokeh.plotting.figure (optional)
        A figure to plot on
    draw: bool
        Draw the figure, else return it

    Returns
    -------
    bokeh.plotting.figure
        The filter figure (only when draw is False)
    """
    COLORS = color_gen('Category10')
    # Make the figure
    if fig is None:
        xlab = 'Wavelength [{}]'.format(self.wave_units)
        ylab = 'Throughput'
        title = self.filterID
        fig = figure(title=title, x_axis_label=xlab, y_axis_label=ylab)
    # Plot the raw curve (stored in Angstroms) in the display units,
    # drawn faintly underneath the binned curves
    fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
             alpha=0.1, line_width=8, color='black')
    # Plot each bin in its own color, with bin centers marked
    for x, y in self.rsr:
        fig.line(x, y, color=next(COLORS), line_width=2)
    fig.circle(*self.centers, size=8, color='black')
    if draw:
        show(fig)
    else:
        return fig
@property
def rsr(self):
    """The relative spectral response: per-bin (wavelength, throughput) pairs"""
    # Stack as (2, ...) then swap to put the bin axis first
    stacked = np.array([self.wave.value, self.throughput])
    return stacked.swapaxes(0, 1)
@property
def throughput(self):
    """The filter throughput array"""
    return self._throughput
@throughput.setter
def throughput(self, points):
    """Set the throughput, enforcing agreement with the wavelength grid

    Parameters
    ----------
    points: sequence
        The array of throughput points
    """
    # The throughput must align element-for-element with the wavelengths
    if points.shape != self.wave.shape:
        raise ValueError("Throughput and wavelength must be same shape.")
    self._throughput = points
@property
def wave(self):
    """The wavelength array, with units"""
    return self._wave
@wave.setter
@property
def wave_units(self):
    """A getter for the wavelength units"""
    return self._wave_units
@wave_units.setter
def wave_units(self, units):
    """
    A setter for the wavelength units; converts every stored
    wavelength-valued attribute to the new units

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The wavelength units
    """
    # Make sure it's length units
    if not units.is_equivalent(q.m):
        raise ValueError(units, ": New wavelength units must be a length.")
    # Update the units
    self._wave_units = units
    # Update all the wavelength values (rounded to 5 decimal places)
    self._wave = self.wave.to(self.wave_units).round(5)
    self.wave_min = self.wave_min.to(self.wave_units).round(5)
    self.wave_max = self.wave_max.to(self.wave_units).round(5)
    self.wave_eff = self.wave_eff.to(self.wave_units).round(5)
    self.wave_center = self.wave_center.to(self.wave_units).round(5)
    self.wave_mean = self.wave_mean.to(self.wave_units).round(5)
    self.wave_peak = self.wave_peak.to(self.wave_units).round(5)
    self.wave_phot = self.wave_phot.to(self.wave_units).round(5)
    self.wave_pivot = self.wave_pivot.to(self.wave_units).round(5)
    self.width_eff = self.width_eff.to(self.wave_units).round(5)
    self.fwhm = self.fwhm.to(self.wave_units).round(5)
|
hover2pi/svo_filters
|
svo_filters/svo.py
|
Filter.wave_units
|
python
|
def wave_units(self, units):
# Make sure it's length units
if not units.is_equivalent(q.m):
raise ValueError(units, ": New wavelength units must be a length.")
# Update the units
self._wave_units = units
# Update all the wavelength values
self._wave = self.wave.to(self.wave_units).round(5)
self.wave_min = self.wave_min.to(self.wave_units).round(5)
self.wave_max = self.wave_max.to(self.wave_units).round(5)
self.wave_eff = self.wave_eff.to(self.wave_units).round(5)
self.wave_center = self.wave_center.to(self.wave_units).round(5)
self.wave_mean = self.wave_mean.to(self.wave_units).round(5)
self.wave_peak = self.wave_peak.to(self.wave_units).round(5)
self.wave_phot = self.wave_phot.to(self.wave_units).round(5)
self.wave_pivot = self.wave_pivot.to(self.wave_units).round(5)
self.width_eff = self.width_eff.to(self.wave_units).round(5)
self.fwhm = self.fwhm.to(self.wave_units).round(5)
|
A setter for the wavelength units
Parameters
----------
units: str, astropy.units.core.PrefixUnit
The wavelength units
|
train
|
https://github.com/hover2pi/svo_filters/blob/f0587c4908baf636d4bdf030fa95029e8f31b975/svo_filters/svo.py#L733-L760
| null |
class Filter:
"""
Creates a Filter object to store a photometric filter profile
and metadata
Attributes
----------
path: str
The absolute filepath for the bandpass data, an ASCII file with
a wavelength column in Angstroms and a response column of values
ranging from 0 to 1
refs: list, str
The references for the bandpass data
rsr: np.ndarray
The wavelength and relative spectral response (RSR) arrays
Band: str
The band name
CalibrationReference: str
The paper detailing the calibration
FWHM: float
The FWHM for the filter
Facility: str
The telescope facility
FilterProfileService: str
The SVO source
MagSys: str
The magnitude system
PhotCalID: str
The calibration standard
PhotSystem: str
The photometric system
ProfileReference: str
The SVO reference
WavelengthCen: float
The center wavelength
WavelengthEff: float
The effective wavelength
WavelengthMax: float
The maximum wavelength
WavelengthMean: float
The mean wavelength
WavelengthMin: float
The minimum wavelength
WavelengthPeak: float
The peak wavelength
WavelengthPhot: float
The photon distribution based effective wavelength
WavelengthPivot: float
The wavelength pivot
WavelengthUCD: str
The SVO wavelength unit
WavelengthUnit: str
The wavelength unit
WidthEff: float
The effective width
ZeroPoint: float
The value of the zero point flux
ZeroPointType: str
The system of the zero point
ZeroPointUnit: str
The units of the zero point
filterID: str
The SVO filter ID
"""
def __init__(self, band, filter_directory=None,
wave_units=q.um, flux_units=q.erg/q.s/q.cm**2/q.AA,
**kwargs):
"""
Loads the bandpass data into the Filter object
Parameters
----------
band: str
The bandpass filename (e.g. 2MASS.J)
filter_directory: str
The directory containing the filter files
wave_units: str, astropy.units.core.PrefixUnit (optional)
The wavelength units
flux_units: str, astropy.units.core.PrefixUnit (optional)
The zeropoint flux units
"""
if filter_directory is None:
filter_directory = resource_filename('svo_filters', 'data/filters/')
# Check if TopHat
if band.lower().replace('-', '').replace(' ', '') == 'tophat':
# check kwargs for limits
wave_min = kwargs.get('wave_min')
wave_max = kwargs.get('wave_max')
filepath = ''
if wave_min is None or wave_max is None:
raise ValueError("Please provide **{'wave_min', 'wave_max'} to create top hat filter.")
else:
# Load the filter
n_pix = kwargs.get('n_pixels', 100)
self.load_TopHat(wave_min, wave_max, n_pix)
else:
# Get list of filters
files = glob(filter_directory+'*')
no_ext = {f.replace('.txt', ''): f for f in files}
bands = [os.path.basename(b) for b in no_ext]
fp = os.path.join(filter_directory, band)
filepath = no_ext.get(fp, fp)
# If the filter is missing, ask what to do
if band not in bands:
err = """No filters match {}\n\nCurrent filters: {}\n\nA full list of available filters from the\nSVO Filter Profile Service can be found at\nhttp: //svo2.cab.inta-csic.es/theory/fps3/\n\nPlace the desired filter XML file in your\nfilter directory and try again.""".format(filepath, ', '.join(bands))
raise IOError(err)
# Get the first line to determine format
with open(filepath) as f:
top = f.readline()
# Read in XML file
if top.startswith('<?xml'):
self.load_xml(filepath)
# Read in txt file
elif filepath.endswith('.txt'):
self.load_txt(filepath)
else:
raise TypeError("File must be XML or ascii format.")
# Set the wavelength and throughput
self._wave_units = q.AA
self._wave = np.array([self.raw[0]]) * self.wave_units
self._throughput = np.array([self.raw[1]])
# Set n_bins and pixels_per_bin
self.n_bins = 1
self.pixels_per_bin = self.raw.shape[-1]
# Rename some values and apply units
self.wave_min = self.WavelengthMin * self.wave_units
self.wave_max = self.WavelengthMax * self.wave_units
self.wave_eff = self.WavelengthEff * self.wave_units
self.wave_center = self.WavelengthCen * self.wave_units
self.wave_mean = self.WavelengthMean * self.wave_units
self.wave_peak = self.WavelengthPeak * self.wave_units
self.wave_phot = self.WavelengthPhot * self.wave_units
self.wave_pivot = self.WavelengthPivot * self.wave_units
self.width_eff = self.WidthEff * self.wave_units
self.fwhm = self.FWHM * self.wave_units
self.zp = self.ZeroPoint * q.Unit(self.ZeroPointUnit)
# Delete redundant attributes
del self.WavelengthMin, self.WavelengthMax, self.WavelengthEff
del self.WavelengthCen, self.WavelengthMean, self.WavelengthPeak
del self.WavelengthPhot, self.WavelengthPivot, self.WidthEff, self.FWHM
del self.ZeroPointUnit, self.ZeroPoint
try:
del self.WavelengthUnit
except AttributeError:
pass
# Set the wavelength units
if wave_units is not None:
self.wave_units = wave_units
# Set zeropoint flux units
if flux_units is not None:
self._flux_units = self.zp.unit
self.flux_units = flux_units
# Get references
self.refs = []
try:
if isinstance(self.CalibrationReference, str):
self.refs = [self.CalibrationReference.split('=')[-1]]
except:
self.CalibrationReference = None
# Set a base name
self.name = self.filterID.split('/')[-1]
# Try to get the extinction vector R from Green et al. (2018)
self.ext_vector = EXTINCTION.get(self.name, 0)
# Set the systematic uncertainty (default 2 percent)
self.systematics = SYSTEMATICS.get(self.name, 0.02)
# Bin
if kwargs:
bwargs = {k: v for k, v in kwargs.items() if k in
inspect.signature(self.bin).parameters.keys()}
self.bin(**bwargs)
def apply(self, spectrum, plot=False):
"""
Apply the filter to the given [W, F], or [W, F, E] spectrum
Parameters
----------
spectrum: array-like
The wavelength [um] and flux of the spectrum
to apply the filter to
plot: bool
Plot the original and filtered spectrum
Returns
-------
np.ndarray
The filtered spectrum and error
"""
# Convert to filter units if possible
f_units = 1.
if hasattr(spectrum[0], 'unit'):
spectrum[0] = spectrum[0].to(self.wave_units)
if hasattr(spectrum[1], 'unit'):
spectrum[1] = spectrum[1].to(self.flux_units)
f_units = self.flux_units
if len(spectrum) >= 3 and hasattr(spectrum[2], 'unit'):
spectrum[2] = spectrum[2].to(self.flux_units)
# Make into iterable arrays
wav, flx, *err = [np.asarray(i) for i in spectrum]
# Check for error array
if len(err) == 0:
err = np.ones_like(flx)*np.nan
unc = False
else:
err = err[0]
unc = True
# Make flux 2D
if len(flx.shape) == 1:
flx = np.expand_dims(flx, axis=0)
err = np.expand_dims(err, axis=0)
# Make throughput 3D
rsr = np.copy(self.rsr)
# Make empty filtered arrays
filtered_flx = np.zeros((rsr.shape[0], flx.shape[0], rsr.shape[2]))
filtered_err = np.zeros_like(filtered_flx)
# Rebin the input spectra to the filter wavelength array
# and apply the RSR curve to the spectrum
for i, bn in enumerate(rsr):
for j, (f, e) in enumerate(zip(flx, err)):
filtered_flx[i][j] = np.interp(bn[0], wav, f, left=np.nan, right=np.nan)*bn[1]
filtered_err[i][j] = np.interp(bn[0], wav, e, left=np.nan, right=np.nan)*bn[1]
# Propagate the filter systematic uncertainties
if unc:
filtered_err += filtered_flx*self.systematics
if plot:
# Make the figure
COLORS = color_gen('Category10')
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Flux Density [{}]'.format(self.flux_units)
fig = figure(title=self.filterID, x_axis_label=xlab, y_axis_label=ylab)
# Plot the unfiltered spectrum
fig.line(wav, flx[0], legend='Input spectrum', color='black')
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(flx-err, (flx+err)[::-1])
fig.patch(band_x, band_y, color='black', fill_alpha=0.1, line_alpha=0)
# Plot each spectrum bin
for wav, bn, bne in zip(self.wave, filtered_flx, filtered_err):
color = next(COLORS)
fig.line(wav, bn[0], color=color)
# Plot the uncertainties
if unc:
band_x = np.append(wav, wav[::-1])
band_y = np.append(bn[0]-bne[0], (bn[0]+bne[0])[::-1])
fig.patch(band_x, band_y, color=color, fill_alpha=0.1, line_alpha=0)
show(fig)
return filtered_flx.squeeze()*f_units, filtered_err.squeeze()*f_units
def bin(self, n_bins=1, pixels_per_bin=None, wave_min=None, wave_max=None):
"""
Break the filter up into bins and apply a throughput to each bin,
useful for G141, G102, and other grisms
Parameters
----------
n_bins: int
The number of bins to dice the throughput curve into
pixels_per_bin: int (optional)
The number of channels per bin, which will be used
to calculate n_bins
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
"""
# Get wavelength limits
if wave_min is not None:
self.wave_min = wave_min
if wave_max is not None:
self.wave_max = wave_max
# Trim the wavelength by the given min and max
raw_wave = self.raw[0]
whr = np.logical_and(raw_wave * q.AA >= self.wave_min,
raw_wave * q.AA <= self.wave_max)
self.wave = (raw_wave[whr] * q.AA).to(self.wave_units)
self.throughput = self.raw[1][whr]
print('Bandpass trimmed to',
'{} - {}'.format(self.wave_min, self.wave_max))
# Calculate the number of bins and channels
pts = len(self.wave)
if isinstance(pixels_per_bin, int):
self.pixels_per_bin = pixels_per_bin
self.n_bins = int(pts/self.pixels_per_bin)
elif isinstance(n_bins, int):
self.n_bins = n_bins
self.pixels_per_bin = int(pts/self.n_bins)
else:
raise ValueError("Please specify 'n_bins' OR 'pixels_per_bin' as integers.")
print('{} bins of {} pixels each.'.format(self.n_bins,
self.pixels_per_bin))
# Trim throughput edges so that there are an integer number of bins
new_len = self.n_bins * self.pixels_per_bin
start = (pts - new_len) // 2
self.wave = self.wave[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
self.throughput = self.throughput[start:new_len+start].reshape(self.n_bins, self.pixels_per_bin)
@property
def centers(self):
    """The bin-center wavelengths paired with each bin's mean throughput"""
    # Ignore NaNs when collapsing each wavelength bin to a single point
    return np.asarray([np.nanmean(self.wave.value, axis=1),
                       np.nanmean(self.throughput, axis=1)])
@property
def flux_units(self):
    """The current zeropoint flux density units"""
    return self._flux_units
@flux_units.setter
def flux_units(self, units):
    """
    Change the zeropoint flux density units

    Parameters
    ----------
    units: str, astropy.units.core.PrefixUnit
        The desired units of the zeropoint flux density
    """
    if not isinstance(units, (q.core.PrefixUnit, q.quantity.Quantity,
                              q.core.CompositeUnit)):
        raise ValueError(units, "units not understood.")
    if units != self.flux_units:
        # Convert the zeropoint via a spectral density equivalency
        # evaluated at the effective wavelength
        self.zp = self.zp.to(units,
                             equivalencies=q.spectral_density(self.wave_eff))
    self._flux_units = units
def info(self, fetch=False):
    """
    Show (or return) a table summarizing this filter's attributes

    Parameters
    ----------
    fetch: bool
        If True, return the astropy table instead of printing it
    """
    # Collect printable public attributes, skipping the big arrays
    displayable = (int, bytes, bool, str, float, tuple, list, np.ndarray)
    data = []
    for attr, value in vars(self).items():
        if isinstance(value, displayable) \
                and attr not in ['rsr', 'raw', 'centers'] \
                and not attr.startswith('_'):
            data.append([attr, str(value)])
    table = at.Table(np.asarray(data).reshape(len(data), 2),
                     names=['Attributes', 'Values'])
    table.sort('Attributes')
    if fetch:
        return table
    else:
        table.pprint(max_width=-1, max_lines=-1, align=['>', '<'])
def load_TopHat(self, wave_min, wave_max, pixels_per_bin=100):
"""
Loads a top hat filter given wavelength min and max values
Parameters
----------
wave_min: astropy.units.quantity (optional)
The minimum wavelength to use
wave_max: astropy.units.quantity (optional)
The maximum wavelength to use
n_pixels: int
The number of pixels for the filter
"""
# Get min, max, effective wavelengths and width
self.pixels_per_bin = pixels_per_bin
self.n_bins = 1
self._wave_units = q.AA
wave_min = wave_min.to(self.wave_units)
wave_max = wave_max.to(self.wave_units)
# Create the RSR curve
self._wave = np.linspace(wave_min, wave_max, pixels_per_bin)
self._throughput = np.ones_like(self.wave)
self.raw = np.array([self.wave.value, self.throughput])
# Calculate the effective wavelength
wave_eff = ((wave_min + wave_max) / 2.).value
width = (wave_max - wave_min).value
# Add the attributes
self.path = ''
self.refs = ''
self.Band = 'Top Hat'
self.CalibrationReference = ''
self.FWHM = width
self.Facility = '-'
self.FilterProfileService = '-'
self.MagSys = '-'
self.PhotCalID = ''
self.PhotSystem = ''
self.ProfileReference = ''
self.WavelengthMin = wave_min.value
self.WavelengthMax = wave_max.value
self.WavelengthCen = wave_eff
self.WavelengthEff = wave_eff
self.WavelengthMean = wave_eff
self.WavelengthPeak = wave_eff
self.WavelengthPhot = wave_eff
self.WavelengthPivot = wave_eff
self.WavelengthUCD = ''
self.WidthEff = width
self.ZeroPoint = 0
self.ZeroPointType = ''
self.ZeroPointUnit = 'Jy'
self.filterID = 'Top Hat'
def load_txt(self, filepath):
"""Load the filter from a txt file
Parameters
----------
file: str
The filepath
"""
self.raw = np.genfromtxt(filepath, unpack=True)
# Convert to Angstroms if microns
if self.raw[0][-1] < 100:
self.raw[0] = self.raw[0] * 10000
self.WavelengthUnit = str(q.AA)
self.ZeroPointUnit = str(q.erg/q.s/q.cm**2/q.AA)
x, f = self.raw
# Get a spectrum of Vega
vega_file = resource_filename('svo_filters', 'data/spectra/vega.txt')
vega = np.genfromtxt(vega_file, unpack=True)[: 2]
vega[0] = vega[0] * 10000
vega = rebin_spec(vega, x)*q.erg/q.s/q.cm**2/q.AA
flam = np.trapz((vega[1]*f).to(q.erg/q.s/q.cm**2/q.AA), x=x)
thru = np.trapz(f, x=x)
self.ZeroPoint = (flam/thru).to(q.erg/q.s/q.cm**2/q.AA).value
# Calculate the filter's properties
self.filterID = os.path.splitext(os.path.basename(filepath))[0]
self.WavelengthPeak = np.max(self.raw[0])
f0 = f[: np.where(np.diff(f) > 0)[0][-1]]
x0 = x[: np.where(np.diff(f) > 0)[0][-1]]
self.WavelengthMin = np.interp(max(f)/100., f0, x0)
f1 = f[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
x1 = x[::-1][: np.where(np.diff(f[::-1]) > 0)[0][-1]]
self.WavelengthMax = np.interp(max(f)/100., f1, x1)
self.WavelengthEff = np.trapz(f*x*vega, x=x)/np.trapz(f*vega, x=x)
self.WavelengthMean = np.trapz(f*x, x=x)/np.trapz(f, x=x)
self.WidthEff = np.trapz(f, x=x)/f.max()
self.WavelengthPivot = np.sqrt(np.trapz(f, x=x)/np.trapz(f/x**2, x=x))
self.WavelengthPhot = np.trapz(f*vega*x**2, x=x)/np.trapz(f*vega*x, x=x)
# Half max stuff
halfmax = f.max()/2.
hm_x1 = x[f > halfmax][0]
hm_x2 = x[f > halfmax][-1]
self.FWHM = hm_x2 - hm_x1
self.WavelengthCen = (hm_x1 + hm_x2)/2.
# Add missing attributes
self.path = ''
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def load_xml(self, filepath):
"""Load the filter from a txt file
Parameters
----------
filepath: str
The filepath for the filter
"""
# Parse the XML file
vot = vo.parse_single_table(filepath)
self.raw = np.array([list(i) for i in vot.array]).T
# Parse the filter metadata
for p in [str(p).split() for p in vot.params]:
# Extract the key/value pairs
key = p[1].split('"')[1]
val = p[-1].split('"')[1]
# Do some formatting
flt1 = p[2].split('"')[1] == 'float'
flt2 = p[3].split('"')[1] == 'float'
if flt1 or flt2:
val = float(val)
else:
val = val.replace('b'', '')\
.replace('&apos', '')\
.replace('&', '&')\
.strip(';')
# Set the attribute
if key != 'Description':
setattr(self, key, val)
# Create some attributes
self.path = filepath
self.pixels_per_bin = self.raw.shape[-1]
self.n_bins = 1
def overlap(self, spectrum):
"""Tests for overlap of this filter with a spectrum
Example of full overlap:
|---------- spectrum ----------|
|------ self ------|
Examples of partial overlap: :
|---------- self ----------|
|------ spectrum ------|
|---- spectrum ----|
|----- self -----|
|---- self ----|
|---- spectrum ----|
Examples of no overlap: :
|---- spectrum ----| |---- other ----|
|---- other ----| |---- spectrum ----|
Parameters
----------
spectrum: sequence
The [W, F] spectrum with astropy units
Returns
-------
ans : {'full', 'partial', 'none'}
Overlap status.
"""
swave = self.wave[np.where(self.throughput != 0)]
s1, s2 = swave.min(), swave.max()
owave = spectrum[0]
o1, o2 = owave.min(), owave.max()
if (s1 >= o1 and s2 <= o2):
ans = 'full'
elif (s2 < o1) or (o2 < s1):
ans = 'none'
else:
ans = 'partial'
return ans
def plot(self, fig=None, draw=True):
"""
Plot the filter
Parameters
----------
fig: bokeh.plotting.figure (optional)
A figure to plot on
draw: bool
Draw the figure, else return it
Returns
-------
bokeh.plotting.figure
The filter figure
"""
COLORS = color_gen('Category10')
# Make the figure
if fig is None:
xlab = 'Wavelength [{}]'.format(self.wave_units)
ylab = 'Throughput'
title = self.filterID
fig = figure(title=title, x_axis_label=xlab, y_axis_label=ylab)
# Plot the raw curve
fig.line((self.raw[0]*q.AA).to(self.wave_units), self.raw[1],
alpha=0.1, line_width=8, color='black')
# Plot each with bin centers
for x, y in self.rsr:
fig.line(x, y, color=next(COLORS), line_width=2)
fig.circle(*self.centers, size=8, color='black')
if draw:
show(fig)
else:
return fig
@property
def rsr(self):
"""A getter for the relative spectral response (rsr) curve"""
arr = np.array([self.wave.value, self.throughput]).swapaxes(0, 1)
return arr
@property
def throughput(self):
"""A getter for the throughput"""
return self._throughput
@throughput.setter
def throughput(self, points):
"""A setter for the throughput
Parameters
----------
throughput: sequence
The array of throughput points
"""
# Test shape
if not points.shape == self.wave.shape:
raise ValueError("Throughput and wavelength must be same shape.")
self._throughput = points
@property
def wave(self):
"""A getter for the wavelength"""
return self._wave
@wave.setter
def wave(self, wavelength):
"""A setter for the wavelength
Parameters
----------
wavelength: astropy.units.quantity.Quantity
The array with units
"""
# Test units
if not isinstance(wavelength, q.quantity.Quantity):
raise ValueError("Wavelength must be in length units.")
self._wave = wavelength
self.wave_units = wavelength.unit
@property
def wave_units(self):
"""A getter for the wavelength units"""
return self._wave_units
@wave_units.setter
|
tweekmonster/moult
|
moult/utils.py
|
load_stdlib
|
python
|
def load_stdlib():
'''Scans sys.path for standard library modules.
'''
if _stdlib:
return _stdlib
prefixes = tuple({os.path.abspath(p) for p in (
sys.prefix,
getattr(sys, 'real_prefix', sys.prefix),
getattr(sys, 'base_prefix', sys.prefix),
)})
for sp in sys.path:
if not sp:
continue
_import_paths.append(os.path.abspath(sp))
stdpaths = tuple({p for p in _import_paths
if p.startswith(prefixes) and 'site-packages' not in p})
_stdlib.update(sys.builtin_module_names)
for stdpath in stdpaths:
if not os.path.isdir(stdpath):
continue
for item in os.listdir(stdpath):
if item.startswith('.') or item == 'site-packages':
continue
p = os.path.join(stdpath, item)
if not os.path.isdir(p) and not item.endswith(('.py', '.so')):
continue
_stdlib.add(item.split('.', 1)[0])
return _stdlib
|
Scans sys.path for standard library modules.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/utils.py#L18-L54
| null |
import os
import re
import sys
from .classes import PyModule
from .pip_importer import *
from .compat import str_
_stdlib = set()
_import_paths = []
__all__ = ('dist_is_local', 'dist_in_usersite', 'get_installed_distributions',
'running_under_virtualenv', 'ignore_packages',
'search_packages_info', 'find_package')
load_stdlib()
def is_stdlib(module):
return module.split('.', 1)[0] in load_stdlib()
def is_import_str(text):
text = str_(text)
return re.match(r'^[\w\.]+$', text) and re.match(r'\w+\.\w+', text)
def import_path_from_file(filename, as_list=False):
'''Returns a tuple of the import path and root module directory for the
supplied file.
'''
module_path = []
basename = os.path.splitext(os.path.basename(filename))[0]
if basename != '__init__':
module_path.append(basename)
dirname = os.path.dirname(filename)
while os.path.isfile(os.path.join(dirname, '__init__.py')):
dirname, tail = os.path.split(dirname)
module_path.insert(0, tail)
if as_list:
return module_path, dirname
return '.'.join(module_path), dirname
def file_containing_import(import_path, import_root):
'''Finds the file that might contain the import_path.
'''
if not _import_paths:
load_stdlib()
if os.path.isfile(import_root):
import_root = os.path.dirname(import_root)
search_paths = [import_root] + _import_paths
module_parts = import_path.split('.')
for i in range(len(module_parts), 0, -1):
module_path = os.path.join(*module_parts[:i])
for sp in search_paths:
p = os.path.join(sp, module_path)
if os.path.isdir(p):
return os.path.join(p, '__init__.py')
elif os.path.isfile(p + '.py'):
return p + '.py'
return None
def resolve_import(import_path, from_module):
'''Resolves relative imports from a module.
'''
if not import_path or not import_path.startswith('.'):
return import_path
from_module = from_module.split('.')
dots = 0
for c in import_path:
if c == '.':
dots += 1
else:
break
if dots:
from_module = from_module[:-dots]
import_path = import_path[dots:]
if import_path:
from_module.append(import_path)
return '.'.join(from_module)
def find_package(name, installed, package=False):
'''Finds a package in the installed list.
If `package` is true, match package names, otherwise, match import paths.
'''
if package:
name = name.lower()
tests = (
lambda x: x.user and name == x.name.lower(),
lambda x: x.local and name == x.name.lower(),
lambda x: name == x.name.lower(),
)
else:
tests = (
lambda x: x.user and name in x.import_names,
lambda x: x.local and name in x.import_names,
lambda x: name in x.import_names,
)
for t in tests:
try:
found = list(filter(t, installed))
if found and not found[0].is_scan:
return found[0]
except StopIteration:
pass
return None
def is_script(filename):
'''Checks if a file has a hashbang.
'''
if not os.path.isfile(filename):
return False
try:
with open(filename, 'rb') as fp:
return fp.read(2) == b'#!'
except IOError:
pass
return False
def is_python_script(filename):
'''Checks a file to see if it's a python script of some sort.
'''
if filename.lower().endswith('.py'):
return True
if not os.path.isfile(filename):
return False
try:
with open(filename, 'rb') as fp:
if fp.read(2) != b'#!':
return False
return re.match(r'.*python', str_(fp.readline()))
except IOError:
pass
return False
def iter_dist_files(dist):
if dist.has_metadata('RECORD'):
for line in dist.get_metadata_lines('RECORD'):
line = line.split(',')[0]
if line.endswith('.pyc'):
continue
yield os.path.normpath(os.path.join(dist.location, line))
elif dist.has_metadata('installed-files.txt'):
for line in dist.get_metadata_lines('installed-files.txt'):
if line.endswith('.pyc'):
continue
yield os.path.normpath(os.path.join(dist.location,
dist.egg_info, line))
def installed_packages(local=False):
installed = []
for dist in get_installed_distributions(local_only=local):
pym = PyModule(dist.project_name, dist.version, dist.location)
if dist.has_metadata('top_level.txt'):
pym.set_import_names(list(dist.get_metadata_lines('top_level.txt')))
pym.local = dist_is_local(dist)
pym.user = dist_in_usersite(dist)
pym._dependencies = [dep.project_name for dep in dist.requires()]
for filename in iter_dist_files(dist):
if not filename.startswith(dist.location):
if is_script(filename):
pym.installed_scripts.append(filename)
else:
pym.installed_files.append(filename)
if pym.installed_scripts or pym.name in ignore_packages:
pym.hidden = True
installed.append(pym)
for pym in installed[:]:
for dep in pym._dependencies:
if dep == 'argparse':
# Since I'm only testing with Python 2.7, skip any requirements
# for argparse.
continue
pymc = find_package(dep, installed, True)
if not pymc:
pymc = PyModule(dep, 'MISSING', missing=True)
installed.append(pymc)
pymc.add_dependant(pym)
pym.add_dependency(pymc)
return installed
|
tweekmonster/moult
|
moult/utils.py
|
import_path_from_file
|
python
|
def import_path_from_file(filename, as_list=False):
'''Returns a tuple of the import path and root module directory for the
supplied file.
'''
module_path = []
basename = os.path.splitext(os.path.basename(filename))[0]
if basename != '__init__':
module_path.append(basename)
dirname = os.path.dirname(filename)
while os.path.isfile(os.path.join(dirname, '__init__.py')):
dirname, tail = os.path.split(dirname)
module_path.insert(0, tail)
if as_list:
return module_path, dirname
return '.'.join(module_path), dirname
|
Returns a tuple of the import path and root module directory for the
supplied file.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/utils.py#L69-L85
| null |
import os
import re
import sys
from .classes import PyModule
from .pip_importer import *
from .compat import str_
_stdlib = set()
_import_paths = []
__all__ = ('dist_is_local', 'dist_in_usersite', 'get_installed_distributions',
'running_under_virtualenv', 'ignore_packages',
'search_packages_info', 'find_package')
def load_stdlib():
'''Scans sys.path for standard library modules.
'''
if _stdlib:
return _stdlib
prefixes = tuple({os.path.abspath(p) for p in (
sys.prefix,
getattr(sys, 'real_prefix', sys.prefix),
getattr(sys, 'base_prefix', sys.prefix),
)})
for sp in sys.path:
if not sp:
continue
_import_paths.append(os.path.abspath(sp))
stdpaths = tuple({p for p in _import_paths
if p.startswith(prefixes) and 'site-packages' not in p})
_stdlib.update(sys.builtin_module_names)
for stdpath in stdpaths:
if not os.path.isdir(stdpath):
continue
for item in os.listdir(stdpath):
if item.startswith('.') or item == 'site-packages':
continue
p = os.path.join(stdpath, item)
if not os.path.isdir(p) and not item.endswith(('.py', '.so')):
continue
_stdlib.add(item.split('.', 1)[0])
return _stdlib
load_stdlib()
def is_stdlib(module):
return module.split('.', 1)[0] in load_stdlib()
def is_import_str(text):
text = str_(text)
return re.match(r'^[\w\.]+$', text) and re.match(r'\w+\.\w+', text)
def file_containing_import(import_path, import_root):
'''Finds the file that might contain the import_path.
'''
if not _import_paths:
load_stdlib()
if os.path.isfile(import_root):
import_root = os.path.dirname(import_root)
search_paths = [import_root] + _import_paths
module_parts = import_path.split('.')
for i in range(len(module_parts), 0, -1):
module_path = os.path.join(*module_parts[:i])
for sp in search_paths:
p = os.path.join(sp, module_path)
if os.path.isdir(p):
return os.path.join(p, '__init__.py')
elif os.path.isfile(p + '.py'):
return p + '.py'
return None
def resolve_import(import_path, from_module):
'''Resolves relative imports from a module.
'''
if not import_path or not import_path.startswith('.'):
return import_path
from_module = from_module.split('.')
dots = 0
for c in import_path:
if c == '.':
dots += 1
else:
break
if dots:
from_module = from_module[:-dots]
import_path = import_path[dots:]
if import_path:
from_module.append(import_path)
return '.'.join(from_module)
def find_package(name, installed, package=False):
'''Finds a package in the installed list.
If `package` is true, match package names, otherwise, match import paths.
'''
if package:
name = name.lower()
tests = (
lambda x: x.user and name == x.name.lower(),
lambda x: x.local and name == x.name.lower(),
lambda x: name == x.name.lower(),
)
else:
tests = (
lambda x: x.user and name in x.import_names,
lambda x: x.local and name in x.import_names,
lambda x: name in x.import_names,
)
for t in tests:
try:
found = list(filter(t, installed))
if found and not found[0].is_scan:
return found[0]
except StopIteration:
pass
return None
def is_script(filename):
'''Checks if a file has a hashbang.
'''
if not os.path.isfile(filename):
return False
try:
with open(filename, 'rb') as fp:
return fp.read(2) == b'#!'
except IOError:
pass
return False
def is_python_script(filename):
'''Checks a file to see if it's a python script of some sort.
'''
if filename.lower().endswith('.py'):
return True
if not os.path.isfile(filename):
return False
try:
with open(filename, 'rb') as fp:
if fp.read(2) != b'#!':
return False
return re.match(r'.*python', str_(fp.readline()))
except IOError:
pass
return False
def iter_dist_files(dist):
if dist.has_metadata('RECORD'):
for line in dist.get_metadata_lines('RECORD'):
line = line.split(',')[0]
if line.endswith('.pyc'):
continue
yield os.path.normpath(os.path.join(dist.location, line))
elif dist.has_metadata('installed-files.txt'):
for line in dist.get_metadata_lines('installed-files.txt'):
if line.endswith('.pyc'):
continue
yield os.path.normpath(os.path.join(dist.location,
dist.egg_info, line))
def installed_packages(local=False):
installed = []
for dist in get_installed_distributions(local_only=local):
pym = PyModule(dist.project_name, dist.version, dist.location)
if dist.has_metadata('top_level.txt'):
pym.set_import_names(list(dist.get_metadata_lines('top_level.txt')))
pym.local = dist_is_local(dist)
pym.user = dist_in_usersite(dist)
pym._dependencies = [dep.project_name for dep in dist.requires()]
for filename in iter_dist_files(dist):
if not filename.startswith(dist.location):
if is_script(filename):
pym.installed_scripts.append(filename)
else:
pym.installed_files.append(filename)
if pym.installed_scripts or pym.name in ignore_packages:
pym.hidden = True
installed.append(pym)
for pym in installed[:]:
for dep in pym._dependencies:
if dep == 'argparse':
# Since I'm only testing with Python 2.7, skip any requirements
# for argparse.
continue
pymc = find_package(dep, installed, True)
if not pymc:
pymc = PyModule(dep, 'MISSING', missing=True)
installed.append(pymc)
pymc.add_dependant(pym)
pym.add_dependency(pymc)
return installed
|
tweekmonster/moult
|
moult/utils.py
|
file_containing_import
|
python
|
def file_containing_import(import_path, import_root):
'''Finds the file that might contain the import_path.
'''
if not _import_paths:
load_stdlib()
if os.path.isfile(import_root):
import_root = os.path.dirname(import_root)
search_paths = [import_root] + _import_paths
module_parts = import_path.split('.')
for i in range(len(module_parts), 0, -1):
module_path = os.path.join(*module_parts[:i])
for sp in search_paths:
p = os.path.join(sp, module_path)
if os.path.isdir(p):
return os.path.join(p, '__init__.py')
elif os.path.isfile(p + '.py'):
return p + '.py'
return None
|
Finds the file that might contain the import_path.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/utils.py#L88-L107
|
[
"def load_stdlib():\n '''Scans sys.path for standard library modules.\n '''\n if _stdlib:\n return _stdlib\n\n prefixes = tuple({os.path.abspath(p) for p in (\n sys.prefix,\n getattr(sys, 'real_prefix', sys.prefix),\n getattr(sys, 'base_prefix', sys.prefix),\n )})\n\n for sp in sys.path:\n if not sp:\n continue\n _import_paths.append(os.path.abspath(sp))\n\n stdpaths = tuple({p for p in _import_paths\n if p.startswith(prefixes) and 'site-packages' not in p})\n\n _stdlib.update(sys.builtin_module_names)\n\n for stdpath in stdpaths:\n if not os.path.isdir(stdpath):\n continue\n\n for item in os.listdir(stdpath):\n if item.startswith('.') or item == 'site-packages':\n continue\n\n p = os.path.join(stdpath, item)\n if not os.path.isdir(p) and not item.endswith(('.py', '.so')):\n continue\n\n _stdlib.add(item.split('.', 1)[0])\n\n return _stdlib\n"
] |
import os
import re
import sys
from .classes import PyModule
from .pip_importer import *
from .compat import str_
_stdlib = set()
_import_paths = []
__all__ = ('dist_is_local', 'dist_in_usersite', 'get_installed_distributions',
'running_under_virtualenv', 'ignore_packages',
'search_packages_info', 'find_package')
def load_stdlib():
'''Scans sys.path for standard library modules.
'''
if _stdlib:
return _stdlib
prefixes = tuple({os.path.abspath(p) for p in (
sys.prefix,
getattr(sys, 'real_prefix', sys.prefix),
getattr(sys, 'base_prefix', sys.prefix),
)})
for sp in sys.path:
if not sp:
continue
_import_paths.append(os.path.abspath(sp))
stdpaths = tuple({p for p in _import_paths
if p.startswith(prefixes) and 'site-packages' not in p})
_stdlib.update(sys.builtin_module_names)
for stdpath in stdpaths:
if not os.path.isdir(stdpath):
continue
for item in os.listdir(stdpath):
if item.startswith('.') or item == 'site-packages':
continue
p = os.path.join(stdpath, item)
if not os.path.isdir(p) and not item.endswith(('.py', '.so')):
continue
_stdlib.add(item.split('.', 1)[0])
return _stdlib
load_stdlib()
def is_stdlib(module):
return module.split('.', 1)[0] in load_stdlib()
def is_import_str(text):
text = str_(text)
return re.match(r'^[\w\.]+$', text) and re.match(r'\w+\.\w+', text)
def import_path_from_file(filename, as_list=False):
'''Returns a tuple of the import path and root module directory for the
supplied file.
'''
module_path = []
basename = os.path.splitext(os.path.basename(filename))[0]
if basename != '__init__':
module_path.append(basename)
dirname = os.path.dirname(filename)
while os.path.isfile(os.path.join(dirname, '__init__.py')):
dirname, tail = os.path.split(dirname)
module_path.insert(0, tail)
if as_list:
return module_path, dirname
return '.'.join(module_path), dirname
def resolve_import(import_path, from_module):
'''Resolves relative imports from a module.
'''
if not import_path or not import_path.startswith('.'):
return import_path
from_module = from_module.split('.')
dots = 0
for c in import_path:
if c == '.':
dots += 1
else:
break
if dots:
from_module = from_module[:-dots]
import_path = import_path[dots:]
if import_path:
from_module.append(import_path)
return '.'.join(from_module)
def find_package(name, installed, package=False):
'''Finds a package in the installed list.
If `package` is true, match package names, otherwise, match import paths.
'''
if package:
name = name.lower()
tests = (
lambda x: x.user and name == x.name.lower(),
lambda x: x.local and name == x.name.lower(),
lambda x: name == x.name.lower(),
)
else:
tests = (
lambda x: x.user and name in x.import_names,
lambda x: x.local and name in x.import_names,
lambda x: name in x.import_names,
)
for t in tests:
try:
found = list(filter(t, installed))
if found and not found[0].is_scan:
return found[0]
except StopIteration:
pass
return None
def is_script(filename):
'''Checks if a file has a hashbang.
'''
if not os.path.isfile(filename):
return False
try:
with open(filename, 'rb') as fp:
return fp.read(2) == b'#!'
except IOError:
pass
return False
def is_python_script(filename):
'''Checks a file to see if it's a python script of some sort.
'''
if filename.lower().endswith('.py'):
return True
if not os.path.isfile(filename):
return False
try:
with open(filename, 'rb') as fp:
if fp.read(2) != b'#!':
return False
return re.match(r'.*python', str_(fp.readline()))
except IOError:
pass
return False
def iter_dist_files(dist):
if dist.has_metadata('RECORD'):
for line in dist.get_metadata_lines('RECORD'):
line = line.split(',')[0]
if line.endswith('.pyc'):
continue
yield os.path.normpath(os.path.join(dist.location, line))
elif dist.has_metadata('installed-files.txt'):
for line in dist.get_metadata_lines('installed-files.txt'):
if line.endswith('.pyc'):
continue
yield os.path.normpath(os.path.join(dist.location,
dist.egg_info, line))
def installed_packages(local=False):
installed = []
for dist in get_installed_distributions(local_only=local):
pym = PyModule(dist.project_name, dist.version, dist.location)
if dist.has_metadata('top_level.txt'):
pym.set_import_names(list(dist.get_metadata_lines('top_level.txt')))
pym.local = dist_is_local(dist)
pym.user = dist_in_usersite(dist)
pym._dependencies = [dep.project_name for dep in dist.requires()]
for filename in iter_dist_files(dist):
if not filename.startswith(dist.location):
if is_script(filename):
pym.installed_scripts.append(filename)
else:
pym.installed_files.append(filename)
if pym.installed_scripts or pym.name in ignore_packages:
pym.hidden = True
installed.append(pym)
for pym in installed[:]:
for dep in pym._dependencies:
if dep == 'argparse':
# Since I'm only testing with Python 2.7, skip any requirements
# for argparse.
continue
pymc = find_package(dep, installed, True)
if not pymc:
pymc = PyModule(dep, 'MISSING', missing=True)
installed.append(pymc)
pymc.add_dependant(pym)
pym.add_dependency(pymc)
return installed
|
tweekmonster/moult
|
moult/utils.py
|
resolve_import
|
python
|
def resolve_import(import_path, from_module):
'''Resolves relative imports from a module.
'''
if not import_path or not import_path.startswith('.'):
return import_path
from_module = from_module.split('.')
dots = 0
for c in import_path:
if c == '.':
dots += 1
else:
break
if dots:
from_module = from_module[:-dots]
import_path = import_path[dots:]
if import_path:
from_module.append(import_path)
return '.'.join(from_module)
|
Resolves relative imports from a module.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/utils.py#L110-L131
| null |
import os
import re
import sys
from .classes import PyModule
from .pip_importer import *
from .compat import str_
_stdlib = set()
_import_paths = []
__all__ = ('dist_is_local', 'dist_in_usersite', 'get_installed_distributions',
'running_under_virtualenv', 'ignore_packages',
'search_packages_info', 'find_package')
def load_stdlib():
'''Scans sys.path for standard library modules.
'''
if _stdlib:
return _stdlib
prefixes = tuple({os.path.abspath(p) for p in (
sys.prefix,
getattr(sys, 'real_prefix', sys.prefix),
getattr(sys, 'base_prefix', sys.prefix),
)})
for sp in sys.path:
if not sp:
continue
_import_paths.append(os.path.abspath(sp))
stdpaths = tuple({p for p in _import_paths
if p.startswith(prefixes) and 'site-packages' not in p})
_stdlib.update(sys.builtin_module_names)
for stdpath in stdpaths:
if not os.path.isdir(stdpath):
continue
for item in os.listdir(stdpath):
if item.startswith('.') or item == 'site-packages':
continue
p = os.path.join(stdpath, item)
if not os.path.isdir(p) and not item.endswith(('.py', '.so')):
continue
_stdlib.add(item.split('.', 1)[0])
return _stdlib
load_stdlib()
def is_stdlib(module):
return module.split('.', 1)[0] in load_stdlib()
def is_import_str(text):
text = str_(text)
return re.match(r'^[\w\.]+$', text) and re.match(r'\w+\.\w+', text)
def import_path_from_file(filename, as_list=False):
'''Returns a tuple of the import path and root module directory for the
supplied file.
'''
module_path = []
basename = os.path.splitext(os.path.basename(filename))[0]
if basename != '__init__':
module_path.append(basename)
dirname = os.path.dirname(filename)
while os.path.isfile(os.path.join(dirname, '__init__.py')):
dirname, tail = os.path.split(dirname)
module_path.insert(0, tail)
if as_list:
return module_path, dirname
return '.'.join(module_path), dirname
def file_containing_import(import_path, import_root):
'''Finds the file that might contain the import_path.
'''
if not _import_paths:
load_stdlib()
if os.path.isfile(import_root):
import_root = os.path.dirname(import_root)
search_paths = [import_root] + _import_paths
module_parts = import_path.split('.')
for i in range(len(module_parts), 0, -1):
module_path = os.path.join(*module_parts[:i])
for sp in search_paths:
p = os.path.join(sp, module_path)
if os.path.isdir(p):
return os.path.join(p, '__init__.py')
elif os.path.isfile(p + '.py'):
return p + '.py'
return None
def find_package(name, installed, package=False):
'''Finds a package in the installed list.
If `package` is true, match package names, otherwise, match import paths.
'''
if package:
name = name.lower()
tests = (
lambda x: x.user and name == x.name.lower(),
lambda x: x.local and name == x.name.lower(),
lambda x: name == x.name.lower(),
)
else:
tests = (
lambda x: x.user and name in x.import_names,
lambda x: x.local and name in x.import_names,
lambda x: name in x.import_names,
)
for t in tests:
try:
found = list(filter(t, installed))
if found and not found[0].is_scan:
return found[0]
except StopIteration:
pass
return None
def is_script(filename):
'''Checks if a file has a hashbang.
'''
if not os.path.isfile(filename):
return False
try:
with open(filename, 'rb') as fp:
return fp.read(2) == b'#!'
except IOError:
pass
return False
def is_python_script(filename):
'''Checks a file to see if it's a python script of some sort.
'''
if filename.lower().endswith('.py'):
return True
if not os.path.isfile(filename):
return False
try:
with open(filename, 'rb') as fp:
if fp.read(2) != b'#!':
return False
return re.match(r'.*python', str_(fp.readline()))
except IOError:
pass
return False
def iter_dist_files(dist):
if dist.has_metadata('RECORD'):
for line in dist.get_metadata_lines('RECORD'):
line = line.split(',')[0]
if line.endswith('.pyc'):
continue
yield os.path.normpath(os.path.join(dist.location, line))
elif dist.has_metadata('installed-files.txt'):
for line in dist.get_metadata_lines('installed-files.txt'):
if line.endswith('.pyc'):
continue
yield os.path.normpath(os.path.join(dist.location,
dist.egg_info, line))
def installed_packages(local=False):
installed = []
for dist in get_installed_distributions(local_only=local):
pym = PyModule(dist.project_name, dist.version, dist.location)
if dist.has_metadata('top_level.txt'):
pym.set_import_names(list(dist.get_metadata_lines('top_level.txt')))
pym.local = dist_is_local(dist)
pym.user = dist_in_usersite(dist)
pym._dependencies = [dep.project_name for dep in dist.requires()]
for filename in iter_dist_files(dist):
if not filename.startswith(dist.location):
if is_script(filename):
pym.installed_scripts.append(filename)
else:
pym.installed_files.append(filename)
if pym.installed_scripts or pym.name in ignore_packages:
pym.hidden = True
installed.append(pym)
for pym in installed[:]:
for dep in pym._dependencies:
if dep == 'argparse':
# Since I'm only testing with Python 2.7, skip any requirements
# for argparse.
continue
pymc = find_package(dep, installed, True)
if not pymc:
pymc = PyModule(dep, 'MISSING', missing=True)
installed.append(pymc)
pymc.add_dependant(pym)
pym.add_dependency(pymc)
return installed
|
tweekmonster/moult
|
moult/utils.py
|
find_package
|
python
|
def find_package(name, installed, package=False):
'''Finds a package in the installed list.
If `package` is true, match package names, otherwise, match import paths.
'''
if package:
name = name.lower()
tests = (
lambda x: x.user and name == x.name.lower(),
lambda x: x.local and name == x.name.lower(),
lambda x: name == x.name.lower(),
)
else:
tests = (
lambda x: x.user and name in x.import_names,
lambda x: x.local and name in x.import_names,
lambda x: name in x.import_names,
)
for t in tests:
try:
found = list(filter(t, installed))
if found and not found[0].is_scan:
return found[0]
except StopIteration:
pass
return None
|
Finds a package in the installed list.
If `package` is true, match package names, otherwise, match import paths.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/utils.py#L134-L160
| null |
import os
import re
import sys
from .classes import PyModule
from .pip_importer import *
from .compat import str_
_stdlib = set()
_import_paths = []
__all__ = ('dist_is_local', 'dist_in_usersite', 'get_installed_distributions',
'running_under_virtualenv', 'ignore_packages',
'search_packages_info', 'find_package')
def load_stdlib():
'''Scans sys.path for standard library modules.
'''
if _stdlib:
return _stdlib
prefixes = tuple({os.path.abspath(p) for p in (
sys.prefix,
getattr(sys, 'real_prefix', sys.prefix),
getattr(sys, 'base_prefix', sys.prefix),
)})
for sp in sys.path:
if not sp:
continue
_import_paths.append(os.path.abspath(sp))
stdpaths = tuple({p for p in _import_paths
if p.startswith(prefixes) and 'site-packages' not in p})
_stdlib.update(sys.builtin_module_names)
for stdpath in stdpaths:
if not os.path.isdir(stdpath):
continue
for item in os.listdir(stdpath):
if item.startswith('.') or item == 'site-packages':
continue
p = os.path.join(stdpath, item)
if not os.path.isdir(p) and not item.endswith(('.py', '.so')):
continue
_stdlib.add(item.split('.', 1)[0])
return _stdlib
load_stdlib()
def is_stdlib(module):
return module.split('.', 1)[0] in load_stdlib()
def is_import_str(text):
text = str_(text)
return re.match(r'^[\w\.]+$', text) and re.match(r'\w+\.\w+', text)
def import_path_from_file(filename, as_list=False):
'''Returns a tuple of the import path and root module directory for the
supplied file.
'''
module_path = []
basename = os.path.splitext(os.path.basename(filename))[0]
if basename != '__init__':
module_path.append(basename)
dirname = os.path.dirname(filename)
while os.path.isfile(os.path.join(dirname, '__init__.py')):
dirname, tail = os.path.split(dirname)
module_path.insert(0, tail)
if as_list:
return module_path, dirname
return '.'.join(module_path), dirname
def file_containing_import(import_path, import_root):
'''Finds the file that might contain the import_path.
'''
if not _import_paths:
load_stdlib()
if os.path.isfile(import_root):
import_root = os.path.dirname(import_root)
search_paths = [import_root] + _import_paths
module_parts = import_path.split('.')
for i in range(len(module_parts), 0, -1):
module_path = os.path.join(*module_parts[:i])
for sp in search_paths:
p = os.path.join(sp, module_path)
if os.path.isdir(p):
return os.path.join(p, '__init__.py')
elif os.path.isfile(p + '.py'):
return p + '.py'
return None
def resolve_import(import_path, from_module):
'''Resolves relative imports from a module.
'''
if not import_path or not import_path.startswith('.'):
return import_path
from_module = from_module.split('.')
dots = 0
for c in import_path:
if c == '.':
dots += 1
else:
break
if dots:
from_module = from_module[:-dots]
import_path = import_path[dots:]
if import_path:
from_module.append(import_path)
return '.'.join(from_module)
def is_script(filename):
'''Checks if a file has a hashbang.
'''
if not os.path.isfile(filename):
return False
try:
with open(filename, 'rb') as fp:
return fp.read(2) == b'#!'
except IOError:
pass
return False
def is_python_script(filename):
'''Checks a file to see if it's a python script of some sort.
'''
if filename.lower().endswith('.py'):
return True
if not os.path.isfile(filename):
return False
try:
with open(filename, 'rb') as fp:
if fp.read(2) != b'#!':
return False
return re.match(r'.*python', str_(fp.readline()))
except IOError:
pass
return False
def iter_dist_files(dist):
if dist.has_metadata('RECORD'):
for line in dist.get_metadata_lines('RECORD'):
line = line.split(',')[0]
if line.endswith('.pyc'):
continue
yield os.path.normpath(os.path.join(dist.location, line))
elif dist.has_metadata('installed-files.txt'):
for line in dist.get_metadata_lines('installed-files.txt'):
if line.endswith('.pyc'):
continue
yield os.path.normpath(os.path.join(dist.location,
dist.egg_info, line))
def installed_packages(local=False):
installed = []
for dist in get_installed_distributions(local_only=local):
pym = PyModule(dist.project_name, dist.version, dist.location)
if dist.has_metadata('top_level.txt'):
pym.set_import_names(list(dist.get_metadata_lines('top_level.txt')))
pym.local = dist_is_local(dist)
pym.user = dist_in_usersite(dist)
pym._dependencies = [dep.project_name for dep in dist.requires()]
for filename in iter_dist_files(dist):
if not filename.startswith(dist.location):
if is_script(filename):
pym.installed_scripts.append(filename)
else:
pym.installed_files.append(filename)
if pym.installed_scripts or pym.name in ignore_packages:
pym.hidden = True
installed.append(pym)
for pym in installed[:]:
for dep in pym._dependencies:
if dep == 'argparse':
# Since I'm only testing with Python 2.7, skip any requirements
# for argparse.
continue
pymc = find_package(dep, installed, True)
if not pymc:
pymc = PyModule(dep, 'MISSING', missing=True)
installed.append(pymc)
pymc.add_dependant(pym)
pym.add_dependency(pymc)
return installed
|
tweekmonster/moult
|
moult/utils.py
|
is_script
|
python
|
def is_script(filename):
'''Checks if a file has a hashbang.
'''
if not os.path.isfile(filename):
return False
try:
with open(filename, 'rb') as fp:
return fp.read(2) == b'#!'
except IOError:
pass
return False
|
Checks if a file has a hashbang.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/utils.py#L163-L175
| null |
import os
import re
import sys
from .classes import PyModule
from .pip_importer import *
from .compat import str_
_stdlib = set()
_import_paths = []
__all__ = ('dist_is_local', 'dist_in_usersite', 'get_installed_distributions',
'running_under_virtualenv', 'ignore_packages',
'search_packages_info', 'find_package')
def load_stdlib():
'''Scans sys.path for standard library modules.
'''
if _stdlib:
return _stdlib
prefixes = tuple({os.path.abspath(p) for p in (
sys.prefix,
getattr(sys, 'real_prefix', sys.prefix),
getattr(sys, 'base_prefix', sys.prefix),
)})
for sp in sys.path:
if not sp:
continue
_import_paths.append(os.path.abspath(sp))
stdpaths = tuple({p for p in _import_paths
if p.startswith(prefixes) and 'site-packages' not in p})
_stdlib.update(sys.builtin_module_names)
for stdpath in stdpaths:
if not os.path.isdir(stdpath):
continue
for item in os.listdir(stdpath):
if item.startswith('.') or item == 'site-packages':
continue
p = os.path.join(stdpath, item)
if not os.path.isdir(p) and not item.endswith(('.py', '.so')):
continue
_stdlib.add(item.split('.', 1)[0])
return _stdlib
load_stdlib()
def is_stdlib(module):
return module.split('.', 1)[0] in load_stdlib()
def is_import_str(text):
text = str_(text)
return re.match(r'^[\w\.]+$', text) and re.match(r'\w+\.\w+', text)
def import_path_from_file(filename, as_list=False):
'''Returns a tuple of the import path and root module directory for the
supplied file.
'''
module_path = []
basename = os.path.splitext(os.path.basename(filename))[0]
if basename != '__init__':
module_path.append(basename)
dirname = os.path.dirname(filename)
while os.path.isfile(os.path.join(dirname, '__init__.py')):
dirname, tail = os.path.split(dirname)
module_path.insert(0, tail)
if as_list:
return module_path, dirname
return '.'.join(module_path), dirname
def file_containing_import(import_path, import_root):
'''Finds the file that might contain the import_path.
'''
if not _import_paths:
load_stdlib()
if os.path.isfile(import_root):
import_root = os.path.dirname(import_root)
search_paths = [import_root] + _import_paths
module_parts = import_path.split('.')
for i in range(len(module_parts), 0, -1):
module_path = os.path.join(*module_parts[:i])
for sp in search_paths:
p = os.path.join(sp, module_path)
if os.path.isdir(p):
return os.path.join(p, '__init__.py')
elif os.path.isfile(p + '.py'):
return p + '.py'
return None
def resolve_import(import_path, from_module):
'''Resolves relative imports from a module.
'''
if not import_path or not import_path.startswith('.'):
return import_path
from_module = from_module.split('.')
dots = 0
for c in import_path:
if c == '.':
dots += 1
else:
break
if dots:
from_module = from_module[:-dots]
import_path = import_path[dots:]
if import_path:
from_module.append(import_path)
return '.'.join(from_module)
def find_package(name, installed, package=False):
'''Finds a package in the installed list.
If `package` is true, match package names, otherwise, match import paths.
'''
if package:
name = name.lower()
tests = (
lambda x: x.user and name == x.name.lower(),
lambda x: x.local and name == x.name.lower(),
lambda x: name == x.name.lower(),
)
else:
tests = (
lambda x: x.user and name in x.import_names,
lambda x: x.local and name in x.import_names,
lambda x: name in x.import_names,
)
for t in tests:
try:
found = list(filter(t, installed))
if found and not found[0].is_scan:
return found[0]
except StopIteration:
pass
return None
def is_python_script(filename):
'''Checks a file to see if it's a python script of some sort.
'''
if filename.lower().endswith('.py'):
return True
if not os.path.isfile(filename):
return False
try:
with open(filename, 'rb') as fp:
if fp.read(2) != b'#!':
return False
return re.match(r'.*python', str_(fp.readline()))
except IOError:
pass
return False
def iter_dist_files(dist):
if dist.has_metadata('RECORD'):
for line in dist.get_metadata_lines('RECORD'):
line = line.split(',')[0]
if line.endswith('.pyc'):
continue
yield os.path.normpath(os.path.join(dist.location, line))
elif dist.has_metadata('installed-files.txt'):
for line in dist.get_metadata_lines('installed-files.txt'):
if line.endswith('.pyc'):
continue
yield os.path.normpath(os.path.join(dist.location,
dist.egg_info, line))
def installed_packages(local=False):
installed = []
for dist in get_installed_distributions(local_only=local):
pym = PyModule(dist.project_name, dist.version, dist.location)
if dist.has_metadata('top_level.txt'):
pym.set_import_names(list(dist.get_metadata_lines('top_level.txt')))
pym.local = dist_is_local(dist)
pym.user = dist_in_usersite(dist)
pym._dependencies = [dep.project_name for dep in dist.requires()]
for filename in iter_dist_files(dist):
if not filename.startswith(dist.location):
if is_script(filename):
pym.installed_scripts.append(filename)
else:
pym.installed_files.append(filename)
if pym.installed_scripts or pym.name in ignore_packages:
pym.hidden = True
installed.append(pym)
for pym in installed[:]:
for dep in pym._dependencies:
if dep == 'argparse':
# Since I'm only testing with Python 2.7, skip any requirements
# for argparse.
continue
pymc = find_package(dep, installed, True)
if not pymc:
pymc = PyModule(dep, 'MISSING', missing=True)
installed.append(pymc)
pymc.add_dependant(pym)
pym.add_dependency(pymc)
return installed
|
tweekmonster/moult
|
moult/utils.py
|
is_python_script
|
python
|
def is_python_script(filename):
'''Checks a file to see if it's a python script of some sort.
'''
if filename.lower().endswith('.py'):
return True
if not os.path.isfile(filename):
return False
try:
with open(filename, 'rb') as fp:
if fp.read(2) != b'#!':
return False
return re.match(r'.*python', str_(fp.readline()))
except IOError:
pass
return False
|
Checks a file to see if it's a python script of some sort.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/utils.py#L178-L195
| null |
import os
import re
import sys
from .classes import PyModule
from .pip_importer import *
from .compat import str_
_stdlib = set()
_import_paths = []
__all__ = ('dist_is_local', 'dist_in_usersite', 'get_installed_distributions',
'running_under_virtualenv', 'ignore_packages',
'search_packages_info', 'find_package')
def load_stdlib():
'''Scans sys.path for standard library modules.
'''
if _stdlib:
return _stdlib
prefixes = tuple({os.path.abspath(p) for p in (
sys.prefix,
getattr(sys, 'real_prefix', sys.prefix),
getattr(sys, 'base_prefix', sys.prefix),
)})
for sp in sys.path:
if not sp:
continue
_import_paths.append(os.path.abspath(sp))
stdpaths = tuple({p for p in _import_paths
if p.startswith(prefixes) and 'site-packages' not in p})
_stdlib.update(sys.builtin_module_names)
for stdpath in stdpaths:
if not os.path.isdir(stdpath):
continue
for item in os.listdir(stdpath):
if item.startswith('.') or item == 'site-packages':
continue
p = os.path.join(stdpath, item)
if not os.path.isdir(p) and not item.endswith(('.py', '.so')):
continue
_stdlib.add(item.split('.', 1)[0])
return _stdlib
load_stdlib()
def is_stdlib(module):
return module.split('.', 1)[0] in load_stdlib()
def is_import_str(text):
text = str_(text)
return re.match(r'^[\w\.]+$', text) and re.match(r'\w+\.\w+', text)
def import_path_from_file(filename, as_list=False):
'''Returns a tuple of the import path and root module directory for the
supplied file.
'''
module_path = []
basename = os.path.splitext(os.path.basename(filename))[0]
if basename != '__init__':
module_path.append(basename)
dirname = os.path.dirname(filename)
while os.path.isfile(os.path.join(dirname, '__init__.py')):
dirname, tail = os.path.split(dirname)
module_path.insert(0, tail)
if as_list:
return module_path, dirname
return '.'.join(module_path), dirname
def file_containing_import(import_path, import_root):
'''Finds the file that might contain the import_path.
'''
if not _import_paths:
load_stdlib()
if os.path.isfile(import_root):
import_root = os.path.dirname(import_root)
search_paths = [import_root] + _import_paths
module_parts = import_path.split('.')
for i in range(len(module_parts), 0, -1):
module_path = os.path.join(*module_parts[:i])
for sp in search_paths:
p = os.path.join(sp, module_path)
if os.path.isdir(p):
return os.path.join(p, '__init__.py')
elif os.path.isfile(p + '.py'):
return p + '.py'
return None
def resolve_import(import_path, from_module):
'''Resolves relative imports from a module.
'''
if not import_path or not import_path.startswith('.'):
return import_path
from_module = from_module.split('.')
dots = 0
for c in import_path:
if c == '.':
dots += 1
else:
break
if dots:
from_module = from_module[:-dots]
import_path = import_path[dots:]
if import_path:
from_module.append(import_path)
return '.'.join(from_module)
def find_package(name, installed, package=False):
'''Finds a package in the installed list.
If `package` is true, match package names, otherwise, match import paths.
'''
if package:
name = name.lower()
tests = (
lambda x: x.user and name == x.name.lower(),
lambda x: x.local and name == x.name.lower(),
lambda x: name == x.name.lower(),
)
else:
tests = (
lambda x: x.user and name in x.import_names,
lambda x: x.local and name in x.import_names,
lambda x: name in x.import_names,
)
for t in tests:
try:
found = list(filter(t, installed))
if found and not found[0].is_scan:
return found[0]
except StopIteration:
pass
return None
def is_script(filename):
'''Checks if a file has a hashbang.
'''
if not os.path.isfile(filename):
return False
try:
with open(filename, 'rb') as fp:
return fp.read(2) == b'#!'
except IOError:
pass
return False
def iter_dist_files(dist):
if dist.has_metadata('RECORD'):
for line in dist.get_metadata_lines('RECORD'):
line = line.split(',')[0]
if line.endswith('.pyc'):
continue
yield os.path.normpath(os.path.join(dist.location, line))
elif dist.has_metadata('installed-files.txt'):
for line in dist.get_metadata_lines('installed-files.txt'):
if line.endswith('.pyc'):
continue
yield os.path.normpath(os.path.join(dist.location,
dist.egg_info, line))
def installed_packages(local=False):
installed = []
for dist in get_installed_distributions(local_only=local):
pym = PyModule(dist.project_name, dist.version, dist.location)
if dist.has_metadata('top_level.txt'):
pym.set_import_names(list(dist.get_metadata_lines('top_level.txt')))
pym.local = dist_is_local(dist)
pym.user = dist_in_usersite(dist)
pym._dependencies = [dep.project_name for dep in dist.requires()]
for filename in iter_dist_files(dist):
if not filename.startswith(dist.location):
if is_script(filename):
pym.installed_scripts.append(filename)
else:
pym.installed_files.append(filename)
if pym.installed_scripts or pym.name in ignore_packages:
pym.hidden = True
installed.append(pym)
for pym in installed[:]:
for dep in pym._dependencies:
if dep == 'argparse':
# Since I'm only testing with Python 2.7, skip any requirements
# for argparse.
continue
pymc = find_package(dep, installed, True)
if not pymc:
pymc = PyModule(dep, 'MISSING', missing=True)
installed.append(pymc)
pymc.add_dependant(pym)
pym.add_dependency(pymc)
return installed
|
tweekmonster/moult
|
moult/printer.py
|
output
|
python
|
def output(*args, **kwargs):
'''Analog of print() but with an indent option
'''
indent = kwargs.pop('indent', 0)
sep = kwargs.pop('sep', None)
kwargs['sep'] = u'' # Sanity
if sep is None:
sep = u' '
indent_str = u' ' * (indent * tab_width)
text = sep.join(map(str_, args))
color = kwargs.pop('color', None)
if color:
color.bright = kwargs.pop('bright', None)
text = ColorText(text, color)
print(indent_str + text, **kwargs)
|
Analog of print() but with an indent option
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/printer.py#L21-L35
| null |
from __future__ import print_function
import os
import sys
import time
from .utils import running_under_virtualenv
from .color import *
from .exceptions import MoultCommandError
from .compat import str_
from . import __version__
__all__ = ('enable_debug', 'output', 'error', 'wrap', 'print_module')
enable_debug = False
tab_width = 2
def error(message, fatal=False):
if fatal:
raise MoultCommandError(message)
output(ColorText(message, MEH), file=sys.stderr)
def wrap(items, prefix=0, width=80):
width -= prefix
lines = []
line_i = 0
line = ColorTextRun()
for i, item in enumerate(items):
if i and (line_i - 1 >= width or line_i + len(item) + 1 >= width):
line_i = 0
if len(lines):
indent = u' ' * prefix
else:
indent = u''
lines.append(str_(indent + line).rstrip())
line = ColorTextRun()
line += item + ', '
line_i += 2 + len(item)
if line:
if len(lines):
indent = u' ' * prefix
else:
indent = u''
lines.append(str_(indent + line).rstrip())
return u'\n'.join(lines).rstrip(', ')
def file_string(filename):
return ColorTextRun(os.path.dirname(filename),
os.path.sep,
ColorText(os.path.basename(filename), HEY))
def module_string(pym, require=False, plain=False):
s = pym.name
c = NEAT
if pym.is_scan:
c = MEH
elif pym.hidden:
c = SHHH
elif pym.local and running_under_virtualenv():
c = GOOD
elif not pym.local and running_under_virtualenv():
c = HEY
s = ColorText(s, c)
if pym.hidden and plain:
s = ColorText(u'_', HEY) + s
if plain:
return s
if require:
s += ColorTextRun(u'==', ColorText(pym.version, NEAT))
else:
s += ColorTextRun(u' (', ColorText(pym.version, NEAT), u')')
return s
def require_string(pym):
return module_string(pym, require=True)
def print_requires(pkg, show_all=False, printed=None):
if printed is None:
printed = set()
for dep in pkg.dependencies:
if not dep.dependants:
print_requires(dep, show_all=show_all, printed=printed)
for dep in pkg.dependencies:
print_requires(dep, show_all=show_all, printed=printed)
if pkg in printed:
return
printed.add(pkg)
if not pkg.is_scan:
if pkg.missing:
output('#', end=' ')
output(require_string(pkg))
def print_frozen(scans, show_all=False, printed=None):
output('# Requirements based on scans from:')
for pym in scans:
output('# {}'.format(pym.location))
date_str = time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime())
output('# Generated with moult {} at {}'.format(__version__, date_str))
printed = set()
for scan in scans:
print_requires(scan, show_all=show_all, printed=printed)
def print_module(pym, depth=0, indent_str=' ', printed=None, detail=False,
show_dependants=False, show_dependencies=False):
if not printed:
printed = []
if pym in printed:
return
printed.append(pym)
output(module_string(pym, not detail), indent=depth)
if detail:
loc = u'NOT INSTALLED'
if pym.is_scan:
loc = pym.location
elif pym.local and running_under_virtualenv():
loc = ColorText(u'VirtualEnv', YAY)
elif pym.user:
loc = ColorText(u'User', NEAT)
elif not pym.missing:
c = HEY.copy()
c.set_bright(True)
loc = ColorText(u'System', c)
rows = [(u'Location:', [loc])]
notes = []
if pym.hidden:
notes.append(ColorText(u'Hidden Package', SHHH))
if 'django' in pym.frameworks:
notes.append(ColorText(u'Contains Django project', NEAT))
if notes:
rows.append((u'Notes:', notes))
if pym.installed_scripts:
rows.append((u'Scripts:',
[file_string(x) for x in pym.installed_scripts]))
if pym.installed_files:
rows.append((u'Files:',
[file_string(x) for x in pym.installed_files]))
if pym.dependants:
rows.append((u'Used In:',
[module_string(x) for x in pym.dependants]))
if not pym.dependencies:
items = [ColorText(u'None', NEAT)]
else:
items = [module_string(x) for x in pym.dependencies]
rows.append((u'Requires:', items))
tab = max(map(lambda x: len(x[0]), rows))
for label, items in rows:
# Label width, tab width, and space
w_tab = tab + ((depth + 1) * tab_width) + 1
output(label.rjust(tab), wrap(items, w_tab), indent=depth + 1)
print('')
if show_dependants and pym.dependants:
for dep in pym.dependants:
print_module(dep, depth, detail=detail, printed=printed,
show_dependencies=show_dependencies)
if show_dependencies and pym.dependencies:
for dep in pym.dependencies:
print_module(dep, depth, detail=detail, printed=printed,
show_dependencies=show_dependencies)
|
tweekmonster/moult
|
moult/frameworks/django.py
|
scan_django_settings
|
python
|
def scan_django_settings(values, imports):
'''Recursively scans Django settings for values that appear to be
imported modules.
'''
if isinstance(values, (str, bytes)):
if utils.is_import_str(values):
imports.add(values)
elif isinstance(values, dict):
for k, v in values.items():
scan_django_settings(k, imports)
scan_django_settings(v, imports)
elif hasattr(values, '__file__') and getattr(values, '__file__'):
imp, _ = utils.import_path_from_file(getattr(values, '__file__'))
imports.add(imp)
elif hasattr(values, '__iter__'):
for item in values:
scan_django_settings(item, imports)
|
Recursively scans Django settings for values that appear to be
imported modules.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/frameworks/django.py#L21-L37
|
[
"def import_path_from_file(filename, as_list=False):\n '''Returns a tuple of the import path and root module directory for the\n supplied file.\n '''\n module_path = []\n basename = os.path.splitext(os.path.basename(filename))[0]\n if basename != '__init__':\n module_path.append(basename)\n\n dirname = os.path.dirname(filename)\n while os.path.isfile(os.path.join(dirname, '__init__.py')):\n dirname, tail = os.path.split(dirname)\n module_path.insert(0, tail)\n\n if as_list:\n return module_path, dirname\n return '.'.join(module_path), dirname\n",
"def scan_django_settings(values, imports):\n '''Recursively scans Django settings for values that appear to be\n imported modules.\n '''\n if isinstance(values, (str, bytes)):\n if utils.is_import_str(values):\n imports.add(values)\n elif isinstance(values, dict):\n for k, v in values.items():\n scan_django_settings(k, imports)\n scan_django_settings(v, imports)\n elif hasattr(values, '__file__') and getattr(values, '__file__'):\n imp, _ = utils.import_path_from_file(getattr(values, '__file__'))\n imports.add(imp)\n elif hasattr(values, '__iter__'):\n for item in values:\n scan_django_settings(item, imports)\n",
"def is_import_str(text):\n text = str_(text)\n return re.match(r'^[\\w\\.]+$', text) and re.match(r'\\w+\\.\\w+', text)\n"
] |
from __future__ import absolute_import, unicode_literals
import re
import os
import sys
import time
from .. import utils, log
_excluded_settings = (
'ALLOWED_HOSTS',
)
_filescan_modules = (
'django.db.backends',
'django.core.cache.backends',
)
def handle_django_settings(filename):
'''Attempts to load a Django project and get package dependencies from
settings.
Tested using Django 1.4 and 1.8. Not sure if some nuances are missed in
the other versions.
'''
old_sys_path = sys.path[:]
dirpath = os.path.dirname(filename)
project = os.path.basename(dirpath)
cwd = os.getcwd()
project_path = os.path.normpath(os.path.join(dirpath, '..'))
if project_path not in sys.path:
sys.path.insert(0, project_path)
os.chdir(project_path)
project_settings = '{}.settings'.format(project)
os.environ['DJANGO_SETTINGS_MODULE'] = project_settings
try:
import django
# Sanity
django.setup = lambda: False
except ImportError:
log.error('Found Django settings, but Django is not installed.')
return
log.warn('Loading Django Settings (Using {}): {}'
.format(django.get_version(), filename))
from django.conf import LazySettings
installed_apps = set()
settings_imports = set()
try:
settings = LazySettings()
settings._setup()
for k, v in vars(settings._wrapped).items():
if k not in _excluded_settings and re.match(r'^[A-Z_]+$', k):
# log.debug('Scanning Django setting: %s', k)
scan_django_settings(v, settings_imports)
# Manually scan INSTALLED_APPS since the broad scan won't include
# strings without a period in it .
for app in getattr(settings, 'INSTALLED_APPS', []):
if hasattr(app, '__file__') and getattr(app, '__file__'):
imp, _ = utils.import_path_from_file(getattr(app, '__file__'))
installed_apps.add(imp)
else:
installed_apps.add(app)
except Exception as e:
log.error('Could not load Django settings: %s', e)
log.debug('', exc_info=True)
return
if not installed_apps or not settings_imports:
log.error('Got empty settings values from Django settings.')
try:
from django.apps.registry import apps, Apps, AppRegistryNotReady
# Django doesn't like it when the initial instance of `apps` is reused,
# but it has to be populated before other instances can be created.
if not apps.apps_ready:
apps.populate(installed_apps)
else:
apps = Apps(installed_apps)
start = time.time()
while True:
try:
for app in apps.get_app_configs():
installed_apps.add(app.name)
except AppRegistryNotReady:
if time.time() - start > 10:
raise Exception('Bail out of waiting for Django')
log.debug('Waiting for apps to load...')
continue
break
except Exception as e:
log.debug('Could not use AppConfig: {}'.format(e))
# Restore before sub scans can occur
sys.path[:] = old_sys_path
os.chdir(cwd)
for item in settings_imports:
need_scan = item.startswith(_filescan_modules)
yield ('django', item, project_path if need_scan else None)
for app in installed_apps:
need_scan = app.startswith(project)
yield ('django', app, project_path if need_scan else None)
|
tweekmonster/moult
|
moult/frameworks/django.py
|
handle_django_settings
|
python
|
def handle_django_settings(filename):
'''Attempts to load a Django project and get package dependencies from
settings.
Tested using Django 1.4 and 1.8. Not sure if some nuances are missed in
the other versions.
'''
old_sys_path = sys.path[:]
dirpath = os.path.dirname(filename)
project = os.path.basename(dirpath)
cwd = os.getcwd()
project_path = os.path.normpath(os.path.join(dirpath, '..'))
if project_path not in sys.path:
sys.path.insert(0, project_path)
os.chdir(project_path)
project_settings = '{}.settings'.format(project)
os.environ['DJANGO_SETTINGS_MODULE'] = project_settings
try:
import django
# Sanity
django.setup = lambda: False
except ImportError:
log.error('Found Django settings, but Django is not installed.')
return
log.warn('Loading Django Settings (Using {}): {}'
.format(django.get_version(), filename))
from django.conf import LazySettings
installed_apps = set()
settings_imports = set()
try:
settings = LazySettings()
settings._setup()
for k, v in vars(settings._wrapped).items():
if k not in _excluded_settings and re.match(r'^[A-Z_]+$', k):
# log.debug('Scanning Django setting: %s', k)
scan_django_settings(v, settings_imports)
# Manually scan INSTALLED_APPS since the broad scan won't include
# strings without a period in it .
for app in getattr(settings, 'INSTALLED_APPS', []):
if hasattr(app, '__file__') and getattr(app, '__file__'):
imp, _ = utils.import_path_from_file(getattr(app, '__file__'))
installed_apps.add(imp)
else:
installed_apps.add(app)
except Exception as e:
log.error('Could not load Django settings: %s', e)
log.debug('', exc_info=True)
return
if not installed_apps or not settings_imports:
log.error('Got empty settings values from Django settings.')
try:
from django.apps.registry import apps, Apps, AppRegistryNotReady
# Django doesn't like it when the initial instance of `apps` is reused,
# but it has to be populated before other instances can be created.
if not apps.apps_ready:
apps.populate(installed_apps)
else:
apps = Apps(installed_apps)
start = time.time()
while True:
try:
for app in apps.get_app_configs():
installed_apps.add(app.name)
except AppRegistryNotReady:
if time.time() - start > 10:
raise Exception('Bail out of waiting for Django')
log.debug('Waiting for apps to load...')
continue
break
except Exception as e:
log.debug('Could not use AppConfig: {}'.format(e))
# Restore before sub scans can occur
sys.path[:] = old_sys_path
os.chdir(cwd)
for item in settings_imports:
need_scan = item.startswith(_filescan_modules)
yield ('django', item, project_path if need_scan else None)
for app in installed_apps:
need_scan = app.startswith(project)
yield ('django', app, project_path if need_scan else None)
|
Attempts to load a Django project and get package dependencies from
settings.
Tested using Django 1.4 and 1.8. Not sure if some nuances are missed in
the other versions.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/frameworks/django.py#L40-L132
|
[
"def import_path_from_file(filename, as_list=False):\n '''Returns a tuple of the import path and root module directory for the\n supplied file.\n '''\n module_path = []\n basename = os.path.splitext(os.path.basename(filename))[0]\n if basename != '__init__':\n module_path.append(basename)\n\n dirname = os.path.dirname(filename)\n while os.path.isfile(os.path.join(dirname, '__init__.py')):\n dirname, tail = os.path.split(dirname)\n module_path.insert(0, tail)\n\n if as_list:\n return module_path, dirname\n return '.'.join(module_path), dirname\n",
"def scan_django_settings(values, imports):\n '''Recursively scans Django settings for values that appear to be\n imported modules.\n '''\n if isinstance(values, (str, bytes)):\n if utils.is_import_str(values):\n imports.add(values)\n elif isinstance(values, dict):\n for k, v in values.items():\n scan_django_settings(k, imports)\n scan_django_settings(v, imports)\n elif hasattr(values, '__file__') and getattr(values, '__file__'):\n imp, _ = utils.import_path_from_file(getattr(values, '__file__'))\n imports.add(imp)\n elif hasattr(values, '__iter__'):\n for item in values:\n scan_django_settings(item, imports)\n"
] |
from __future__ import absolute_import, unicode_literals
import re
import os
import sys
import time
from .. import utils, log
_excluded_settings = (
'ALLOWED_HOSTS',
)
_filescan_modules = (
'django.db.backends',
'django.core.cache.backends',
)
def scan_django_settings(values, imports):
'''Recursively scans Django settings for values that appear to be
imported modules.
'''
if isinstance(values, (str, bytes)):
if utils.is_import_str(values):
imports.add(values)
elif isinstance(values, dict):
for k, v in values.items():
scan_django_settings(k, imports)
scan_django_settings(v, imports)
elif hasattr(values, '__file__') and getattr(values, '__file__'):
imp, _ = utils.import_path_from_file(getattr(values, '__file__'))
imports.add(imp)
elif hasattr(values, '__iter__'):
for item in values:
scan_django_settings(item, imports)
|
tweekmonster/moult
|
moult/filesystem_scanner.py
|
_scan_file
|
python
|
def _scan_file(filename, sentinel, source_type='import'):
'''Generator that performs the actual scanning of files.
Yeilds a tuple containing import type, import path, and an extra file
that should be scanned. Extra file scans should be the file or directory
that relates to the import name.
'''
filename = os.path.abspath(filename)
real_filename = os.path.realpath(filename)
if os.path.getsize(filename) <= max_file_size:
if real_filename not in sentinel and os.path.isfile(filename):
sentinel.add(real_filename)
basename = os.path.basename(filename)
scope, imports = ast_scan_file(filename)
if scope is not None and imports is not None:
for imp in imports:
yield (source_type, imp.module, None)
if 'INSTALLED_APPS' in scope and basename == 'settings.py':
log.info('Found Django settings: %s', filename)
for item in django.handle_django_settings(filename):
yield item
else:
log.warn('Could not scan imports from: %s', filename)
else:
log.warn('File size too large: %s', filename)
|
Generator that performs the actual scanning of files.
Yeilds a tuple containing import type, import path, and an extra file
that should be scanned. Extra file scans should be the file or directory
that relates to the import name.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/filesystem_scanner.py#L20-L48
| null |
import os
import re
from .classes import PyModule
from .ast_scanner import ast_scan_file
from .frameworks import django
from . import utils, log
max_directory_depth = 20
max_file_size = 1024 * 1204
# Common ignorable directories
_dir_ignore = re.compile(r'(\.(git|hg|svn|tox)|CVS|__pycache__)\b')
# Files to not even bother with scanning
_ext_ignore = re.compile(r'\.(pyc|html|js|css|zip|tar(\.gz)?|txt|swp|~|bak|db)$', re.I)
def _scan_directory(directory, sentinel, depth=0):
'''Basically os.listdir with some filtering.
'''
directory = os.path.abspath(directory)
real_directory = os.path.realpath(directory)
if depth < max_directory_depth and real_directory not in sentinel \
and os.path.isdir(directory):
sentinel.add(real_directory)
for item in os.listdir(directory):
if item in ('.', '..'):
# I'm not sure if this is even needed any more.
continue
p = os.path.abspath(os.path.join(directory, item))
if (os.path.isdir(p) and _dir_ignore.search(p)) \
or (os.path.isfile(p) and _ext_ignore.search(p)):
continue
yield p
def scan_file(pym, filename, sentinel, installed):
'''Entry point scan that creates a PyModule instance if needed.
'''
if not utils.is_python_script(filename):
return
if not pym:
# This is for finding a previously created instance, not finding an
# installed module with the same name. Might need to base the name
# on the actual paths to reduce ambiguity in the printed scan results.
module = os.path.basename(filename)
pym = utils.find_package(module, installed)
if not pym:
pym = PyModule(module, 'SCRIPT', os.path.abspath(filename))
installed.insert(0, pym)
else:
pym.is_scan = True
for imp_type, import_path, extra_file_scan in _scan_file(filename, sentinel):
dep = utils.find_package(import_path, installed)
if dep:
dep.add_dependant(pym)
pym.add_dependency(dep)
if imp_type != 'import':
pym.add_framework(imp_type)
if extra_file_scan:
# extra_file_scan should be a directory or file containing the
# import name
scan_filename = utils.file_containing_import(import_path, extra_file_scan)
log.info('Related scan: %s - %s', import_path, scan_filename)
if scan_filename.endswith('__init__.py'):
scan_directory(pym, os.path.dirname(scan_filename), sentinel, installed)
else:
scan_file(pym, scan_filename, sentinel, installed)
return pym
def scan_directory(pym, directory, sentinel, installed, depth=0):
'''Entry point scan that creates a PyModule instance if needed.
'''
if not pym:
d = os.path.abspath(directory)
basename = os.path.basename(d)
pym = utils.find_package(basename, installed)
if not pym:
version = 'DIRECTORY'
if os.path.isfile(os.path.join(d, '__init__.py')):
version = 'MODULE'
pym = PyModule(basename, version, d)
installed.insert(0, pym)
else:
pym.is_scan = True
# Keep track of how many file scans resulted in nothing
bad_scans = 0
for item in _scan_directory(directory, sentinel, depth):
if os.path.isfile(item):
if bad_scans > 100:
# Keep in mind this counter resets if it a good scan happens
# in *this* directory. If you have a module with more than 100
# files in a single directory, you should probably refactor it.
log.debug('Stopping scan of directory since it looks like a data dump: %s', directory)
break
if not scan_file(pym, item, sentinel, installed):
bad_scans += 1
else:
bad_scans = 0
elif os.path.isdir(item):
scan_directory(pym, item, sentinel, installed, depth + 1)
return pym
def scan(filename, installed, sentinel=None):
if not sentinel:
sentinel = set()
if os.path.isfile(filename):
return scan_file(None, filename, sentinel, installed)
elif os.path.isdir(filename):
return scan_directory(None, filename, sentinel, installed)
else:
log.error('Could not scan: %s', filename)
|
tweekmonster/moult
|
moult/filesystem_scanner.py
|
_scan_directory
|
python
|
def _scan_directory(directory, sentinel, depth=0):
'''Basically os.listdir with some filtering.
'''
directory = os.path.abspath(directory)
real_directory = os.path.realpath(directory)
if depth < max_directory_depth and real_directory not in sentinel \
and os.path.isdir(directory):
sentinel.add(real_directory)
for item in os.listdir(directory):
if item in ('.', '..'):
# I'm not sure if this is even needed any more.
continue
p = os.path.abspath(os.path.join(directory, item))
if (os.path.isdir(p) and _dir_ignore.search(p)) \
or (os.path.isfile(p) and _ext_ignore.search(p)):
continue
yield p
|
Basically os.listdir with some filtering.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/filesystem_scanner.py#L51-L71
| null |
import os
import re
from .classes import PyModule
from .ast_scanner import ast_scan_file
from .frameworks import django
from . import utils, log
max_directory_depth = 20
max_file_size = 1024 * 1204
# Common ignorable directories
_dir_ignore = re.compile(r'(\.(git|hg|svn|tox)|CVS|__pycache__)\b')
# Files to not even bother with scanning
_ext_ignore = re.compile(r'\.(pyc|html|js|css|zip|tar(\.gz)?|txt|swp|~|bak|db)$', re.I)
def _scan_file(filename, sentinel, source_type='import'):
'''Generator that performs the actual scanning of files.
Yeilds a tuple containing import type, import path, and an extra file
that should be scanned. Extra file scans should be the file or directory
that relates to the import name.
'''
filename = os.path.abspath(filename)
real_filename = os.path.realpath(filename)
if os.path.getsize(filename) <= max_file_size:
if real_filename not in sentinel and os.path.isfile(filename):
sentinel.add(real_filename)
basename = os.path.basename(filename)
scope, imports = ast_scan_file(filename)
if scope is not None and imports is not None:
for imp in imports:
yield (source_type, imp.module, None)
if 'INSTALLED_APPS' in scope and basename == 'settings.py':
log.info('Found Django settings: %s', filename)
for item in django.handle_django_settings(filename):
yield item
else:
log.warn('Could not scan imports from: %s', filename)
else:
log.warn('File size too large: %s', filename)
def scan_file(pym, filename, sentinel, installed):
'''Entry point scan that creates a PyModule instance if needed.
'''
if not utils.is_python_script(filename):
return
if not pym:
# This is for finding a previously created instance, not finding an
# installed module with the same name. Might need to base the name
# on the actual paths to reduce ambiguity in the printed scan results.
module = os.path.basename(filename)
pym = utils.find_package(module, installed)
if not pym:
pym = PyModule(module, 'SCRIPT', os.path.abspath(filename))
installed.insert(0, pym)
else:
pym.is_scan = True
for imp_type, import_path, extra_file_scan in _scan_file(filename, sentinel):
dep = utils.find_package(import_path, installed)
if dep:
dep.add_dependant(pym)
pym.add_dependency(dep)
if imp_type != 'import':
pym.add_framework(imp_type)
if extra_file_scan:
# extra_file_scan should be a directory or file containing the
# import name
scan_filename = utils.file_containing_import(import_path, extra_file_scan)
log.info('Related scan: %s - %s', import_path, scan_filename)
if scan_filename.endswith('__init__.py'):
scan_directory(pym, os.path.dirname(scan_filename), sentinel, installed)
else:
scan_file(pym, scan_filename, sentinel, installed)
return pym
def scan_directory(pym, directory, sentinel, installed, depth=0):
'''Entry point scan that creates a PyModule instance if needed.
'''
if not pym:
d = os.path.abspath(directory)
basename = os.path.basename(d)
pym = utils.find_package(basename, installed)
if not pym:
version = 'DIRECTORY'
if os.path.isfile(os.path.join(d, '__init__.py')):
version = 'MODULE'
pym = PyModule(basename, version, d)
installed.insert(0, pym)
else:
pym.is_scan = True
# Keep track of how many file scans resulted in nothing
bad_scans = 0
for item in _scan_directory(directory, sentinel, depth):
if os.path.isfile(item):
if bad_scans > 100:
# Keep in mind this counter resets if it a good scan happens
# in *this* directory. If you have a module with more than 100
# files in a single directory, you should probably refactor it.
log.debug('Stopping scan of directory since it looks like a data dump: %s', directory)
break
if not scan_file(pym, item, sentinel, installed):
bad_scans += 1
else:
bad_scans = 0
elif os.path.isdir(item):
scan_directory(pym, item, sentinel, installed, depth + 1)
return pym
def scan(filename, installed, sentinel=None):
if not sentinel:
sentinel = set()
if os.path.isfile(filename):
return scan_file(None, filename, sentinel, installed)
elif os.path.isdir(filename):
return scan_directory(None, filename, sentinel, installed)
else:
log.error('Could not scan: %s', filename)
|
tweekmonster/moult
|
moult/filesystem_scanner.py
|
scan_file
|
python
|
def scan_file(pym, filename, sentinel, installed):
'''Entry point scan that creates a PyModule instance if needed.
'''
if not utils.is_python_script(filename):
return
if not pym:
# This is for finding a previously created instance, not finding an
# installed module with the same name. Might need to base the name
# on the actual paths to reduce ambiguity in the printed scan results.
module = os.path.basename(filename)
pym = utils.find_package(module, installed)
if not pym:
pym = PyModule(module, 'SCRIPT', os.path.abspath(filename))
installed.insert(0, pym)
else:
pym.is_scan = True
for imp_type, import_path, extra_file_scan in _scan_file(filename, sentinel):
dep = utils.find_package(import_path, installed)
if dep:
dep.add_dependant(pym)
pym.add_dependency(dep)
if imp_type != 'import':
pym.add_framework(imp_type)
if extra_file_scan:
# extra_file_scan should be a directory or file containing the
# import name
scan_filename = utils.file_containing_import(import_path, extra_file_scan)
log.info('Related scan: %s - %s', import_path, scan_filename)
if scan_filename.endswith('__init__.py'):
scan_directory(pym, os.path.dirname(scan_filename), sentinel, installed)
else:
scan_file(pym, scan_filename, sentinel, installed)
return pym
|
Entry point scan that creates a PyModule instance if needed.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/filesystem_scanner.py#L74-L111
|
[
"def _scan_file(filename, sentinel, source_type='import'):\n '''Generator that performs the actual scanning of files.\n\n Yeilds a tuple containing import type, import path, and an extra file\n that should be scanned. Extra file scans should be the file or directory\n that relates to the import name.\n '''\n filename = os.path.abspath(filename)\n real_filename = os.path.realpath(filename)\n\n if os.path.getsize(filename) <= max_file_size:\n if real_filename not in sentinel and os.path.isfile(filename):\n sentinel.add(real_filename)\n\n basename = os.path.basename(filename)\n scope, imports = ast_scan_file(filename)\n\n if scope is not None and imports is not None:\n for imp in imports:\n yield (source_type, imp.module, None)\n\n if 'INSTALLED_APPS' in scope and basename == 'settings.py':\n log.info('Found Django settings: %s', filename)\n for item in django.handle_django_settings(filename):\n yield item\n else:\n log.warn('Could not scan imports from: %s', filename)\n else:\n log.warn('File size too large: %s', filename)\n",
"def is_python_script(filename):\n '''Checks a file to see if it's a python script of some sort.\n '''\n if filename.lower().endswith('.py'):\n return True\n\n if not os.path.isfile(filename):\n return False\n\n try:\n with open(filename, 'rb') as fp:\n if fp.read(2) != b'#!':\n return False\n return re.match(r'.*python', str_(fp.readline()))\n except IOError:\n pass\n\n return False\n"
] |
import os
import re
from .classes import PyModule
from .ast_scanner import ast_scan_file
from .frameworks import django
from . import utils, log
max_directory_depth = 20
max_file_size = 1024 * 1204
# Common ignorable directories
_dir_ignore = re.compile(r'(\.(git|hg|svn|tox)|CVS|__pycache__)\b')
# Files to not even bother with scanning
_ext_ignore = re.compile(r'\.(pyc|html|js|css|zip|tar(\.gz)?|txt|swp|~|bak|db)$', re.I)
def _scan_file(filename, sentinel, source_type='import'):
'''Generator that performs the actual scanning of files.
Yeilds a tuple containing import type, import path, and an extra file
that should be scanned. Extra file scans should be the file or directory
that relates to the import name.
'''
filename = os.path.abspath(filename)
real_filename = os.path.realpath(filename)
if os.path.getsize(filename) <= max_file_size:
if real_filename not in sentinel and os.path.isfile(filename):
sentinel.add(real_filename)
basename = os.path.basename(filename)
scope, imports = ast_scan_file(filename)
if scope is not None and imports is not None:
for imp in imports:
yield (source_type, imp.module, None)
if 'INSTALLED_APPS' in scope and basename == 'settings.py':
log.info('Found Django settings: %s', filename)
for item in django.handle_django_settings(filename):
yield item
else:
log.warn('Could not scan imports from: %s', filename)
else:
log.warn('File size too large: %s', filename)
def _scan_directory(directory, sentinel, depth=0):
'''Basically os.listdir with some filtering.
'''
directory = os.path.abspath(directory)
real_directory = os.path.realpath(directory)
if depth < max_directory_depth and real_directory not in sentinel \
and os.path.isdir(directory):
sentinel.add(real_directory)
for item in os.listdir(directory):
if item in ('.', '..'):
# I'm not sure if this is even needed any more.
continue
p = os.path.abspath(os.path.join(directory, item))
if (os.path.isdir(p) and _dir_ignore.search(p)) \
or (os.path.isfile(p) and _ext_ignore.search(p)):
continue
yield p
def scan_directory(pym, directory, sentinel, installed, depth=0):
'''Entry point scan that creates a PyModule instance if needed.
'''
if not pym:
d = os.path.abspath(directory)
basename = os.path.basename(d)
pym = utils.find_package(basename, installed)
if not pym:
version = 'DIRECTORY'
if os.path.isfile(os.path.join(d, '__init__.py')):
version = 'MODULE'
pym = PyModule(basename, version, d)
installed.insert(0, pym)
else:
pym.is_scan = True
# Keep track of how many file scans resulted in nothing
bad_scans = 0
for item in _scan_directory(directory, sentinel, depth):
if os.path.isfile(item):
if bad_scans > 100:
# Keep in mind this counter resets if it a good scan happens
# in *this* directory. If you have a module with more than 100
# files in a single directory, you should probably refactor it.
log.debug('Stopping scan of directory since it looks like a data dump: %s', directory)
break
if not scan_file(pym, item, sentinel, installed):
bad_scans += 1
else:
bad_scans = 0
elif os.path.isdir(item):
scan_directory(pym, item, sentinel, installed, depth + 1)
return pym
def scan(filename, installed, sentinel=None):
if not sentinel:
sentinel = set()
if os.path.isfile(filename):
return scan_file(None, filename, sentinel, installed)
elif os.path.isdir(filename):
return scan_directory(None, filename, sentinel, installed)
else:
log.error('Could not scan: %s', filename)
|
tweekmonster/moult
|
moult/filesystem_scanner.py
|
scan_directory
|
python
|
def scan_directory(pym, directory, sentinel, installed, depth=0):
'''Entry point scan that creates a PyModule instance if needed.
'''
if not pym:
d = os.path.abspath(directory)
basename = os.path.basename(d)
pym = utils.find_package(basename, installed)
if not pym:
version = 'DIRECTORY'
if os.path.isfile(os.path.join(d, '__init__.py')):
version = 'MODULE'
pym = PyModule(basename, version, d)
installed.insert(0, pym)
else:
pym.is_scan = True
# Keep track of how many file scans resulted in nothing
bad_scans = 0
for item in _scan_directory(directory, sentinel, depth):
if os.path.isfile(item):
if bad_scans > 100:
# Keep in mind this counter resets if it a good scan happens
# in *this* directory. If you have a module with more than 100
# files in a single directory, you should probably refactor it.
log.debug('Stopping scan of directory since it looks like a data dump: %s', directory)
break
if not scan_file(pym, item, sentinel, installed):
bad_scans += 1
else:
bad_scans = 0
elif os.path.isdir(item):
scan_directory(pym, item, sentinel, installed, depth + 1)
return pym
|
Entry point scan that creates a PyModule instance if needed.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/filesystem_scanner.py#L114-L149
|
[
"def _scan_directory(directory, sentinel, depth=0):\n '''Basically os.listdir with some filtering.\n '''\n directory = os.path.abspath(directory)\n real_directory = os.path.realpath(directory)\n\n if depth < max_directory_depth and real_directory not in sentinel \\\n and os.path.isdir(directory):\n sentinel.add(real_directory)\n\n for item in os.listdir(directory):\n if item in ('.', '..'):\n # I'm not sure if this is even needed any more.\n continue\n\n p = os.path.abspath(os.path.join(directory, item))\n if (os.path.isdir(p) and _dir_ignore.search(p)) \\\n or (os.path.isfile(p) and _ext_ignore.search(p)):\n continue\n\n yield p\n",
"def scan_file(pym, filename, sentinel, installed):\n '''Entry point scan that creates a PyModule instance if needed.\n '''\n if not utils.is_python_script(filename):\n return\n\n if not pym:\n # This is for finding a previously created instance, not finding an\n # installed module with the same name. Might need to base the name\n # on the actual paths to reduce ambiguity in the printed scan results.\n module = os.path.basename(filename)\n pym = utils.find_package(module, installed)\n if not pym:\n pym = PyModule(module, 'SCRIPT', os.path.abspath(filename))\n installed.insert(0, pym)\n else:\n pym.is_scan = True\n\n for imp_type, import_path, extra_file_scan in _scan_file(filename, sentinel):\n dep = utils.find_package(import_path, installed)\n if dep:\n dep.add_dependant(pym)\n pym.add_dependency(dep)\n\n if imp_type != 'import':\n pym.add_framework(imp_type)\n\n if extra_file_scan:\n # extra_file_scan should be a directory or file containing the\n # import name\n scan_filename = utils.file_containing_import(import_path, extra_file_scan)\n log.info('Related scan: %s - %s', import_path, scan_filename)\n if scan_filename.endswith('__init__.py'):\n scan_directory(pym, os.path.dirname(scan_filename), sentinel, installed)\n else:\n scan_file(pym, scan_filename, sentinel, installed)\n\n return pym\n",
"def find_package(name, installed, package=False):\n '''Finds a package in the installed list.\n\n If `package` is true, match package names, otherwise, match import paths.\n '''\n if package:\n name = name.lower()\n tests = (\n lambda x: x.user and name == x.name.lower(),\n lambda x: x.local and name == x.name.lower(),\n lambda x: name == x.name.lower(),\n )\n else:\n tests = (\n lambda x: x.user and name in x.import_names,\n lambda x: x.local and name in x.import_names,\n lambda x: name in x.import_names,\n )\n\n for t in tests:\n try:\n found = list(filter(t, installed))\n if found and not found[0].is_scan:\n return found[0]\n except StopIteration:\n pass\n return None\n"
] |
import os
import re
from .classes import PyModule
from .ast_scanner import ast_scan_file
from .frameworks import django
from . import utils, log
max_directory_depth = 20
max_file_size = 1024 * 1204
# Common ignorable directories
_dir_ignore = re.compile(r'(\.(git|hg|svn|tox)|CVS|__pycache__)\b')
# Files to not even bother with scanning
_ext_ignore = re.compile(r'\.(pyc|html|js|css|zip|tar(\.gz)?|txt|swp|~|bak|db)$', re.I)
def _scan_file(filename, sentinel, source_type='import'):
'''Generator that performs the actual scanning of files.
Yeilds a tuple containing import type, import path, and an extra file
that should be scanned. Extra file scans should be the file or directory
that relates to the import name.
'''
filename = os.path.abspath(filename)
real_filename = os.path.realpath(filename)
if os.path.getsize(filename) <= max_file_size:
if real_filename not in sentinel and os.path.isfile(filename):
sentinel.add(real_filename)
basename = os.path.basename(filename)
scope, imports = ast_scan_file(filename)
if scope is not None and imports is not None:
for imp in imports:
yield (source_type, imp.module, None)
if 'INSTALLED_APPS' in scope and basename == 'settings.py':
log.info('Found Django settings: %s', filename)
for item in django.handle_django_settings(filename):
yield item
else:
log.warn('Could not scan imports from: %s', filename)
else:
log.warn('File size too large: %s', filename)
def _scan_directory(directory, sentinel, depth=0):
'''Basically os.listdir with some filtering.
'''
directory = os.path.abspath(directory)
real_directory = os.path.realpath(directory)
if depth < max_directory_depth and real_directory not in sentinel \
and os.path.isdir(directory):
sentinel.add(real_directory)
for item in os.listdir(directory):
if item in ('.', '..'):
# I'm not sure if this is even needed any more.
continue
p = os.path.abspath(os.path.join(directory, item))
if (os.path.isdir(p) and _dir_ignore.search(p)) \
or (os.path.isfile(p) and _ext_ignore.search(p)):
continue
yield p
def scan_file(pym, filename, sentinel, installed):
'''Entry point scan that creates a PyModule instance if needed.
'''
if not utils.is_python_script(filename):
return
if not pym:
# This is for finding a previously created instance, not finding an
# installed module with the same name. Might need to base the name
# on the actual paths to reduce ambiguity in the printed scan results.
module = os.path.basename(filename)
pym = utils.find_package(module, installed)
if not pym:
pym = PyModule(module, 'SCRIPT', os.path.abspath(filename))
installed.insert(0, pym)
else:
pym.is_scan = True
for imp_type, import_path, extra_file_scan in _scan_file(filename, sentinel):
dep = utils.find_package(import_path, installed)
if dep:
dep.add_dependant(pym)
pym.add_dependency(dep)
if imp_type != 'import':
pym.add_framework(imp_type)
if extra_file_scan:
# extra_file_scan should be a directory or file containing the
# import name
scan_filename = utils.file_containing_import(import_path, extra_file_scan)
log.info('Related scan: %s - %s', import_path, scan_filename)
if scan_filename.endswith('__init__.py'):
scan_directory(pym, os.path.dirname(scan_filename), sentinel, installed)
else:
scan_file(pym, scan_filename, sentinel, installed)
return pym
def scan(filename, installed, sentinel=None):
if not sentinel:
sentinel = set()
if os.path.isfile(filename):
return scan_file(None, filename, sentinel, installed)
elif os.path.isdir(filename):
return scan_directory(None, filename, sentinel, installed)
else:
log.error('Could not scan: %s', filename)
|
tweekmonster/moult
|
moult/ast_scanner.py
|
ast_value
|
python
|
def ast_value(val, scope, return_name=False):
'''Recursively parse out an AST value. This makes no attempt to load
modules or reconstruct functions on purpose. We do not want to
inadvertently call destructive code.
'''
# :TODO: refactor the hell out of this
try:
if isinstance(val, (ast.Assign, ast.Delete)):
if hasattr(val, 'value'):
value = ast_value(val.value, scope)
else:
value = None
for t in val.targets:
name = ast_value(t, scope, return_name=True)
if isinstance(t.ctx, ast.Del):
if name in scope:
scope.pop(name)
elif isinstance(t.ctx, ast.Store):
scope[name] = value
return
elif isinstance(val, ast.Expr) and isinstance(val.value, ast.Name):
return ast_value(val.value)
if isinstance(val, ast.Name):
if isinstance(val.ctx, ast.Load):
if val.id == 'None':
return None
elif val.id == 'True':
return True
elif val.id == 'False':
return False
if val.id in scope:
return scope[val.id]
if return_name:
return val.id
elif isinstance(val.ctx, ast.Store):
if return_name:
return val.id
return None
if isinstance(val, ast.Subscript):
toslice = ast_value(val.value, scope)
theslice = ast_value(val.slice, scope)
return toslice[theslice]
elif isinstance(val, ast.Index):
return ast_value(val.value, scope)
elif isinstance(val, ast.Slice):
lower = ast_value(val.lower)
upper = ast_value(val.upper)
step = ast_value(val.step)
return slice(lower, upper, step)
if isinstance(val, list):
return [ast_value(x, scope) for x in val]
elif isinstance(val, tuple):
return tuple(ast_value(x, scope) for x in val)
if isinstance(val, ast.Attribute):
name = ast_value(val.value, scope, return_name=True)
if isinstance(val.ctx, ast.Load):
return '.'.join((name, val.attr))
if return_name:
return name
elif isinstance(val, ast.keyword):
return {val.arg: ast_value(val.value, scope)}
elif isinstance(val, ast.List):
return [ast_value(x, scope) for x in val.elts]
elif isinstance(val, ast.Tuple):
return tuple(ast_value(x, scope) for x in val.elts)
elif isinstance(val, ast.Dict):
return dict(zip([ast_value(x, scope) for x in val.keys],
[ast_value(x, scope) for x in val.values]))
elif isinstance(val, ast.Num):
return val.n
elif isinstance(val, ast.Str):
return val.s
elif hasattr(ast, 'Bytes') and isinstance(val, ast.Bytes):
return bytes(val.s)
except Exception:
# Don't care, just return None
pass
return None
|
Recursively parse out an AST value. This makes no attempt to load
modules or reconstruct functions on purpose. We do not want to
inadvertently call destructive code.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/ast_scanner.py#L22-L106
|
[
"def ast_value(val, scope, return_name=False):\n '''Recursively parse out an AST value. This makes no attempt to load\n modules or reconstruct functions on purpose. We do not want to\n inadvertently call destructive code.\n '''\n # :TODO: refactor the hell out of this\n try:\n if isinstance(val, (ast.Assign, ast.Delete)):\n if hasattr(val, 'value'):\n value = ast_value(val.value, scope)\n else:\n value = None\n for t in val.targets:\n name = ast_value(t, scope, return_name=True)\n if isinstance(t.ctx, ast.Del):\n if name in scope:\n scope.pop(name)\n elif isinstance(t.ctx, ast.Store):\n scope[name] = value\n return\n elif isinstance(val, ast.Expr) and isinstance(val.value, ast.Name):\n return ast_value(val.value)\n\n if isinstance(val, ast.Name):\n if isinstance(val.ctx, ast.Load):\n if val.id == 'None':\n return None\n elif val.id == 'True':\n return True\n elif val.id == 'False':\n return False\n\n if val.id in scope:\n return scope[val.id]\n\n if return_name:\n return val.id\n elif isinstance(val.ctx, ast.Store):\n if return_name:\n return val.id\n return None\n\n if isinstance(val, ast.Subscript):\n toslice = ast_value(val.value, scope)\n theslice = ast_value(val.slice, scope)\n return toslice[theslice]\n elif isinstance(val, ast.Index):\n return ast_value(val.value, scope)\n elif isinstance(val, ast.Slice):\n lower = ast_value(val.lower)\n upper = ast_value(val.upper)\n step = ast_value(val.step)\n return slice(lower, upper, step)\n\n if isinstance(val, list):\n return [ast_value(x, scope) for x in val]\n elif isinstance(val, tuple):\n return tuple(ast_value(x, scope) for x in val)\n\n if isinstance(val, ast.Attribute):\n name = ast_value(val.value, scope, return_name=True)\n if isinstance(val.ctx, ast.Load):\n return '.'.join((name, val.attr))\n if return_name:\n return name\n elif isinstance(val, ast.keyword):\n return {val.arg: ast_value(val.value, scope)}\n elif isinstance(val, ast.List):\n return [ast_value(x, scope) for x in val.elts]\n elif 
isinstance(val, ast.Tuple):\n return tuple(ast_value(x, scope) for x in val.elts)\n elif isinstance(val, ast.Dict):\n return dict(zip([ast_value(x, scope) for x in val.keys],\n [ast_value(x, scope) for x in val.values]))\n elif isinstance(val, ast.Num):\n return val.n\n elif isinstance(val, ast.Str):\n return val.s\n elif hasattr(ast, 'Bytes') and isinstance(val, ast.Bytes):\n return bytes(val.s)\n except Exception:\n # Don't care, just return None\n pass\n\n return None\n"
] |
from __future__ import unicode_literals
import io
import re
import ast
from .exceptions import MoultScannerError
from .compat import str_
from . import utils, log
_fallback_re = re.compile(r'''
^[\ \t]*(
from[\ \t]+[\w\.]+[\ \t]+import\s+\([\s\w,]+\)|
from[\ \t]+[\w\.]+[\ \t]+import[\ \t\w,]+|
import[\ \t]+\([\s\w,]+\)|
import[\ \t]+[\ \t\w,]+
)
''', re.VERBOSE | re.MULTILINE | re.UNICODE)
def flatten_call_args(args, kwlist, starargs, kwargs):
if starargs:
args.extend(starargs)
keywords = {}
for kw in kwlist:
keywords.update(kw)
if kwargs:
keywords.update(keywords)
return args, keywords
def get_args(args, kwargs, arg_names):
'''Get arguments as a dict.
'''
n_args = len(arg_names)
if len(args) + len(kwargs) > n_args:
raise MoultScannerError('Too many arguments supplied. Expected: {}'.format(n_args))
out_args = {}
for i, a in enumerate(args):
out_args[arg_names[i]] = a
for a in arg_names:
if a not in out_args:
out_args[a] = None
out_args.update(kwargs)
return out_args
def parse_programmatic_import(node, scope):
name = ast_value(node.func, scope, return_name=True)
if not name:
return []
args, kwargs = flatten_call_args(ast_value(node.args, scope),
ast_value(node.keywords, scope),
ast_value(node.starargs, scope),
ast_value(node.kwargs, scope))
imports = []
if name.endswith('__import__'):
func_args = get_args(args, kwargs, ['name', 'globals', 'locals',
'fromlist', 'level'])
log.debug('Found `__import__` with args: {}'.format(func_args))
if not func_args['name']:
raise MoultScannerError('No name supplied for __import__')
if func_args['fromlist']:
if not hasattr(func_args['fromlist'], '__iter__'):
raise MoultScannerError('__import__ fromlist is not iterable type')
for fromname in func_args['fromlist']:
imports.append((func_args['name'], fromname))
else:
imports.append((None, func_args['name']))
elif name.endswith('import_module'):
func_args = get_args(args, kwargs, ['name', 'package'])
log.debug('Found `import_module` with args: {}'.format(func_args))
if not func_args['name']:
raise MoultScannerError('No name supplied for import_module')
if func_args['package'] and not isinstance(func_args['package'], (bytes, str_)):
raise MoultScannerError('import_module package not string type')
imports.append((func_args['package'], func_args['name']))
return imports
class ResolvedImport(object):
def __init__(self, import_path, import_root):
module = import_path.split('.', 1)[0]
self.module = module
self.import_path = import_path
self.is_stdlib = utils.is_stdlib(module)
self.filename = None
if not self.is_stdlib:
self.filename = utils.file_containing_import(import_path, import_root)
def __repr__(self):
return '<ResolvedImport {} ({})>'.format(self.import_path, self.filename)
class ImportNodeVisitor(ast.NodeVisitor):
'''A simplistic AST visitor that looks for easily identified imports.
It can resolve simple assignment variables defined within the module.
'''
def reset(self, filename):
self.filename = filename
self.import_path, self.import_root = utils.import_path_from_file(filename)
def add_import(self, *names):
for module, name in names:
if module and module.startswith('.'):
module = utils.resolve_import(module, self.import_path)
elif not module:
module = ''
module = '.'.join((module, name.strip('.'))).strip('.')
if module not in self._imports:
self._imports.add(module)
self.imports.append(ResolvedImport(module, self.import_root))
def visit_Module(self, node):
log.debug('Resetting AST visitor with module path: %s', self.import_path)
self._imports = set()
self.imports = []
self.scope = {}
if node:
self.generic_visit(node)
def visit_Import(self, node):
for n in node.names:
self.add_import((n.name, ''))
self.generic_visit(node)
def visit_ImportFrom(self, node):
module = '{}{}'.format('.' * node.level, str_(node.module or ''))
for n in node.names:
self.add_import((module, n.name))
self.generic_visit(node)
def visit_Expr(self, node):
if isinstance(node.value, ast.Call):
try:
self.add_import(*parse_programmatic_import(node.value, self.scope))
except MoultScannerError as e:
log.debug('%s, File: %s', e, self.filename)
elif isinstance(node.value, ast.Name):
ast_value(node.value, self.scope)
self.generic_visit(node)
def visit_Assign(self, node):
ast_value(node, self.scope)
def visit_Delete(self, node):
ast_value(node, self.scope)
def visit(self, node):
super(ImportNodeVisitor, self).visit(node)
ast_visitor = ImportNodeVisitor()
def _ast_scan_file_re(filename):
try:
with io.open(filename, 'rt', encoding='utf8') as fp:
script = fp.read()
normalized = ''
for imp in _fallback_re.finditer(script):
imp_line = imp.group(1)
try:
imp_line = imp_line.decode('utf8')
except AttributeError:
pass
except UnicodeEncodeError:
log.warn('Unicode import failed: %s', imp_line)
continue
imp_line = re.sub(r'[\(\)]', '', imp_line)
normalized += ' '.join(imp_line.split()).strip(',') + '\n'
log.debug('Normalized imports:\n%s', normalized)
try:
root = ast.parse(normalized, filename=filename)
except SyntaxError:
log.error('Could not parse file using regex scan: %s', filename)
log.info('Exception:', exc_info=True)
return None, None
log.debug('Starting AST Scan (regex): %s', filename)
ast_visitor.reset(filename)
ast_visitor.visit(root)
return ast_visitor.scope, ast_visitor.imports
except IOError:
log.warn('Could not open file: %s', filename)
return None, None
def ast_scan_file(filename, re_fallback=True):
'''Scans a file for imports using AST.
In addition to normal imports, try to get imports via `__import__`
or `import_module` calls. The AST parser should be able to resolve
simple variable assignments in cases where these functions are called
with variables instead of strings.
'''
try:
with io.open(filename, 'rb') as fp:
try:
root = ast.parse(fp.read(), filename=filename)
except (SyntaxError, IndentationError):
if re_fallback:
log.debug('Falling back to regex scanner')
return _ast_scan_file_re(filename)
else:
log.error('Could not parse file: %s', filename)
log.info('Exception:', exc_info=True)
return None, None
log.debug('Starting AST Scan: %s', filename)
ast_visitor.reset(filename)
ast_visitor.visit(root)
log.debug('Project path: %s', ast_visitor.import_root)
return ast_visitor.scope, ast_visitor.imports
except IOError:
log.warn('Could not open file: %s', filename)
return None, None
|
tweekmonster/moult
|
moult/ast_scanner.py
|
get_args
|
python
|
def get_args(args, kwargs, arg_names):
    '''Map positional and keyword arguments onto `arg_names`.

    Returns a dict with one entry per expected name; names covered by
    neither `args` nor `kwargs` map to None.  Raises MoultScannerError
    when more arguments than names are supplied.
    '''
    limit = len(arg_names)
    if len(args) + len(kwargs) > limit:
        raise MoultScannerError('Too many arguments supplied. Expected: {}'.format(limit))
    # Start every expected name at None, overlay positionals, then keywords.
    collected = dict((name, None) for name in arg_names)
    collected.update(zip(arg_names, args))
    collected.update(kwargs)
    return collected
|
Get arguments as a dict.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/ast_scanner.py#L123-L139
| null |
from __future__ import unicode_literals
import io
import re
import ast
from .exceptions import MoultScannerError
from .compat import str_
from . import utils, log
_fallback_re = re.compile(r'''
^[\ \t]*(
from[\ \t]+[\w\.]+[\ \t]+import\s+\([\s\w,]+\)|
from[\ \t]+[\w\.]+[\ \t]+import[\ \t\w,]+|
import[\ \t]+\([\s\w,]+\)|
import[\ \t]+[\ \t\w,]+
)
''', re.VERBOSE | re.MULTILINE | re.UNICODE)
def ast_value(val, scope, return_name=False):
    '''Recursively parse out an AST value. This makes no attempt to load
    modules or reconstruct functions on purpose. We do not want to
    inadvertently call destructive code.

    Parameters
    ----------
    val : ast.AST, list, or tuple
        Node (or sequence of nodes) to evaluate.
    scope : dict
        Known name -> value bindings; updated in place by Assign/Delete.
    return_name : bool, optional
        When True, otherwise-unresolvable names are returned as strings
        (used to recover identifiers such as call targets).

    Returns
    -------
    The evaluated Python value, or None when evaluation is impossible.
    '''
    # :TODO: refactor the hell out of this
    try:
        if isinstance(val, (ast.Assign, ast.Delete)):
            # ast.Delete nodes have no `value` attribute.
            if hasattr(val, 'value'):
                value = ast_value(val.value, scope)
            else:
                value = None
            for t in val.targets:
                name = ast_value(t, scope, return_name=True)
                if isinstance(t.ctx, ast.Del):
                    if name in scope:
                        scope.pop(name)
                elif isinstance(t.ctx, ast.Store):
                    scope[name] = value
            return
        elif isinstance(val, ast.Expr) and isinstance(val.value, ast.Name):
            # Bug fix: `scope` was previously omitted here; the resulting
            # TypeError was silently swallowed by the blanket handler below.
            return ast_value(val.value, scope)

        if isinstance(val, ast.Name):
            if isinstance(val.ctx, ast.Load):
                # Python 2 compatibility: constants parse as Name nodes.
                if val.id == 'None':
                    return None
                elif val.id == 'True':
                    return True
                elif val.id == 'False':
                    return False
                if val.id in scope:
                    return scope[val.id]
                if return_name:
                    return val.id
            elif isinstance(val.ctx, ast.Store):
                if return_name:
                    return val.id
            return None

        if isinstance(val, ast.Subscript):
            toslice = ast_value(val.value, scope)
            theslice = ast_value(val.slice, scope)
            return toslice[theslice]
        elif isinstance(val, ast.Index):
            return ast_value(val.value, scope)
        elif isinstance(val, ast.Slice):
            # Bug fix: `scope` was previously omitted in these three calls,
            # so slice bounds always failed to evaluate (TypeError -> None).
            lower = ast_value(val.lower, scope)
            upper = ast_value(val.upper, scope)
            step = ast_value(val.step, scope)
            return slice(lower, upper, step)

        if isinstance(val, list):
            return [ast_value(x, scope) for x in val]
        elif isinstance(val, tuple):
            return tuple(ast_value(x, scope) for x in val)

        if isinstance(val, ast.Attribute):
            name = ast_value(val.value, scope, return_name=True)
            if isinstance(val.ctx, ast.Load):
                return '.'.join((name, val.attr))
            if return_name:
                return name
        elif isinstance(val, ast.keyword):
            return {val.arg: ast_value(val.value, scope)}
        elif isinstance(val, ast.List):
            return [ast_value(x, scope) for x in val.elts]
        elif isinstance(val, ast.Tuple):
            return tuple(ast_value(x, scope) for x in val.elts)
        elif isinstance(val, ast.Dict):
            return dict(zip([ast_value(x, scope) for x in val.keys],
                            [ast_value(x, scope) for x in val.values]))
        elif isinstance(val, ast.Num):
            return val.n
        elif isinstance(val, ast.Str):
            return val.s
        elif hasattr(ast, 'Bytes') and isinstance(val, ast.Bytes):
            return bytes(val.s)
        elif hasattr(ast, 'Constant') and isinstance(val, ast.Constant):
            # Python 3.8+ folds literals into ast.Constant; the deprecated
            # Num/Str/Bytes checks above still match numbers and strings,
            # but True/False/None only match here.
            return val.value
    except Exception:
        # Don't care, just return None
        pass
    return None
def flatten_call_args(args, kwlist, starargs, kwargs):
    '''Merge the argument pieces of an evaluated ast.Call.

    Parameters
    ----------
    args : list
        Evaluated positional arguments; extended in place with `starargs`.
    kwlist : iterable of dict
        Evaluated `ast.keyword` nodes (each a single-entry dict).
    starargs : list or None
        Evaluated `*args` expansion, if any.
    kwargs : dict or None
        Evaluated `**kwargs` expansion, if any.

    Returns
    -------
    tuple
        (args, keywords) with everything flattened into one list and one dict.
    '''
    if starargs:
        args.extend(starargs)
    keywords = {}
    for kw in kwlist:
        keywords.update(kw)
    if kwargs:
        # Bug fix: this previously read `keywords.update(keywords)` (a
        # no-op), so `**kwargs` values were silently dropped.
        keywords.update(kwargs)
    return args, keywords
def parse_programmatic_import(node, scope):
    '''Extract imports from an `ast.Call` to `__import__` or `import_module`.

    Returns a list of (package, name) tuples suitable for
    ImportNodeVisitor.add_import(), or an empty list when the call target
    cannot be resolved.  Raises MoultScannerError on malformed arguments.

    NOTE(review): `node.starargs` / `node.kwargs` were removed from
    ast.Call in Python 3.5 -- confirm which interpreter versions this
    must support.
    '''
    name = ast_value(node.func, scope, return_name=True)
    if not name:
        return []
    args, kwargs = flatten_call_args(ast_value(node.args, scope),
                                     ast_value(node.keywords, scope),
                                     ast_value(node.starargs, scope),
                                     ast_value(node.kwargs, scope))
    imports = []
    if name.endswith('__import__'):
        func_args = get_args(args, kwargs, ['name', 'globals', 'locals',
                                            'fromlist', 'level'])
        log.debug('Found `__import__` with args: {}'.format(func_args))
        if not func_args['name']:
            raise MoultScannerError('No name supplied for __import__')
        if func_args['fromlist']:
            if not hasattr(func_args['fromlist'], '__iter__'):
                raise MoultScannerError('__import__ fromlist is not iterable type')
            # `from name import a, b` form: one tuple per fromlist entry.
            for fromname in func_args['fromlist']:
                imports.append((func_args['name'], fromname))
        else:
            imports.append((None, func_args['name']))
    elif name.endswith('import_module'):
        func_args = get_args(args, kwargs, ['name', 'package'])
        log.debug('Found `import_module` with args: {}'.format(func_args))
        if not func_args['name']:
            raise MoultScannerError('No name supplied for import_module')
        if func_args['package'] and not isinstance(func_args['package'], (bytes, str_)):
            raise MoultScannerError('import_module package not string type')
        imports.append((func_args['package'], func_args['name']))
    return imports
class ResolvedImport(object):
    '''A single import resolved to its top-level module and, for
    non-stdlib imports, the file that provides it.
    '''
    def __init__(self, import_path, import_root):
        # Top-level package name, e.g. 'os' for 'os.path'.
        module = import_path.split('.', 1)[0]
        self.module = module
        self.import_path = import_path
        self.is_stdlib = utils.is_stdlib(module)
        self.filename = None
        if not self.is_stdlib:
            # Only non-stdlib imports are resolved to a concrete file.
            self.filename = utils.file_containing_import(import_path, import_root)
    def __repr__(self):
        return '<ResolvedImport {} ({})>'.format(self.import_path, self.filename)
class ImportNodeVisitor(ast.NodeVisitor):
    '''A simplistic AST visitor that looks for easily identified imports.
    It can resolve simple assignment variables defined within the module.
    '''
    def reset(self, filename):
        # Re-target the visitor at a new file; per-module bookkeeping is
        # (re)initialized in visit_Module.
        self.filename = filename
        self.import_path, self.import_root = utils.import_path_from_file(filename)
    def add_import(self, *names):
        # Each item is a (module, name) pair as produced by the visit_*
        # methods or parse_programmatic_import().
        for module, name in names:
            if module and module.startswith('.'):
                # Relative import: resolve against this module's own path.
                module = utils.resolve_import(module, self.import_path)
            elif not module:
                module = ''
            # Join package and name into one dotted path, tolerating empty
            # parts on either side.
            module = '.'.join((module, name.strip('.'))).strip('.')
            if module not in self._imports:
                # `_imports` (set) de-duplicates; `imports` (list) keeps
                # discovery order.
                self._imports.add(module)
                self.imports.append(ResolvedImport(module, self.import_root))
    def visit_Module(self, node):
        log.debug('Resetting AST visitor with module path: %s', self.import_path)
        self._imports = set()
        self.imports = []
        # Module-level name -> value bindings, filled by visit_Assign.
        self.scope = {}
        if node:
            self.generic_visit(node)
    def visit_Import(self, node):
        # `import a.b` -> ('a.b', '')
        for n in node.names:
            self.add_import((n.name, ''))
        self.generic_visit(node)
    def visit_ImportFrom(self, node):
        # `from ..pkg import x` -> ('..pkg', 'x'); node.level is the
        # number of leading dots.
        module = '{}{}'.format('.' * node.level, str_(node.module or ''))
        for n in node.names:
            self.add_import((module, n.name))
        self.generic_visit(node)
    def visit_Expr(self, node):
        if isinstance(node.value, ast.Call):
            # Catch programmatic imports: __import__() / import_module().
            try:
                self.add_import(*parse_programmatic_import(node.value, self.scope))
            except MoultScannerError as e:
                log.debug('%s, File: %s', e, self.filename)
        elif isinstance(node.value, ast.Name):
            ast_value(node.value, self.scope)
        self.generic_visit(node)
    def visit_Assign(self, node):
        # Track simple assignments so later calls can resolve variables.
        ast_value(node, self.scope)
    def visit_Delete(self, node):
        ast_value(node, self.scope)
    def visit(self, node):
        # Pure delegation; kept as an explicit extension point.
        super(ImportNodeVisitor, self).visit(node)
ast_visitor = ImportNodeVisitor()
def _ast_scan_file_re(filename):
    '''Fallback import scanner for files the `ast` module cannot parse.

    Import statements are extracted with a regex, normalized onto single
    lines, and only that synthetic snippet is parsed with `ast`.
    Returns the same (scope, imports) pair as `ast_scan_file`, or
    (None, None) when the file cannot be read or parsed.
    '''
    try:
        with io.open(filename, 'rt', encoding='utf8') as fp:
            script = fp.read()
            normalized = ''
            for imp in _fallback_re.finditer(script):
                imp_line = imp.group(1)
                try:
                    # Python 2 compatibility: the match may be bytes.
                    imp_line = imp_line.decode('utf8')
                except AttributeError:
                    pass
                except UnicodeEncodeError:
                    log.warn('Unicode import failed: %s', imp_line)
                    continue
                # Drop parentheses and collapse whitespace so each import
                # occupies exactly one line.
                imp_line = re.sub(r'[\(\)]', '', imp_line)
                normalized += ' '.join(imp_line.split()).strip(',') + '\n'
            log.debug('Normalized imports:\n%s', normalized)
            try:
                root = ast.parse(normalized, filename=filename)
            except SyntaxError:
                log.error('Could not parse file using regex scan: %s', filename)
                log.info('Exception:', exc_info=True)
                return None, None
            log.debug('Starting AST Scan (regex): %s', filename)
            ast_visitor.reset(filename)
            ast_visitor.visit(root)
            return ast_visitor.scope, ast_visitor.imports
    except IOError:
        log.warn('Could not open file: %s', filename)
    return None, None
def ast_scan_file(filename, re_fallback=True):
    '''Scans a file for imports using AST.
    In addition to normal imports, try to get imports via `__import__`
    or `import_module` calls. The AST parser should be able to resolve
    simple variable assignments in cases where these functions are called
    with variables instead of strings.

    Returns
    -------
    tuple
        (scope, imports) on success; (None, None) when the file cannot
        be read or parsed.
    '''
    try:
        with io.open(filename, 'rb') as fp:
            try:
                root = ast.parse(fp.read(), filename=filename)
            except (SyntaxError, IndentationError):
                if re_fallback:
                    # Broken source: salvage imports with the regex scanner.
                    log.debug('Falling back to regex scanner')
                    return _ast_scan_file_re(filename)
                else:
                    log.error('Could not parse file: %s', filename)
                    log.info('Exception:', exc_info=True)
                    return None, None
            log.debug('Starting AST Scan: %s', filename)
            ast_visitor.reset(filename)
            ast_visitor.visit(root)
            log.debug('Project path: %s', ast_visitor.import_root)
            return ast_visitor.scope, ast_visitor.imports
    except IOError:
        log.warn('Could not open file: %s', filename)
    return None, None
|
tweekmonster/moult
|
moult/ast_scanner.py
|
ast_scan_file
|
python
|
def ast_scan_file(filename, re_fallback=True):
'''Scans a file for imports using AST.
In addition to normal imports, try to get imports via `__import__`
or `import_module` calls. The AST parser should be able to resolve
simple variable assignments in cases where these functions are called
with variables instead of strings.
'''
try:
with io.open(filename, 'rb') as fp:
try:
root = ast.parse(fp.read(), filename=filename)
except (SyntaxError, IndentationError):
if re_fallback:
log.debug('Falling back to regex scanner')
return _ast_scan_file_re(filename)
else:
log.error('Could not parse file: %s', filename)
log.info('Exception:', exc_info=True)
return None, None
log.debug('Starting AST Scan: %s', filename)
ast_visitor.reset(filename)
ast_visitor.visit(root)
log.debug('Project path: %s', ast_visitor.import_root)
return ast_visitor.scope, ast_visitor.imports
except IOError:
log.warn('Could not open file: %s', filename)
return None, None
|
Scans a file for imports using AST.
In addition to normal imports, try to get imports via `__import__`
or `import_module` calls. The AST parser should be able to resolve
simple variable assignments in cases where these functions are called
with variables instead of strings.
|
train
|
https://github.com/tweekmonster/moult/blob/38d3a3b9002336219897ebe263ca1d8dcadbecf5/moult/ast_scanner.py#L291-L319
|
[
"def _ast_scan_file_re(filename):\n try:\n with io.open(filename, 'rt', encoding='utf8') as fp:\n script = fp.read()\n normalized = ''\n for imp in _fallback_re.finditer(script):\n imp_line = imp.group(1)\n try:\n imp_line = imp_line.decode('utf8')\n except AttributeError:\n pass\n except UnicodeEncodeError:\n log.warn('Unicode import failed: %s', imp_line)\n continue\n imp_line = re.sub(r'[\\(\\)]', '', imp_line)\n normalized += ' '.join(imp_line.split()).strip(',') + '\\n'\n log.debug('Normalized imports:\\n%s', normalized)\n\n try:\n root = ast.parse(normalized, filename=filename)\n except SyntaxError:\n log.error('Could not parse file using regex scan: %s', filename)\n log.info('Exception:', exc_info=True)\n return None, None\n\n log.debug('Starting AST Scan (regex): %s', filename)\n ast_visitor.reset(filename)\n ast_visitor.visit(root)\n return ast_visitor.scope, ast_visitor.imports\n except IOError:\n log.warn('Could not open file: %s', filename)\n\n return None, None\n",
"def reset(self, filename):\n self.filename = filename\n self.import_path, self.import_root = utils.import_path_from_file(filename)\n",
"def visit(self, node):\n super(ImportNodeVisitor, self).visit(node)\n"
] |
from __future__ import unicode_literals
import io
import re
import ast
from .exceptions import MoultScannerError
from .compat import str_
from . import utils, log
_fallback_re = re.compile(r'''
^[\ \t]*(
from[\ \t]+[\w\.]+[\ \t]+import\s+\([\s\w,]+\)|
from[\ \t]+[\w\.]+[\ \t]+import[\ \t\w,]+|
import[\ \t]+\([\s\w,]+\)|
import[\ \t]+[\ \t\w,]+
)
''', re.VERBOSE | re.MULTILINE | re.UNICODE)
def ast_value(val, scope, return_name=False):
    '''Recursively parse out an AST value. This makes no attempt to load
    modules or reconstruct functions on purpose. We do not want to
    inadvertently call destructive code.

    Parameters
    ----------
    val : ast.AST, list, or tuple
        Node (or sequence of nodes) to evaluate.
    scope : dict
        Known name -> value bindings; updated in place by Assign/Delete.
    return_name : bool, optional
        When True, otherwise-unresolvable names are returned as strings
        (used to recover identifiers such as call targets).

    Returns
    -------
    The evaluated Python value, or None when evaluation is impossible.
    '''
    # :TODO: refactor the hell out of this
    try:
        if isinstance(val, (ast.Assign, ast.Delete)):
            # ast.Delete nodes have no `value` attribute.
            if hasattr(val, 'value'):
                value = ast_value(val.value, scope)
            else:
                value = None
            for t in val.targets:
                name = ast_value(t, scope, return_name=True)
                if isinstance(t.ctx, ast.Del):
                    if name in scope:
                        scope.pop(name)
                elif isinstance(t.ctx, ast.Store):
                    scope[name] = value
            return
        elif isinstance(val, ast.Expr) and isinstance(val.value, ast.Name):
            # Bug fix: `scope` was previously omitted here; the resulting
            # TypeError was silently swallowed by the blanket handler below.
            return ast_value(val.value, scope)

        if isinstance(val, ast.Name):
            if isinstance(val.ctx, ast.Load):
                # Python 2 compatibility: constants parse as Name nodes.
                if val.id == 'None':
                    return None
                elif val.id == 'True':
                    return True
                elif val.id == 'False':
                    return False
                if val.id in scope:
                    return scope[val.id]
                if return_name:
                    return val.id
            elif isinstance(val.ctx, ast.Store):
                if return_name:
                    return val.id
            return None

        if isinstance(val, ast.Subscript):
            toslice = ast_value(val.value, scope)
            theslice = ast_value(val.slice, scope)
            return toslice[theslice]
        elif isinstance(val, ast.Index):
            return ast_value(val.value, scope)
        elif isinstance(val, ast.Slice):
            # Bug fix: `scope` was previously omitted in these three calls,
            # so slice bounds always failed to evaluate (TypeError -> None).
            lower = ast_value(val.lower, scope)
            upper = ast_value(val.upper, scope)
            step = ast_value(val.step, scope)
            return slice(lower, upper, step)

        if isinstance(val, list):
            return [ast_value(x, scope) for x in val]
        elif isinstance(val, tuple):
            return tuple(ast_value(x, scope) for x in val)

        if isinstance(val, ast.Attribute):
            name = ast_value(val.value, scope, return_name=True)
            if isinstance(val.ctx, ast.Load):
                return '.'.join((name, val.attr))
            if return_name:
                return name
        elif isinstance(val, ast.keyword):
            return {val.arg: ast_value(val.value, scope)}
        elif isinstance(val, ast.List):
            return [ast_value(x, scope) for x in val.elts]
        elif isinstance(val, ast.Tuple):
            return tuple(ast_value(x, scope) for x in val.elts)
        elif isinstance(val, ast.Dict):
            return dict(zip([ast_value(x, scope) for x in val.keys],
                            [ast_value(x, scope) for x in val.values]))
        elif isinstance(val, ast.Num):
            return val.n
        elif isinstance(val, ast.Str):
            return val.s
        elif hasattr(ast, 'Bytes') and isinstance(val, ast.Bytes):
            return bytes(val.s)
        elif hasattr(ast, 'Constant') and isinstance(val, ast.Constant):
            # Python 3.8+ folds literals into ast.Constant; the deprecated
            # Num/Str/Bytes checks above still match numbers and strings,
            # but True/False/None only match here.
            return val.value
    except Exception:
        # Don't care, just return None
        pass
    return None
def flatten_call_args(args, kwlist, starargs, kwargs):
    '''Merge the argument pieces of an evaluated ast.Call.

    Parameters
    ----------
    args : list
        Evaluated positional arguments; extended in place with `starargs`.
    kwlist : iterable of dict
        Evaluated `ast.keyword` nodes (each a single-entry dict).
    starargs : list or None
        Evaluated `*args` expansion, if any.
    kwargs : dict or None
        Evaluated `**kwargs` expansion, if any.

    Returns
    -------
    tuple
        (args, keywords) with everything flattened into one list and one dict.
    '''
    if starargs:
        args.extend(starargs)
    keywords = {}
    for kw in kwlist:
        keywords.update(kw)
    if kwargs:
        # Bug fix: this previously read `keywords.update(keywords)` (a
        # no-op), so `**kwargs` values were silently dropped.
        keywords.update(kwargs)
    return args, keywords
def get_args(args, kwargs, arg_names):
    '''Map positional and keyword arguments onto `arg_names`.

    Returns a dict with one entry per expected name; names covered by
    neither `args` nor `kwargs` map to None.  Raises MoultScannerError
    when more arguments than names are supplied.
    '''
    limit = len(arg_names)
    if len(args) + len(kwargs) > limit:
        raise MoultScannerError('Too many arguments supplied. Expected: {}'.format(limit))
    # Start every expected name at None, overlay positionals, then keywords.
    collected = dict((name, None) for name in arg_names)
    collected.update(zip(arg_names, args))
    collected.update(kwargs)
    return collected
def parse_programmatic_import(node, scope):
    '''Extract imports from an `ast.Call` to `__import__` or `import_module`.

    Returns a list of (package, name) tuples suitable for
    ImportNodeVisitor.add_import(), or an empty list when the call target
    cannot be resolved.  Raises MoultScannerError on malformed arguments.

    NOTE(review): `node.starargs` / `node.kwargs` were removed from
    ast.Call in Python 3.5 -- confirm which interpreter versions this
    must support.
    '''
    name = ast_value(node.func, scope, return_name=True)
    if not name:
        return []
    args, kwargs = flatten_call_args(ast_value(node.args, scope),
                                     ast_value(node.keywords, scope),
                                     ast_value(node.starargs, scope),
                                     ast_value(node.kwargs, scope))
    imports = []
    if name.endswith('__import__'):
        func_args = get_args(args, kwargs, ['name', 'globals', 'locals',
                                            'fromlist', 'level'])
        log.debug('Found `__import__` with args: {}'.format(func_args))
        if not func_args['name']:
            raise MoultScannerError('No name supplied for __import__')
        if func_args['fromlist']:
            if not hasattr(func_args['fromlist'], '__iter__'):
                raise MoultScannerError('__import__ fromlist is not iterable type')
            # `from name import a, b` form: one tuple per fromlist entry.
            for fromname in func_args['fromlist']:
                imports.append((func_args['name'], fromname))
        else:
            imports.append((None, func_args['name']))
    elif name.endswith('import_module'):
        func_args = get_args(args, kwargs, ['name', 'package'])
        log.debug('Found `import_module` with args: {}'.format(func_args))
        if not func_args['name']:
            raise MoultScannerError('No name supplied for import_module')
        if func_args['package'] and not isinstance(func_args['package'], (bytes, str_)):
            raise MoultScannerError('import_module package not string type')
        imports.append((func_args['package'], func_args['name']))
    return imports
class ResolvedImport(object):
    '''A single import resolved to its top-level module and, for
    non-stdlib imports, the file that provides it.
    '''
    def __init__(self, import_path, import_root):
        # Top-level package name, e.g. 'os' for 'os.path'.
        module = import_path.split('.', 1)[0]
        self.module = module
        self.import_path = import_path
        self.is_stdlib = utils.is_stdlib(module)
        self.filename = None
        if not self.is_stdlib:
            # Only non-stdlib imports are resolved to a concrete file.
            self.filename = utils.file_containing_import(import_path, import_root)
    def __repr__(self):
        return '<ResolvedImport {} ({})>'.format(self.import_path, self.filename)
class ImportNodeVisitor(ast.NodeVisitor):
    '''A simplistic AST visitor that looks for easily identified imports.
    It can resolve simple assignment variables defined within the module.
    '''
    def reset(self, filename):
        # Re-target the visitor at a new file; per-module bookkeeping is
        # (re)initialized in visit_Module.
        self.filename = filename
        self.import_path, self.import_root = utils.import_path_from_file(filename)
    def add_import(self, *names):
        # Each item is a (module, name) pair as produced by the visit_*
        # methods or parse_programmatic_import().
        for module, name in names:
            if module and module.startswith('.'):
                # Relative import: resolve against this module's own path.
                module = utils.resolve_import(module, self.import_path)
            elif not module:
                module = ''
            # Join package and name into one dotted path, tolerating empty
            # parts on either side.
            module = '.'.join((module, name.strip('.'))).strip('.')
            if module not in self._imports:
                # `_imports` (set) de-duplicates; `imports` (list) keeps
                # discovery order.
                self._imports.add(module)
                self.imports.append(ResolvedImport(module, self.import_root))
    def visit_Module(self, node):
        log.debug('Resetting AST visitor with module path: %s', self.import_path)
        self._imports = set()
        self.imports = []
        # Module-level name -> value bindings, filled by visit_Assign.
        self.scope = {}
        if node:
            self.generic_visit(node)
    def visit_Import(self, node):
        # `import a.b` -> ('a.b', '')
        for n in node.names:
            self.add_import((n.name, ''))
        self.generic_visit(node)
    def visit_ImportFrom(self, node):
        # `from ..pkg import x` -> ('..pkg', 'x'); node.level is the
        # number of leading dots.
        module = '{}{}'.format('.' * node.level, str_(node.module or ''))
        for n in node.names:
            self.add_import((module, n.name))
        self.generic_visit(node)
    def visit_Expr(self, node):
        if isinstance(node.value, ast.Call):
            # Catch programmatic imports: __import__() / import_module().
            try:
                self.add_import(*parse_programmatic_import(node.value, self.scope))
            except MoultScannerError as e:
                log.debug('%s, File: %s', e, self.filename)
        elif isinstance(node.value, ast.Name):
            ast_value(node.value, self.scope)
        self.generic_visit(node)
    def visit_Assign(self, node):
        # Track simple assignments so later calls can resolve variables.
        ast_value(node, self.scope)
    def visit_Delete(self, node):
        ast_value(node, self.scope)
    def visit(self, node):
        # Pure delegation; kept as an explicit extension point.
        super(ImportNodeVisitor, self).visit(node)
ast_visitor = ImportNodeVisitor()
def _ast_scan_file_re(filename):
    '''Fallback import scanner for files the `ast` module cannot parse.

    Import statements are extracted with a regex, normalized onto single
    lines, and only that synthetic snippet is parsed with `ast`.
    Returns the same (scope, imports) pair as `ast_scan_file`, or
    (None, None) when the file cannot be read or parsed.
    '''
    try:
        with io.open(filename, 'rt', encoding='utf8') as fp:
            script = fp.read()
            normalized = ''
            for imp in _fallback_re.finditer(script):
                imp_line = imp.group(1)
                try:
                    # Python 2 compatibility: the match may be bytes.
                    imp_line = imp_line.decode('utf8')
                except AttributeError:
                    pass
                except UnicodeEncodeError:
                    log.warn('Unicode import failed: %s', imp_line)
                    continue
                # Drop parentheses and collapse whitespace so each import
                # occupies exactly one line.
                imp_line = re.sub(r'[\(\)]', '', imp_line)
                normalized += ' '.join(imp_line.split()).strip(',') + '\n'
            log.debug('Normalized imports:\n%s', normalized)
            try:
                root = ast.parse(normalized, filename=filename)
            except SyntaxError:
                log.error('Could not parse file using regex scan: %s', filename)
                log.info('Exception:', exc_info=True)
                return None, None
            log.debug('Starting AST Scan (regex): %s', filename)
            ast_visitor.reset(filename)
            ast_visitor.visit(root)
            return ast_visitor.scope, ast_visitor.imports
    except IOError:
        log.warn('Could not open file: %s', filename)
    return None, None
|
oemof/oemof.db
|
oemof/db/tools.py
|
get_polygon_from_nuts
|
python
|
def get_polygon_from_nuts(conn, nuts):
    r"""Fetch the union of NUTS region geometries as a shapely object.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a PostGIS database containing the
        ``oemof.geo_nuts_rg_2013`` table.
    nuts : str or iterable of str
        A single NUTS id or a collection of NUTS ids.

    Returns
    -------
    shapely geometry object
        The merged geometry, transformed to WGS84 (EPSG:4326).
    """
    # TODO@Günni
    if isinstance(nuts, str):
        # Pad with a dummy id so tuple() renders as ('id', 'xyz') rather
        # than the single-element form ('id',), whose trailing comma
        # would break the SQL IN clause.
        nuts = [nuts, 'xyz']
    logging.debug('Getting polygon from DB')
    sql = '''
        SELECT st_astext(ST_Transform(st_union(geom), 4326))
        FROM oemof.geo_nuts_rg_2013
        WHERE nuts_id in {0};
    '''.format(tuple(nuts))
    return wkt_loads(conn.execute(sql).fetchone()[0])
|
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/tools.py#L40-L133
| null |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 11:08:15 2015
This is a collection of helper functions which work on there own an can be
used by various classes. If there are too many helper-functions, they will
be sorted in different modules.
All special import should be in try/except loops to avoid import errors.
"""
import logging
import pandas as pd
# get_polygon_from_nuts
hlp_fkt = 'get_polygon_from_nuts'
try:
from shapely.wkt import loads as wkt_loads
except:
logging.info(
'You will not be able to use the helper function: {0}'.format(hlp_fkt))
logging.info('Install shapely to use it.')
def get_polygons_from_table(conn, schema, table, g_col='geom', n_col='name'):
    '''Fetch all rows of a PostGIS table as shapely geometries.

    Returns a dict keyed by the name column; each value is a
    ``[name, geometry]`` list.

    NOTE(review): identifiers are interpolated directly into the SQL
    string -- only safe for trusted schema/table/column names.
    '''
    sql = '''
        SELECT {n_col}, st_astext({g_col})
        FROM {schema}.{table};
    '''.format(
        **{'n_col': n_col, 'g_col': g_col, 'schema': schema, 'table': table})
    logging.debug(sql)
    raw_data = conn.execute(sql).fetchall()
    polygon_dc = {}
    for d in raw_data:
        # d = (name, WKT string); keep the name alongside the geometry.
        polygon_dc[d[0]] = [d[0], wkt_loads(d[1])]
    return polygon_dc
def get_polygon_from_postgis(conn, schema, table, gcol='geom', union=False):
    r"""Fetch geometry from a PostGIS table as a shapely object.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a PostGIS database.
    schema : str
        The database schema.
    table : str
        The database table.
    gcol : str, optional
        Name of the geometry column.
    union : bool, optional
        When True, all rows are merged with ``st_union`` before being
        returned; otherwise only the first fetched row's geometry is used.

    Returns
    -------
    shapely geometry object
        The geometry of the first fetched row, transformed to WGS84
        (EPSG:4326).
    """
    # TODO@Günni
    logging.debug('Getting polygon from DB table')
    if union:
        geo_string = 'st_union({0})'.format(gcol)
    else:
        geo_string = '{0}'.format(gcol)
    sql = '''
        SELECT st_astext(ST_Transform({geo_string}, 4326))
        FROM {schema}.{table};
    '''.format(**{'geo_string': geo_string, 'schema': schema, 'table': table})
    return wkt_loads(conn.execute(sql).fetchone()[0])
def tz_from_geom(connection, geometry):
    r"""Finding the timezone of a given point or polygon geometry, assuming
    that the polygon is not crossing a border of a timezone. For a given point
    or polygon geometry not located within the timezone dataset (e.g. sea) the
    nearest timezone based on the bounding boxes of the geometries is returned.

    Parameters
    ----------
    connection : sqlalchemy connection object
        A valid connection to a postgis database containing the timezone table
    geometry : shapely geometry object
        A point or polygon object. The polygon should not cross a timezone.

    Returns
    -------
    string
        Timezone using the naming of the IANA time zone database

    References
    ----------
    http://postgis.net/docs/manual-2.2/geometry_distance_box.html
    """
    # TODO@Günni
    # Polygons are reduced to their centroid for the containment test.
    if geometry.geom_type in ['Polygon', 'MultiPolygon']:
        coords = geometry.centroid
    else:
        coords = geometry
    sql = """
        SELECT tzid FROM oemof_test.tz_world
        WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
        """.format(wkt=coords.wkt)
    if not connection.execute(sql).fetchone():
        # No direct hit (e.g. offshore): fall back to the nearest zone by
        # bounding-box distance (PostGIS <#> operator).
        sql = """
            SELECT tzid FROM oemof_test.tz_world
            ORDER BY ST_PointFromText('{wkt}', 4326) <#> geom LIMIT 1;
            """.format(wkt=coords.wkt)
    # NOTE(review): the query is executed again here, so a successful
    # containment lookup runs twice against the database.
    return connection.execute(sql).fetchone()[0]
def get_windzone(conn, geometry):
    '''Find the wind zone number containing a geometry.

    Polygons are reduced to their centroid; returns 0 when no zone
    polygon contains the point.
    '''
    # TODO@Günni
    if geometry.geom_type in ['Polygon', 'MultiPolygon']:
        coords = geometry.centroid
    else:
        coords = geometry
    sql = """
        SELECT zone FROM oemof_test.windzones
        WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
        """.format(wkt=coords.wkt)
    zone = conn.execute(sql).fetchone()
    if zone is not None:
        zone = zone[0]
    else:
        # No zone polygon contains the point; 0 marks "no zone found".
        zone = 0
    return zone
def create_empty_table_serial_primary(conn, schema, table, columns=None,
                                      id_col='id'):
    r"""New database table with primary key type serial and empty columns

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    columns : list, optional
        Columns that are to be created
    id_col : str, optional
        Name of index column of database table

    Notes
    -------
    Currently all created table columns will be of type `double precision`.
    Feel free to enhance this function by generalizing this aspect.
    """
    sql_str = """CREATE TABLE {schema}.{table} ({id_col} SERIAL PRIMARY KEY
        NOT NULL)
        """.format(schema=schema, table=table, id_col=id_col)
    conn.execute(sql_str)
    # define more columns
    if columns is not None:
        for col in columns:
            # Every extra column is created as `double precision` (see Notes).
            col_str = """alter table {schema}.{table} add column {col}
                double precision;
                """.format(schema=schema, table=table, col=col)
            conn.execute(col_str)
def grant_db_access(conn, schema, table, role):
    r"""Gives access to database users/ groups

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    role : str
        database role that access is granted to
    """
    # NOTE(review): identifiers are interpolated directly into the SQL
    # string; only use with trusted schema/table/role values.
    grant_str = """GRANT ALL ON TABLE {schema}.{table}
    TO {role} WITH GRANT OPTION;""".format(schema=schema, table=table,
                                           role=role)
    conn.execute(grant_str)
def add_primary_key(conn, schema, table, pk_col):
    r"""Adds primary key to database table

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    pk_col : str
        Column that primary key is applied to
    """
    # Identifiers are format-interpolated; trusted input only.
    sql_str = """alter table {schema}.{table} add primary key ({col})""".format(
        schema=schema, table=table, col=pk_col)
    conn.execute(sql_str)
def change_owner_to(conn, schema, table, role):
    r"""Changes table's ownership to role

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    role : str
        database role that access is granted to
    """
    # Identifiers are format-interpolated; trusted input only.
    sql_str = """ALTER TABLE {schema}.{table}
        OWNER TO {role};""".format(schema=schema,
                                   table=table,
                                   role=role)
    conn.execute(sql_str)
def db_table2pandas(conn, schema, table, columns=None):
    """Read a database table into a :class:`pandas.DataFrame`.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database.
    schema : str
        The database schema.
    table : str
        The database table.
    columns : list or str, optional
        Columns to select.  A list is joined into a comma-separated
        column list; a string is used verbatim.  By default all
        columns (``*``) are fetched.

    Returns
    -------
    pandas.DataFrame
        One row per fetched record, labelled with the result columns.
    """
    if columns is None:
        columns = '*'
    elif not isinstance(columns, str):
        # A list such as ['a', 'b'] would otherwise be interpolated as
        # "['a', 'b']", which is not valid SQL.
        columns = ', '.join(columns)
    sql = "SELECT {0} FROM {1}.{2};".format(columns, schema, table)
    logging.debug("SQL query: {0}".format(sql))
    results = conn.execute(sql)
    result_columns = results.keys()
    return pd.DataFrame(results.fetchall(), columns=result_columns)
|
oemof/oemof.db
|
oemof/db/tools.py
|
get_polygon_from_postgis
|
python
|
def get_polygon_from_postgis(conn, schema, table, gcol='geom', union=False):
    r"""Fetch a geometry from a PostGIS table as a shapely object.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a PostGIS-enabled database.
    schema : str
        The database schema.
    table : str
        The database table.
    gcol : str, optional
        Name of the geometry column (default ``'geom'``).
    union : bool, optional
        If True, merge all geometries with ``st_union`` before
        returning (default False).

    Returns
    -------
    shapely geometry object
        The geometry of the first result row, transformed to WGS84
        (EPSG:4326).

    Notes
    -----
    Only the first row of the result set is used; with ``union=False``
    and more than one row the remaining geometries are silently
    ignored.
    """
    # TODO@Günni
    logging.debug('Getting polygon from DB table')
    if union:
        geo_string = 'st_union({0})'.format(gcol)
    else:
        geo_string = '{0}'.format(gcol)
    sql = '''
        SELECT st_astext(ST_Transform({geo_string}, 4326))
        FROM {schema}.{table};
        '''.format(**{'geo_string': geo_string, 'schema': schema, 'table': table})
    return wkt_loads(conn.execute(sql).fetchone()[0])
|
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/tools.py#L136-L231
| null |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 11:08:15 2015
This is a collection of helper functions which work on there own an can be
used by various classes. If there are too many helper-functions, they will
be sorted in different modules.
All special import should be in try/except loops to avoid import errors.
"""
import logging
import pandas as pd
# get_polygon_from_nuts
hlp_fkt = 'get_polygon_from_nuts'
try:
from shapely.wkt import loads as wkt_loads
except:
logging.info(
'You will not be able to use the helper function: {0}'.format(hlp_fkt))
logging.info('Install shapely to use it.')
def get_polygons_from_table(conn, schema, table, g_col='geom', n_col='name'):
    """Read named geometries from a database table.

    Selects ``n_col`` and the WKT text of ``g_col`` from
    ``schema.table`` and returns a dict mapping each name to a
    two-element list ``[name, shapely_geometry]``.
    """
    query = '''
        SELECT {n_col}, st_astext({g_col})
        FROM {schema}.{table};
        '''.format(n_col=n_col, g_col=g_col, schema=schema, table=table)
    logging.debug(query)
    rows = conn.execute(query).fetchall()
    return {name: [name, wkt_loads(wkt)] for name, wkt in rows}
def get_polygon_from_nuts(conn, nuts):
    r"""Fetch the merged geometry of NUTS regions as a shapely object.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a PostGIS-enabled database containing
        the ``oemof.geo_nuts_rg_2013`` table.
    nuts : str or iterable of str
        A single NUTS region id, or a collection of ids.

    Returns
    -------
    shapely geometry object
        The union (``st_union``) of the requested region geometries,
        transformed to WGS84 (EPSG:4326).
    """
    # TODO@Günni
    if isinstance(nuts, str):
        # Pad a single id with a dummy value so that ``tuple(nuts)``
        # below renders as ('<id>', 'xyz'): a one-element Python tuple
        # would render with a trailing comma, which is invalid SQL.
        nuts = [nuts, 'xyz']
    logging.debug('Getting polygon from DB')
    sql = '''
        SELECT st_astext(ST_Transform(st_union(geom), 4326))
        FROM oemof.geo_nuts_rg_2013
        WHERE nuts_id in {0};
        '''.format(tuple(nuts))
    return wkt_loads(conn.execute(sql).fetchone()[0])
def tz_from_geom(connection, geometry):
    r"""Find the IANA timezone of a point or polygon geometry.

    Polygons are assumed not to cross a timezone border; their
    centroid is used for the lookup.  If the geometry is not located
    within any timezone polygon (e.g. at sea), the nearest timezone
    based on the bounding boxes of the geometries is returned.

    Parameters
    ----------
    connection : sqlalchemy connection object
        A valid connection to a PostGIS database containing the
        timezone table.
    geometry : shapely geometry object
        A point or polygon object.

    Returns
    -------
    string
        Timezone using the naming of the IANA time zone database.

    References
    ----------
    http://postgis.net/docs/manual-2.2/geometry_distance_box.html
    """
    # TODO@Günni
    point = (geometry.centroid
             if geometry.geom_type in ('Polygon', 'MultiPolygon')
             else geometry)
    query = """
    SELECT tzid FROM oemof_test.tz_world
    WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
    """.format(wkt=point.wkt)
    if not connection.execute(query).fetchone():
        # Fall back to the nearest timezone by bounding-box distance.
        query = """
        SELECT tzid FROM oemof_test.tz_world
        ORDER BY ST_PointFromText('{wkt}', 4326) <#> geom LIMIT 1;
        """.format(wkt=point.wkt)
    return connection.execute(query).fetchone()[0]
def get_windzone(conn, geometry):
    """Look up the wind zone containing a geometry.

    Polygons are reduced to their centroid before the lookup.
    Returns the zone number, or 0 if the geometry lies outside every
    zone in ``oemof_test.windzones``.
    """
    # TODO@Günni
    point = (geometry.centroid
             if geometry.geom_type in ('Polygon', 'MultiPolygon')
             else geometry)
    query = """
    SELECT zone FROM oemof_test.windzones
    WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
    """.format(wkt=point.wkt)
    row = conn.execute(query).fetchone()
    return row[0] if row is not None else 0
def create_empty_table_serial_primary(conn, schema, table, columns=None,
                                      id_col='id'):
    r"""Create a database table with a serial primary key and empty columns.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database.
    schema : str
        The database schema.
    table : str
        The database table.
    columns : list, optional
        Additional columns that are to be created.
    id_col : str, optional
        Name of the index column of the database table.

    Notes
    -----
    All created extra columns are of type ``double precision``.  Feel
    free to enhance this function by generalizing this aspect.
    """
    create_stmt = """CREATE TABLE {schema}.{table} ({id_col} SERIAL PRIMARY KEY
        NOT NULL)
        """.format(schema=schema, table=table, id_col=id_col)
    conn.execute(create_stmt)
    # Add the requested data columns one by one.
    for column in (columns or []):
        conn.execute(
            """alter table {schema}.{table} add column {col}
            double precision;
            """.format(schema=schema, table=table, col=column))
def grant_db_access(conn, schema, table, role):
    r"""Gives access to database users/groups.

    Grants ALL privileges (with grant option) on ``schema.table``.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    role : str
        database role that access is granted to
    """
    grant_str = """GRANT ALL ON TABLE {schema}.{table}
    TO {role} WITH GRANT OPTION;""".format(schema=schema, table=table,
                                           role=role)
    conn.execute(grant_str)
def add_primary_key(conn, schema, table, pk_col):
    r"""Adds a primary key constraint to an existing database table.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    pk_col : str
        Column that the primary key is applied to
    """
    sql_str = """alter table {schema}.{table} add primary key ({col})""".format(
        schema=schema, table=table, col=pk_col)
    conn.execute(sql_str)
def change_owner_to(conn, schema, table, role):
    r"""Changes a database table's ownership to the given role.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    role : str
        database role that becomes the new owner
    """
    sql_str = """ALTER TABLE {schema}.{table}
    OWNER TO {role};""".format(schema=schema,
                               table=table,
                               role=role)
    conn.execute(sql_str)
def db_table2pandas(conn, schema, table, columns=None):
    """Read a database table into a :class:`pandas.DataFrame`.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database.
    schema : str
        The database schema.
    table : str
        The database table.
    columns : list or str, optional
        Columns to select.  A list is joined into a comma-separated
        column list; a string is used verbatim.  By default all
        columns (``*``) are fetched.

    Returns
    -------
    pandas.DataFrame
        One row per fetched record, labelled with the result columns.
    """
    if columns is None:
        columns = '*'
    elif not isinstance(columns, str):
        # A list such as ['a', 'b'] would otherwise be interpolated as
        # "['a', 'b']", which is not valid SQL.
        columns = ', '.join(columns)
    sql = "SELECT {0} FROM {1}.{2};".format(columns, schema, table)
    logging.debug("SQL query: {0}".format(sql))
    results = conn.execute(sql)
    result_columns = results.keys()
    return pd.DataFrame(results.fetchall(), columns=result_columns)
|
oemof/oemof.db
|
oemof/db/tools.py
|
tz_from_geom
|
python
|
def tz_from_geom(connection, geometry):
r"""Finding the timezone of a given point or polygon geometry, assuming
that the polygon is not crossing a border of a timezone. For a given point
or polygon geometry not located within the timezone dataset (e.g. sea) the
nearest timezone based on the bounding boxes of the geometries is returned.
Parameters
----------
connection : sqlalchemy connection object
A valid connection to a postigs database containing the timezone table
geometry : shapely geometry object
A point or polygon object. The polygon should not cross a timezone.
Returns
-------
string
Timezone using the naming of the IANA time zone database
References
----------
http://postgis.net/docs/manual-2.2/geometry_distance_box.html
"""
# TODO@Günni
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
coords = geometry.centroid
else:
coords = geometry
sql = """
SELECT tzid FROM oemof_test.tz_world
WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
""".format(wkt=coords.wkt)
if not connection.execute(sql).fetchone():
sql = """
SELECT tzid FROM oemof_test.tz_world
ORDER BY ST_PointFromText('{wkt}', 4326) <#> geom LIMIT 1;
""".format(wkt=coords.wkt)
return connection.execute(sql).fetchone()[0]
|
r"""Finding the timezone of a given point or polygon geometry, assuming
that the polygon is not crossing a border of a timezone. For a given point
or polygon geometry not located within the timezone dataset (e.g. sea) the
nearest timezone based on the bounding boxes of the geometries is returned.
Parameters
----------
connection : sqlalchemy connection object
A valid connection to a postigs database containing the timezone table
geometry : shapely geometry object
A point or polygon object. The polygon should not cross a timezone.
Returns
-------
string
Timezone using the naming of the IANA time zone database
References
----------
http://postgis.net/docs/manual-2.2/geometry_distance_box.html
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/tools.py#L234-L272
| null |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 11:08:15 2015
This is a collection of helper functions which work on there own an can be
used by various classes. If there are too many helper-functions, they will
be sorted in different modules.
All special import should be in try/except loops to avoid import errors.
"""
import logging
import pandas as pd
# get_polygon_from_nuts
hlp_fkt = 'get_polygon_from_nuts'
try:
from shapely.wkt import loads as wkt_loads
except:
logging.info(
'You will not be able to use the helper function: {0}'.format(hlp_fkt))
logging.info('Install shapely to use it.')
def get_polygons_from_table(conn, schema, table, g_col='geom', n_col='name'):
sql = '''
SELECT {n_col}, st_astext({g_col})
FROM {schema}.{table};
'''.format(
**{'n_col': n_col, 'g_col': g_col, 'schema': schema, 'table': table})
logging.debug(sql)
raw_data = conn.execute(sql).fetchall()
polygon_dc = {}
for d in raw_data:
polygon_dc[d[0]] = [d[0], wkt_loads(d[1])]
return polygon_dc
def get_polygon_from_nuts(conn, nuts):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
# TODO@Günni
if isinstance(nuts, str):
nuts = [nuts, 'xyz']
logging.debug('Getting polygon from DB')
sql = '''
SELECT st_astext(ST_Transform(st_union(geom), 4326))
FROM oemof.geo_nuts_rg_2013
WHERE nuts_id in {0};
'''.format(tuple(nuts))
return wkt_loads(conn.execute(sql).fetchone()[0])
def get_polygon_from_postgis(conn, schema, table, gcol='geom', union=False):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
# TODO@Günni
logging.debug('Getting polygon from DB table')
if union:
geo_string = 'st_union({0})'.format(gcol)
else:
geo_string = '{0}'.format(gcol)
sql = '''
SELECT st_astext(ST_Transform({geo_string}, 4326))
FROM {schema}.{table};
'''.format(**{'geo_string': geo_string, 'schema': schema, 'table': table})
return wkt_loads(conn.execute(sql).fetchone()[0])
def get_windzone(conn, geometry):
'Find windzone from map.'
# TODO@Günni
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
coords = geometry.centroid
else:
coords = geometry
sql = """
SELECT zone FROM oemof_test.windzones
WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
""".format(wkt=coords.wkt)
zone = conn.execute(sql).fetchone()
if zone is not None:
zone = zone[0]
else:
zone = 0
return zone
def create_empty_table_serial_primary(conn, schema, table, columns=None,
id_col='id'):
r"""New database table with primary key type serial and empty columns
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
columns : list, optional
Columns that are to be created
id_col : str, optional
Name of index column of database table
Notes
-------
Currently all created table columns will be of type `double precision`.
Feel free to enhance this function by
by generalizing this aspect.
"""
sql_str = """CREATE TABLE {schema}.{table} ({id_col} SERIAL PRIMARY KEY
NOT NULL)
""".format(schema=schema, table=table, id_col=id_col)
conn.execute(sql_str)
# define more columns
if columns is not None:
for col in columns:
col_str = """alter table {schema}.{table} add column {col}
double precision;
""".format(schema=schema, table=table, col=col)
conn.execute(col_str)
def grant_db_access(conn, schema, table, role):
r"""Gives access to database users/ groups
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
role : str
database role that access is granted to
"""
grant_str = """GRANT ALL ON TABLE {schema}.{table}
TO {role} WITH GRANT OPTION;""".format(schema=schema, table=table,
role=role)
conn.execute(grant_str)
def add_primary_key(conn, schema, table, pk_col):
r"""Adds primary key to database table
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
pk_col : str
Column that primary key is applied to
"""
sql_str = """alter table {schema}.{table} add primary key ({col})""".format(
schema=schema, table=table, col=pk_col)
conn.execute(sql_str)
def change_owner_to(conn, schema, table, role):
r"""Changes table's ownership to role
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
role : str
database role that access is granted to
"""
sql_str = """ALTER TABLE {schema}.{table}
OWNER TO {role};""".format(schema=schema,
table=table,
role=role)
conn.execute(sql_str)
def db_table2pandas(conn, schema, table, columns=None):
    """Read a database table into a :class:`pandas.DataFrame`.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database.
    schema : str
        The database schema.
    table : str
        The database table.
    columns : list or str, optional
        Columns to select.  A list is joined into a comma-separated
        column list; a string is used verbatim.  By default all
        columns (``*``) are fetched.

    Returns
    -------
    pandas.DataFrame
        One row per fetched record, labelled with the result columns.
    """
    if columns is None:
        columns = '*'
    elif not isinstance(columns, str):
        # A list such as ['a', 'b'] would otherwise be interpolated as
        # "['a', 'b']", which is not valid SQL.
        columns = ', '.join(columns)
    sql = "SELECT {0} FROM {1}.{2};".format(columns, schema, table)
    logging.debug("SQL query: {0}".format(sql))
    results = conn.execute(sql)
    result_columns = results.keys()
    return pd.DataFrame(results.fetchall(), columns=result_columns)
|
oemof/oemof.db
|
oemof/db/tools.py
|
get_windzone
|
python
|
def get_windzone(conn, geometry):
'Find windzone from map.'
# TODO@Günni
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
coords = geometry.centroid
else:
coords = geometry
sql = """
SELECT zone FROM oemof_test.windzones
WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
""".format(wkt=coords.wkt)
zone = conn.execute(sql).fetchone()
if zone is not None:
zone = zone[0]
else:
zone = 0
return zone
|
Find windzone from map.
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/tools.py#L275-L291
| null |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 11:08:15 2015
This is a collection of helper functions which work on there own an can be
used by various classes. If there are too many helper-functions, they will
be sorted in different modules.
All special import should be in try/except loops to avoid import errors.
"""
import logging
import pandas as pd
# get_polygon_from_nuts
hlp_fkt = 'get_polygon_from_nuts'
try:
from shapely.wkt import loads as wkt_loads
except:
logging.info(
'You will not be able to use the helper function: {0}'.format(hlp_fkt))
logging.info('Install shapely to use it.')
def get_polygons_from_table(conn, schema, table, g_col='geom', n_col='name'):
sql = '''
SELECT {n_col}, st_astext({g_col})
FROM {schema}.{table};
'''.format(
**{'n_col': n_col, 'g_col': g_col, 'schema': schema, 'table': table})
logging.debug(sql)
raw_data = conn.execute(sql).fetchall()
polygon_dc = {}
for d in raw_data:
polygon_dc[d[0]] = [d[0], wkt_loads(d[1])]
return polygon_dc
def get_polygon_from_nuts(conn, nuts):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
# TODO@Günni
if isinstance(nuts, str):
nuts = [nuts, 'xyz']
logging.debug('Getting polygon from DB')
sql = '''
SELECT st_astext(ST_Transform(st_union(geom), 4326))
FROM oemof.geo_nuts_rg_2013
WHERE nuts_id in {0};
'''.format(tuple(nuts))
return wkt_loads(conn.execute(sql).fetchone()[0])
def get_polygon_from_postgis(conn, schema, table, gcol='geom', union=False):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
# TODO@Günni
logging.debug('Getting polygon from DB table')
if union:
geo_string = 'st_union({0})'.format(gcol)
else:
geo_string = '{0}'.format(gcol)
sql = '''
SELECT st_astext(ST_Transform({geo_string}, 4326))
FROM {schema}.{table};
'''.format(**{'geo_string': geo_string, 'schema': schema, 'table': table})
return wkt_loads(conn.execute(sql).fetchone()[0])
def tz_from_geom(connection, geometry):
r"""Finding the timezone of a given point or polygon geometry, assuming
that the polygon is not crossing a border of a timezone. For a given point
or polygon geometry not located within the timezone dataset (e.g. sea) the
nearest timezone based on the bounding boxes of the geometries is returned.
Parameters
----------
connection : sqlalchemy connection object
A valid connection to a postigs database containing the timezone table
geometry : shapely geometry object
A point or polygon object. The polygon should not cross a timezone.
Returns
-------
string
Timezone using the naming of the IANA time zone database
References
----------
http://postgis.net/docs/manual-2.2/geometry_distance_box.html
"""
# TODO@Günni
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
coords = geometry.centroid
else:
coords = geometry
sql = """
SELECT tzid FROM oemof_test.tz_world
WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
""".format(wkt=coords.wkt)
if not connection.execute(sql).fetchone():
sql = """
SELECT tzid FROM oemof_test.tz_world
ORDER BY ST_PointFromText('{wkt}', 4326) <#> geom LIMIT 1;
""".format(wkt=coords.wkt)
return connection.execute(sql).fetchone()[0]
def create_empty_table_serial_primary(conn, schema, table, columns=None,
id_col='id'):
r"""New database table with primary key type serial and empty columns
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
columns : list, optional
Columns that are to be created
id_col : str, optional
Name of index column of database table
Notes
-------
Currently all created table columns will be of type `double precision`.
Feel free to enhance this function by
by generalizing this aspect.
"""
sql_str = """CREATE TABLE {schema}.{table} ({id_col} SERIAL PRIMARY KEY
NOT NULL)
""".format(schema=schema, table=table, id_col=id_col)
conn.execute(sql_str)
# define more columns
if columns is not None:
for col in columns:
col_str = """alter table {schema}.{table} add column {col}
double precision;
""".format(schema=schema, table=table, col=col)
conn.execute(col_str)
def grant_db_access(conn, schema, table, role):
r"""Gives access to database users/ groups
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
role : str
database role that access is granted to
"""
grant_str = """GRANT ALL ON TABLE {schema}.{table}
TO {role} WITH GRANT OPTION;""".format(schema=schema, table=table,
role=role)
conn.execute(grant_str)
def add_primary_key(conn, schema, table, pk_col):
r"""Adds primary key to database table
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
pk_col : str
Column that primary key is applied to
"""
sql_str = """alter table {schema}.{table} add primary key ({col})""".format(
schema=schema, table=table, col=pk_col)
conn.execute(sql_str)
def change_owner_to(conn, schema, table, role):
r"""Changes table's ownership to role
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
role : str
database role that access is granted to
"""
sql_str = """ALTER TABLE {schema}.{table}
OWNER TO {role};""".format(schema=schema,
table=table,
role=role)
conn.execute(sql_str)
def db_table2pandas(conn, schema, table, columns=None):
if columns is None:
columns = '*'
sql = "SELECT {0} FROM {1}.{2};".format(columns, schema, table)
logging.debug("SQL query: {0}".format(sql))
results = (conn.execute(sql))
columns = results.keys()
return pd.DataFrame(results.fetchall(), columns=columns)
|
oemof/oemof.db
|
oemof/db/tools.py
|
create_empty_table_serial_primary
|
python
|
def create_empty_table_serial_primary(conn, schema, table, columns=None,
id_col='id'):
r"""New database table with primary key type serial and empty columns
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
columns : list, optional
Columns that are to be created
id_col : str, optional
Name of index column of database table
Notes
-------
Currently all created table columns will be of type `double precision`.
Feel free to enhance this function by
by generalizing this aspect.
"""
sql_str = """CREATE TABLE {schema}.{table} ({id_col} SERIAL PRIMARY KEY
NOT NULL)
""".format(schema=schema, table=table, id_col=id_col)
conn.execute(sql_str)
# define more columns
if columns is not None:
for col in columns:
col_str = """alter table {schema}.{table} add column {col}
double precision;
""".format(schema=schema, table=table, col=col)
conn.execute(col_str)
|
r"""New database table with primary key type serial and empty columns
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
columns : list, optional
Columns that are to be created
id_col : str, optional
Name of index column of database table
Notes
-------
Currently all created table columns will be of type `double precision`.
Feel free to enhance this function by
by generalizing this aspect.
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/tools.py#L294-L330
| null |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 11:08:15 2015
This is a collection of helper functions which work on there own an can be
used by various classes. If there are too many helper-functions, they will
be sorted in different modules.
All special import should be in try/except loops to avoid import errors.
"""
import logging

import pandas as pd

# get_polygon_from_nuts -- shapely is an optional dependency; only the
# geometry helpers below need it.
hlp_fkt = 'get_polygon_from_nuts'
try:
    from shapely.wkt import loads as wkt_loads
except ImportError:
    # Catch only ImportError: a bare ``except:`` would also swallow
    # SystemExit/KeyboardInterrupt and hide real errors inside shapely.
    logging.info(
        'You will not be able to use the helper function: {0}'.format(hlp_fkt))
    logging.info('Install shapely to use it.')
def get_polygons_from_table(conn, schema, table, g_col='geom', n_col='name'):
    """Fetch every geometry of a table, keyed by its name column.

    Returns a dict mapping each value of `n_col` to a two-element list
    ``[name, shapely_geometry]``.
    """
    query = '''
        SELECT {n_col}, st_astext({g_col})
        FROM {schema}.{table};
        '''.format(n_col=n_col, g_col=g_col, schema=schema, table=table)
    logging.debug(query)
    rows = conn.execute(query).fetchall()
    return {row[0]: [row[0], wkt_loads(row[1])] for row in rows}
def get_polygon_from_nuts(conn, nuts):
    r"""Return the union polygon of one or more NUTS regions.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database holding the table
        ``oemof.geo_nuts_rg_2013``.
    nuts : str or iterable of str
        A single NUTS id or a collection of NUTS ids.

    Returns
    -------
    shapely geometry object
        The union of the geometries of the given regions, transformed
        to EPSG:4326.
    """
    if isinstance(nuts, str):
        nuts = [nuts]
    logging.debug('Getting polygon from DB')
    # Build the IN-list by hand: formatting ``tuple(ids)`` would render a
    # trailing comma for a single id -- "('DE',)" -- which is invalid SQL.
    id_list = "('{0}')".format("', '".join(nuts))
    sql = '''
        SELECT st_astext(ST_Transform(st_union(geom), 4326))
        FROM oemof.geo_nuts_rg_2013
        WHERE nuts_id in {0};
        '''.format(id_list)
    return wkt_loads(conn.execute(sql).fetchone()[0])
def get_polygon_from_postgis(conn, schema, table, gcol='geom', union=False):
    r"""Fetch the geometry (optionally unioned) of a PostGIS table.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a PostGIS-enabled database.
    schema : str
        The database schema.
    table : str
        The database table.
    gcol : str, optional
        Name of the geometry column (default ``'geom'``).
    union : bool, optional
        If True, the union of all geometries of the table is returned
        instead of the plain column.

    Returns
    -------
    shapely geometry object
        The (first) geometry of the table, transformed to EPSG:4326.
    """
    logging.debug('Getting polygon from DB table')
    if union:
        geo_string = 'st_union({0})'.format(gcol)
    else:
        geo_string = '{0}'.format(gcol)
    sql = '''
        SELECT st_astext(ST_Transform({geo_string}, 4326))
        FROM {schema}.{table};
        '''.format(**{'geo_string': geo_string, 'schema': schema,
                      'table': table})
    return wkt_loads(conn.execute(sql).fetchone()[0])
def tz_from_geom(connection, geometry):
    r"""Find the timezone of a given point or polygon geometry.

    Assumes the polygon is not crossing a border of a timezone.  For a
    geometry not located within the timezone dataset (e.g. sea) the
    nearest timezone, based on the bounding boxes of the geometries, is
    returned.

    Parameters
    ----------
    connection : sqlalchemy connection object
        A valid connection to a postgis database containing the timezone
        table.
    geometry : shapely geometry object
        A point or polygon object. The polygon should not cross a timezone.

    Returns
    -------
    string
        Timezone using the naming of the IANA time zone database.

    References
    ----------
    http://postgis.net/docs/manual-2.2/geometry_distance_box.html
    """
    if geometry.geom_type in ['Polygon', 'MultiPolygon']:
        coords = geometry.centroid
    else:
        coords = geometry
    sql = """
        SELECT tzid FROM oemof_test.tz_world
        WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
        """.format(wkt=coords.wkt)
    # Run the containment query once and reuse the row; the original code
    # executed the same query twice (existence check + fetch) on the hit
    # path.
    row = connection.execute(sql).fetchone()
    if row is None:
        # Not covered by any timezone polygon: fall back to the nearest
        # bounding box (<#> is the PostGIS box distance operator).
        sql = """
            SELECT tzid FROM oemof_test.tz_world
            ORDER BY ST_PointFromText('{wkt}', 4326) <#> geom LIMIT 1;
            """.format(wkt=coords.wkt)
        row = connection.execute(sql).fetchone()
    return row[0]
def get_windzone(conn, geometry):
    """Return the wind zone of the map covering the given geometry.

    Polygons are represented by their centroid; zone 0 is returned when
    no wind zone contains the geometry.
    """
    # TODO@Günni
    if geometry.geom_type in ('Polygon', 'MultiPolygon'):
        point = geometry.centroid
    else:
        point = geometry
    sql = """
        SELECT zone FROM oemof_test.windzones
        WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
        """.format(wkt=point.wkt)
    row = conn.execute(sql).fetchone()
    return row[0] if row is not None else 0
def grant_db_access(conn, schema, table, role):
    r"""Grant full access on a database table to a user or group.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    role : str
        database role that access is granted to
    """
    sql = """GRANT ALL ON TABLE {schema}.{table}
               TO {role} WITH GRANT OPTION;""".format(
        schema=schema, table=table, role=role)
    conn.execute(sql)
def add_primary_key(conn, schema, table, pk_col):
    r"""Declare ``pk_col`` the primary key of a database table.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    pk_col : str
        Column that primary key is applied to
    """
    statement = "alter table {schema}.{table} add primary key ({col})".format(
        schema=schema, table=table, col=pk_col)
    conn.execute(statement)
def change_owner_to(conn, schema, table, role):
    r"""Transfer ownership of a database table to the given role.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    role : str
        database role that ownership is transferred to
    """
    sql = """ALTER TABLE {schema}.{table}
             OWNER TO {role};""".format(schema=schema, table=table, role=role)
    conn.execute(sql)
def db_table2pandas(conn, schema, table, columns=None):
    """Read a database table into a pandas DataFrame.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database.
    schema : str
        The database schema.
    table : str
        The database table.
    columns : list of str, optional
        Columns to select; all columns (``*``) if not given.

    Returns
    -------
    pandas.DataFrame
        One DataFrame column per selected database column.
    """
    if columns is None:
        col_str = '*'
    else:
        # A list passed through str.format would render as "['a', 'b']",
        # which is not valid SQL -- join the column names explicitly.
        col_str = ', '.join(columns)
    sql = "SELECT {0} FROM {1}.{2};".format(col_str, schema, table)
    logging.debug("SQL query: {0}".format(sql))
    results = conn.execute(sql)
    columns = results.keys()
    return pd.DataFrame(results.fetchall(), columns=columns)
|
oemof/oemof.db
|
oemof/db/tools.py
|
grant_db_access
|
python
|
def grant_db_access(conn, schema, table, role):
r"""Gives access to database users/ groups
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
role : str
database role that access is granted to
"""
grant_str = """GRANT ALL ON TABLE {schema}.{table}
TO {role} WITH GRANT OPTION;""".format(schema=schema, table=table,
role=role)
conn.execute(grant_str)
|
r"""Gives access to database users/ groups
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
role : str
database role that access is granted to
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/tools.py#L332-L351
| null |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 11:08:15 2015
This is a collection of helper functions which work on there own an can be
used by various classes. If there are too many helper-functions, they will
be sorted in different modules.
All special import should be in try/except loops to avoid import errors.
"""
import logging
import pandas as pd
# get_polygon_from_nuts
hlp_fkt = 'get_polygon_from_nuts'
try:
from shapely.wkt import loads as wkt_loads
except:
logging.info(
'You will not be able to use the helper function: {0}'.format(hlp_fkt))
logging.info('Install shapely to use it.')
def get_polygons_from_table(conn, schema, table, g_col='geom', n_col='name'):
sql = '''
SELECT {n_col}, st_astext({g_col})
FROM {schema}.{table};
'''.format(
**{'n_col': n_col, 'g_col': g_col, 'schema': schema, 'table': table})
logging.debug(sql)
raw_data = conn.execute(sql).fetchall()
polygon_dc = {}
for d in raw_data:
polygon_dc[d[0]] = [d[0], wkt_loads(d[1])]
return polygon_dc
def get_polygon_from_nuts(conn, nuts):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
# TODO@Günni
if isinstance(nuts, str):
nuts = [nuts, 'xyz']
logging.debug('Getting polygon from DB')
sql = '''
SELECT st_astext(ST_Transform(st_union(geom), 4326))
FROM oemof.geo_nuts_rg_2013
WHERE nuts_id in {0};
'''.format(tuple(nuts))
return wkt_loads(conn.execute(sql).fetchone()[0])
def get_polygon_from_postgis(conn, schema, table, gcol='geom', union=False):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
# TODO@Günni
logging.debug('Getting polygon from DB table')
if union:
geo_string = 'st_union({0})'.format(gcol)
else:
geo_string = '{0}'.format(gcol)
sql = '''
SELECT st_astext(ST_Transform({geo_string}, 4326))
FROM {schema}.{table};
'''.format(**{'geo_string': geo_string, 'schema': schema, 'table': table})
return wkt_loads(conn.execute(sql).fetchone()[0])
def tz_from_geom(connection, geometry):
r"""Finding the timezone of a given point or polygon geometry, assuming
that the polygon is not crossing a border of a timezone. For a given point
or polygon geometry not located within the timezone dataset (e.g. sea) the
nearest timezone based on the bounding boxes of the geometries is returned.
Parameters
----------
connection : sqlalchemy connection object
A valid connection to a postigs database containing the timezone table
geometry : shapely geometry object
A point or polygon object. The polygon should not cross a timezone.
Returns
-------
string
Timezone using the naming of the IANA time zone database
References
----------
http://postgis.net/docs/manual-2.2/geometry_distance_box.html
"""
# TODO@Günni
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
coords = geometry.centroid
else:
coords = geometry
sql = """
SELECT tzid FROM oemof_test.tz_world
WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
""".format(wkt=coords.wkt)
if not connection.execute(sql).fetchone():
sql = """
SELECT tzid FROM oemof_test.tz_world
ORDER BY ST_PointFromText('{wkt}', 4326) <#> geom LIMIT 1;
""".format(wkt=coords.wkt)
return connection.execute(sql).fetchone()[0]
def get_windzone(conn, geometry):
'Find windzone from map.'
# TODO@Günni
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
coords = geometry.centroid
else:
coords = geometry
sql = """
SELECT zone FROM oemof_test.windzones
WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
""".format(wkt=coords.wkt)
zone = conn.execute(sql).fetchone()
if zone is not None:
zone = zone[0]
else:
zone = 0
return zone
def create_empty_table_serial_primary(conn, schema, table, columns=None,
                                      id_col='id'):
    r"""Create a new, empty database table with a serial primary key.

    Parameters
    ----------
    conn : sqlalchemy connection object
        A valid connection to a database
    schema : str
        The database schema
    table : str
        The database table
    columns : list, optional
        Columns that are to be created
    id_col : str, optional
        Name of index column of database table

    Notes
    -----
    Currently all created table columns will be of type
    `double precision`.  Feel free to enhance this function by
    generalizing this aspect.
    """
    conn.execute(
        """CREATE TABLE {schema}.{table} ({id_col} SERIAL PRIMARY KEY
        NOT NULL)
        """.format(schema=schema, table=table, id_col=id_col))
    # append the requested payload columns one by one
    for col in (columns or []):
        conn.execute(
            """alter table {schema}.{table} add column {col}
            double precision;
            """.format(schema=schema, table=table, col=col))
def add_primary_key(conn, schema, table, pk_col):
r"""Adds primary key to database table
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
pk_col : str
Column that primary key is applied to
"""
sql_str = """alter table {schema}.{table} add primary key ({col})""".format(
schema=schema, table=table, col=pk_col)
conn.execute(sql_str)
def change_owner_to(conn, schema, table, role):
r"""Changes table's ownership to role
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
role : str
database role that access is granted to
"""
sql_str = """ALTER TABLE {schema}.{table}
OWNER TO {role};""".format(schema=schema,
table=table,
role=role)
conn.execute(sql_str)
def db_table2pandas(conn, schema, table, columns=None):
if columns is None:
columns = '*'
sql = "SELECT {0} FROM {1}.{2};".format(columns, schema, table)
logging.debug("SQL query: {0}".format(sql))
results = (conn.execute(sql))
columns = results.keys()
return pd.DataFrame(results.fetchall(), columns=columns)
|
oemof/oemof.db
|
oemof/db/tools.py
|
add_primary_key
|
python
|
def add_primary_key(conn, schema, table, pk_col):
r"""Adds primary key to database table
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
pk_col : str
Column that primary key is applied to
"""
sql_str = """alter table {schema}.{table} add primary key ({col})""".format(
schema=schema, table=table, col=pk_col)
conn.execute(sql_str)
|
r"""Adds primary key to database table
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
pk_col : str
Column that primary key is applied to
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/tools.py#L354-L372
| null |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 11:08:15 2015
This is a collection of helper functions which work on there own an can be
used by various classes. If there are too many helper-functions, they will
be sorted in different modules.
All special import should be in try/except loops to avoid import errors.
"""
import logging
import pandas as pd
# get_polygon_from_nuts
hlp_fkt = 'get_polygon_from_nuts'
try:
from shapely.wkt import loads as wkt_loads
except:
logging.info(
'You will not be able to use the helper function: {0}'.format(hlp_fkt))
logging.info('Install shapely to use it.')
def get_polygons_from_table(conn, schema, table, g_col='geom', n_col='name'):
sql = '''
SELECT {n_col}, st_astext({g_col})
FROM {schema}.{table};
'''.format(
**{'n_col': n_col, 'g_col': g_col, 'schema': schema, 'table': table})
logging.debug(sql)
raw_data = conn.execute(sql).fetchall()
polygon_dc = {}
for d in raw_data:
polygon_dc[d[0]] = [d[0], wkt_loads(d[1])]
return polygon_dc
def get_polygon_from_nuts(conn, nuts):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
# TODO@Günni
if isinstance(nuts, str):
nuts = [nuts, 'xyz']
logging.debug('Getting polygon from DB')
sql = '''
SELECT st_astext(ST_Transform(st_union(geom), 4326))
FROM oemof.geo_nuts_rg_2013
WHERE nuts_id in {0};
'''.format(tuple(nuts))
return wkt_loads(conn.execute(sql).fetchone()[0])
def get_polygon_from_postgis(conn, schema, table, gcol='geom', union=False):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
# TODO@Günni
logging.debug('Getting polygon from DB table')
if union:
geo_string = 'st_union({0})'.format(gcol)
else:
geo_string = '{0}'.format(gcol)
sql = '''
SELECT st_astext(ST_Transform({geo_string}, 4326))
FROM {schema}.{table};
'''.format(**{'geo_string': geo_string, 'schema': schema, 'table': table})
return wkt_loads(conn.execute(sql).fetchone()[0])
def tz_from_geom(connection, geometry):
r"""Finding the timezone of a given point or polygon geometry, assuming
that the polygon is not crossing a border of a timezone. For a given point
or polygon geometry not located within the timezone dataset (e.g. sea) the
nearest timezone based on the bounding boxes of the geometries is returned.
Parameters
----------
connection : sqlalchemy connection object
A valid connection to a postigs database containing the timezone table
geometry : shapely geometry object
A point or polygon object. The polygon should not cross a timezone.
Returns
-------
string
Timezone using the naming of the IANA time zone database
References
----------
http://postgis.net/docs/manual-2.2/geometry_distance_box.html
"""
# TODO@Günni
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
coords = geometry.centroid
else:
coords = geometry
sql = """
SELECT tzid FROM oemof_test.tz_world
WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
""".format(wkt=coords.wkt)
if not connection.execute(sql).fetchone():
sql = """
SELECT tzid FROM oemof_test.tz_world
ORDER BY ST_PointFromText('{wkt}', 4326) <#> geom LIMIT 1;
""".format(wkt=coords.wkt)
return connection.execute(sql).fetchone()[0]
def get_windzone(conn, geometry):
'Find windzone from map.'
# TODO@Günni
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
coords = geometry.centroid
else:
coords = geometry
sql = """
SELECT zone FROM oemof_test.windzones
WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
""".format(wkt=coords.wkt)
zone = conn.execute(sql).fetchone()
if zone is not None:
zone = zone[0]
else:
zone = 0
return zone
def create_empty_table_serial_primary(conn, schema, table, columns=None,
id_col='id'):
r"""New database table with primary key type serial and empty columns
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
columns : list, optional
Columns that are to be created
id_col : str, optional
Name of index column of database table
Notes
-------
Currently all created table columns will be of type `double precision`.
Feel free to enhance this function by
by generalizing this aspect.
"""
sql_str = """CREATE TABLE {schema}.{table} ({id_col} SERIAL PRIMARY KEY
NOT NULL)
""".format(schema=schema, table=table, id_col=id_col)
conn.execute(sql_str)
# define more columns
if columns is not None:
for col in columns:
col_str = """alter table {schema}.{table} add column {col}
double precision;
""".format(schema=schema, table=table, col=col)
conn.execute(col_str)
def grant_db_access(conn, schema, table, role):
r"""Gives access to database users/ groups
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
role : str
database role that access is granted to
"""
grant_str = """GRANT ALL ON TABLE {schema}.{table}
TO {role} WITH GRANT OPTION;""".format(schema=schema, table=table,
role=role)
conn.execute(grant_str)
def change_owner_to(conn, schema, table, role):
r"""Changes table's ownership to role
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
role : str
database role that access is granted to
"""
sql_str = """ALTER TABLE {schema}.{table}
OWNER TO {role};""".format(schema=schema,
table=table,
role=role)
conn.execute(sql_str)
def db_table2pandas(conn, schema, table, columns=None):
if columns is None:
columns = '*'
sql = "SELECT {0} FROM {1}.{2};".format(columns, schema, table)
logging.debug("SQL query: {0}".format(sql))
results = (conn.execute(sql))
columns = results.keys()
return pd.DataFrame(results.fetchall(), columns=columns)
|
oemof/oemof.db
|
oemof/db/tools.py
|
change_owner_to
|
python
|
def change_owner_to(conn, schema, table, role):
r"""Changes table's ownership to role
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
role : str
database role that access is granted to
"""
sql_str = """ALTER TABLE {schema}.{table}
OWNER TO {role};""".format(schema=schema,
table=table,
role=role)
conn.execute(sql_str)
|
r"""Changes table's ownership to role
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
role : str
database role that access is granted to
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/tools.py#L375-L395
| null |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 17 11:08:15 2015
This is a collection of helper functions which work on there own an can be
used by various classes. If there are too many helper-functions, they will
be sorted in different modules.
All special import should be in try/except loops to avoid import errors.
"""
import logging
import pandas as pd
# get_polygon_from_nuts
hlp_fkt = 'get_polygon_from_nuts'
try:
from shapely.wkt import loads as wkt_loads
except:
logging.info(
'You will not be able to use the helper function: {0}'.format(hlp_fkt))
logging.info('Install shapely to use it.')
def get_polygons_from_table(conn, schema, table, g_col='geom', n_col='name'):
sql = '''
SELECT {n_col}, st_astext({g_col})
FROM {schema}.{table};
'''.format(
**{'n_col': n_col, 'g_col': g_col, 'schema': schema, 'table': table})
logging.debug(sql)
raw_data = conn.execute(sql).fetchall()
polygon_dc = {}
for d in raw_data:
polygon_dc[d[0]] = [d[0], wkt_loads(d[1])]
return polygon_dc
def get_polygon_from_nuts(conn, nuts):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
# TODO@Günni
if isinstance(nuts, str):
nuts = [nuts, 'xyz']
logging.debug('Getting polygon from DB')
sql = '''
SELECT st_astext(ST_Transform(st_union(geom), 4326))
FROM oemof.geo_nuts_rg_2013
WHERE nuts_id in {0};
'''.format(tuple(nuts))
return wkt_loads(conn.execute(sql).fetchone()[0])
def get_polygon_from_postgis(conn, schema, table, gcol='geom', union=False):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
Long_variable_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega } ) = x(n)e^{ - j\omega n}
And even use a greek symbol like :math:`omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a=[1,2,3]
>>> print [x + 3 for x in a]
[4, 5, 6]
>>> print "a\n\nb"
a
b
"""
# TODO@Günni
logging.debug('Getting polygon from DB table')
if union:
geo_string = 'st_union({0})'.format(gcol)
else:
geo_string = '{0}'.format(gcol)
sql = '''
SELECT st_astext(ST_Transform({geo_string}, 4326))
FROM {schema}.{table};
'''.format(**{'geo_string': geo_string, 'schema': schema, 'table': table})
return wkt_loads(conn.execute(sql).fetchone()[0])
def tz_from_geom(connection, geometry):
r"""Finding the timezone of a given point or polygon geometry, assuming
that the polygon is not crossing a border of a timezone. For a given point
or polygon geometry not located within the timezone dataset (e.g. sea) the
nearest timezone based on the bounding boxes of the geometries is returned.
Parameters
----------
connection : sqlalchemy connection object
A valid connection to a postigs database containing the timezone table
geometry : shapely geometry object
A point or polygon object. The polygon should not cross a timezone.
Returns
-------
string
Timezone using the naming of the IANA time zone database
References
----------
http://postgis.net/docs/manual-2.2/geometry_distance_box.html
"""
# TODO@Günni
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
coords = geometry.centroid
else:
coords = geometry
sql = """
SELECT tzid FROM oemof_test.tz_world
WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
""".format(wkt=coords.wkt)
if not connection.execute(sql).fetchone():
sql = """
SELECT tzid FROM oemof_test.tz_world
ORDER BY ST_PointFromText('{wkt}', 4326) <#> geom LIMIT 1;
""".format(wkt=coords.wkt)
return connection.execute(sql).fetchone()[0]
def get_windzone(conn, geometry):
'Find windzone from map.'
# TODO@Günni
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
coords = geometry.centroid
else:
coords = geometry
sql = """
SELECT zone FROM oemof_test.windzones
WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));
""".format(wkt=coords.wkt)
zone = conn.execute(sql).fetchone()
if zone is not None:
zone = zone[0]
else:
zone = 0
return zone
def create_empty_table_serial_primary(conn, schema, table, columns=None,
id_col='id'):
r"""New database table with primary key type serial and empty columns
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
columns : list, optional
Columns that are to be created
id_col : str, optional
Name of index column of database table
Notes
-------
Currently all created table columns will be of type `double precision`.
Feel free to enhance this function by
by generalizing this aspect.
"""
sql_str = """CREATE TABLE {schema}.{table} ({id_col} SERIAL PRIMARY KEY
NOT NULL)
""".format(schema=schema, table=table, id_col=id_col)
conn.execute(sql_str)
# define more columns
if columns is not None:
for col in columns:
col_str = """alter table {schema}.{table} add column {col}
double precision;
""".format(schema=schema, table=table, col=col)
conn.execute(col_str)
def grant_db_access(conn, schema, table, role):
r"""Gives access to database users/ groups
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
role : str
database role that access is granted to
"""
grant_str = """GRANT ALL ON TABLE {schema}.{table}
TO {role} WITH GRANT OPTION;""".format(schema=schema, table=table,
role=role)
conn.execute(grant_str)
def add_primary_key(conn, schema, table, pk_col):
r"""Adds primary key to database table
Parameters
----------
conn : sqlalchemy connection object
A valid connection to a database
schema : str
The database schema
table : str
The database table
pk_col : str
Column that primary key is applied to
"""
sql_str = """alter table {schema}.{table} add primary key ({col})""".format(
schema=schema, table=table, col=pk_col)
conn.execute(sql_str)
def db_table2pandas(conn, schema, table, columns=None):
if columns is None:
columns = '*'
sql = "SELECT {0} FROM {1}.{2};".format(columns, schema, table)
logging.debug("SQL query: {0}".format(sql))
results = (conn.execute(sql))
columns = results.keys()
return pd.DataFrame(results.fetchall(), columns=columns)
|
oemof/oemof.db
|
oemof/db/coastdat.py
|
get_weather
|
python
|
def get_weather(conn, geometry, year):
r"""
Get the weather data for the given geometry and create an oemof
weather object.
"""
rename_dc = {
'ASWDIFD_S': 'dhi',
'ASWDIR_S': 'dirhi',
'PS': 'pressure',
'T_2M': 'temp_air',
'WSS_10M': 'v_wind',
'Z0': 'z0'}
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
# Create MultiWeather
# If polygon covers only one data set switch to SingleWeather
sql_part = """
SELECT sp.gid, ST_AsText(point.geom), ST_AsText(sp.geom)
FROM coastdat.cosmoclmgrid AS sp
JOIN coastdat.spatial AS point ON (sp.gid=point.gid)
WHERE st_intersects(ST_GeomFromText('{wkt}',4326), sp.geom)
""".format(wkt=geometry.wkt)
df = fetch_raw_data(sql_weather_string(year, sql_part), conn, geometry)
obj = create_multi_weather(df, rename_dc)
elif geometry.geom_type == 'Point':
# Create SingleWeather
sql_part = """
SELECT sp.gid, ST_AsText(point.geom), ST_AsText(sp.geom)
FROM coastdat.cosmoclmgrid AS sp
JOIN coastdat.spatial AS point ON (sp.gid=point.gid)
WHERE st_contains(sp.geom, ST_GeomFromText('{wkt}',4326))
""".format(wkt=geometry.wkt)
df = fetch_raw_data(sql_weather_string(year, sql_part), conn, geometry)
obj = create_single_weather(df, rename_dc)
else:
logging.error('Unknown geometry type: {0}'.format(geometry.geom_type))
obj = None
return obj
|
r"""
Get the weather data for the given geometry and create an oemof
weather object.
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/coastdat.py#L14-L51
|
[
"def fetch_raw_data(sql, connection, geometry):\n \"\"\"\n Fetch the coastdat2 from the database, adapt it to the local time zone\n and create a time index.\n \"\"\"\n tmp_dc = {}\n weather_df = pd.DataFrame(\n connection.execute(sql).fetchall(), columns=[\n 'gid', 'geom_point', 'geom_polygon', 'data_id', 'time_series',\n 'dat_id', 'type_id', 'type', 'height', 'year', 'leap_year']).drop(\n 'dat_id', 1)\n\n # Get the timezone of the geometry\n tz = tools.tz_from_geom(connection, geometry)\n\n for ix in weather_df.index:\n # Convert the point of the weather location to a shapely object\n weather_df.loc[ix, 'geom_point'] = wkt_loads(\n weather_df['geom_point'][ix])\n\n # Roll the dataset forward according to the timezone, because the\n # dataset is based on utc (Berlin +1, Kiev +2, London +0)\n utc = timezone('utc')\n offset = int(utc.localize(datetime(2002, 1, 1)).astimezone(\n timezone(tz)).strftime(\"%z\")[:-2])\n\n # Get the year and the length of the data array\n db_year = weather_df.loc[ix, 'year']\n db_len = len(weather_df['time_series'][ix])\n\n # Set absolute time index for the data sets to avoid errors.\n tmp_dc[ix] = pd.Series(\n np.roll(np.array(weather_df['time_series'][ix]), offset),\n index=pd.date_range(pd.datetime(db_year, 1, 1, 0),\n periods=db_len, freq='H', tz=tz))\n weather_df['time_series'] = pd.Series(tmp_dc)\n return weather_df\n",
"def sql_weather_string(year, sql_part):\n \"\"\"\n Creates an sql-string to read all datasets within a given geometry.\n \"\"\"\n # TODO@Günni. Replace sql-String with alchemy/GeoAlchemy\n # Create string parts for where conditions\n\n return '''\n SELECT tsptyti.*, y.leap\n FROM coastdat.year as y\n INNER JOIN (\n SELECT tsptyd.*, sc.time_id\n FROM coastdat.scheduled as sc\n INNER JOIN (\n SELECT tspty.*, dt.name, dt.height\n FROM coastdat.datatype as dt\n INNER JOIN (\n SELECT tsp.*, typ.type_id\n FROM coastdat.typified as typ\n INNER JOIN (\n SELECT spl.*, t.tsarray, t.id\n FROM coastdat.timeseries as t\n INNER JOIN (\n SELECT sps.*, l.data_id\n FROM (\n {sql_part}\n ) as sps\n INNER JOIN coastdat.located as l\n ON (sps.gid = l.spatial_id)) as spl\n ON (spl.data_id = t.id)) as tsp\n ON (tsp.id = typ.data_id)) as tspty\n ON (tspty.type_id = dt.id)) as tsptyd\n ON (tsptyd.id = sc.data_id))as tsptyti\n ON (tsptyti.time_id = y.year)\n where y.year = '{year}'\n ;'''.format(year=year, sql_part=sql_part)\n",
"def create_multi_weather(df, rename_dc):\n \"\"\"Create a list of oemof weather objects if the given geometry is a polygon\n \"\"\"\n weather_list = []\n # Create a pandas.DataFrame with the time series of the weather data set\n # for each data set and append them to a list.\n for gid in df.gid.unique():\n gid_df = df[df.gid == gid]\n obj = create_single_weather(gid_df, rename_dc)\n weather_list.append(obj)\n return weather_list\n",
"def create_single_weather(df, rename_dc):\n \"\"\"Create an oemof weather object for the given geometry\"\"\"\n my_weather = weather.FeedinWeather()\n data_height = {}\n name = None\n # Create a pandas.DataFrame with the time series of the weather data set\n weather_df = pd.DataFrame(index=df.time_series.iloc[0].index)\n for row in df.iterrows():\n key = rename_dc[row[1].type]\n weather_df[key] = row[1].time_series\n data_height[key] = row[1].height if not np.isnan(row[1].height) else 0\n name = row[1].gid\n my_weather.data = weather_df\n my_weather.timezone = weather_df.index.tz\n my_weather.longitude = df.geom_point.iloc[0].x\n my_weather.latitude = df.geom_point.iloc[0].y\n my_weather.geometry = df.geom_point.iloc[0]\n my_weather.data_height = data_height\n my_weather.name = name\n return my_weather\n"
] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
import pandas as pd
import numpy as np
from pytz import timezone
from datetime import datetime
import feedinlib.weather as weather
from . import tools
from shapely.wkt import loads as wkt_loads
def sql_weather_string(year, sql_part):
"""
Creates an sql-string to read all datasets within a given geometry.
"""
# TODO@Günni. Replace sql-String with alchemy/GeoAlchemy
# Create string parts for where conditions
return '''
SELECT tsptyti.*, y.leap
FROM coastdat.year as y
INNER JOIN (
SELECT tsptyd.*, sc.time_id
FROM coastdat.scheduled as sc
INNER JOIN (
SELECT tspty.*, dt.name, dt.height
FROM coastdat.datatype as dt
INNER JOIN (
SELECT tsp.*, typ.type_id
FROM coastdat.typified as typ
INNER JOIN (
SELECT spl.*, t.tsarray, t.id
FROM coastdat.timeseries as t
INNER JOIN (
SELECT sps.*, l.data_id
FROM (
{sql_part}
) as sps
INNER JOIN coastdat.located as l
ON (sps.gid = l.spatial_id)) as spl
ON (spl.data_id = t.id)) as tsp
ON (tsp.id = typ.data_id)) as tspty
ON (tspty.type_id = dt.id)) as tsptyd
ON (tsptyd.id = sc.data_id))as tsptyti
ON (tsptyti.time_id = y.year)
where y.year = '{year}'
;'''.format(year=year, sql_part=sql_part)
def fetch_raw_data(sql, connection, geometry):
"""
Fetch the coastdat2 from the database, adapt it to the local time zone
and create a time index.
"""
tmp_dc = {}
weather_df = pd.DataFrame(
connection.execute(sql).fetchall(), columns=[
'gid', 'geom_point', 'geom_polygon', 'data_id', 'time_series',
'dat_id', 'type_id', 'type', 'height', 'year', 'leap_year']).drop(
'dat_id', 1)
# Get the timezone of the geometry
tz = tools.tz_from_geom(connection, geometry)
for ix in weather_df.index:
# Convert the point of the weather location to a shapely object
weather_df.loc[ix, 'geom_point'] = wkt_loads(
weather_df['geom_point'][ix])
# Roll the dataset forward according to the timezone, because the
# dataset is based on utc (Berlin +1, Kiev +2, London +0)
utc = timezone('utc')
offset = int(utc.localize(datetime(2002, 1, 1)).astimezone(
timezone(tz)).strftime("%z")[:-2])
# Get the year and the length of the data array
db_year = weather_df.loc[ix, 'year']
db_len = len(weather_df['time_series'][ix])
# Set absolute time index for the data sets to avoid errors.
tmp_dc[ix] = pd.Series(
np.roll(np.array(weather_df['time_series'][ix]), offset),
index=pd.date_range(pd.datetime(db_year, 1, 1, 0),
periods=db_len, freq='H', tz=tz))
weather_df['time_series'] = pd.Series(tmp_dc)
return weather_df
def create_single_weather(df, rename_dc):
"""Create an oemof weather object for the given geometry"""
my_weather = weather.FeedinWeather()
data_height = {}
name = None
# Create a pandas.DataFrame with the time series of the weather data set
weather_df = pd.DataFrame(index=df.time_series.iloc[0].index)
for row in df.iterrows():
key = rename_dc[row[1].type]
weather_df[key] = row[1].time_series
data_height[key] = row[1].height if not np.isnan(row[1].height) else 0
name = row[1].gid
my_weather.data = weather_df
my_weather.timezone = weather_df.index.tz
my_weather.longitude = df.geom_point.iloc[0].x
my_weather.latitude = df.geom_point.iloc[0].y
my_weather.geometry = df.geom_point.iloc[0]
my_weather.data_height = data_height
my_weather.name = name
return my_weather
def create_multi_weather(df, rename_dc):
"""Create a list of oemof weather objects if the given geometry is a polygon
"""
weather_list = []
# Create a pandas.DataFrame with the time series of the weather data set
# for each data set and append them to a list.
for gid in df.gid.unique():
gid_df = df[df.gid == gid]
obj = create_single_weather(gid_df, rename_dc)
weather_list.append(obj)
return weather_list
|
oemof/oemof.db
|
oemof/db/coastdat.py
|
fetch_raw_data
|
python
|
def fetch_raw_data(sql, connection, geometry):
tmp_dc = {}
weather_df = pd.DataFrame(
connection.execute(sql).fetchall(), columns=[
'gid', 'geom_point', 'geom_polygon', 'data_id', 'time_series',
'dat_id', 'type_id', 'type', 'height', 'year', 'leap_year']).drop(
'dat_id', 1)
# Get the timezone of the geometry
tz = tools.tz_from_geom(connection, geometry)
for ix in weather_df.index:
# Convert the point of the weather location to a shapely object
weather_df.loc[ix, 'geom_point'] = wkt_loads(
weather_df['geom_point'][ix])
# Roll the dataset forward according to the timezone, because the
# dataset is based on utc (Berlin +1, Kiev +2, London +0)
utc = timezone('utc')
offset = int(utc.localize(datetime(2002, 1, 1)).astimezone(
timezone(tz)).strftime("%z")[:-2])
# Get the year and the length of the data array
db_year = weather_df.loc[ix, 'year']
db_len = len(weather_df['time_series'][ix])
# Set absolute time index for the data sets to avoid errors.
tmp_dc[ix] = pd.Series(
np.roll(np.array(weather_df['time_series'][ix]), offset),
index=pd.date_range(pd.datetime(db_year, 1, 1, 0),
periods=db_len, freq='H', tz=tz))
weather_df['time_series'] = pd.Series(tmp_dc)
return weather_df
|
Fetch the coastdat2 from the database, adapt it to the local time zone
and create a time index.
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/coastdat.py#L92-L128
|
[
"def tz_from_geom(connection, geometry):\n r\"\"\"Finding the timezone of a given point or polygon geometry, assuming\n that the polygon is not crossing a border of a timezone. For a given point\n or polygon geometry not located within the timezone dataset (e.g. sea) the\n nearest timezone based on the bounding boxes of the geometries is returned.\n\n Parameters\n ----------\n connection : sqlalchemy connection object\n A valid connection to a postigs database containing the timezone table\n geometry : shapely geometry object\n A point or polygon object. The polygon should not cross a timezone.\n\n Returns\n -------\n string\n Timezone using the naming of the IANA time zone database\n\n References\n ----------\n http://postgis.net/docs/manual-2.2/geometry_distance_box.html\n \"\"\"\n\n # TODO@Günni\n if geometry.geom_type in ['Polygon', 'MultiPolygon']:\n coords = geometry.centroid\n else:\n coords = geometry\n sql = \"\"\"\n SELECT tzid FROM oemof_test.tz_world\n WHERE st_contains(geom, ST_PointFromText('{wkt}', 4326));\n \"\"\".format(wkt=coords.wkt)\n\n if not connection.execute(sql).fetchone():\n sql = \"\"\"\n SELECT tzid FROM oemof_test.tz_world\n ORDER BY ST_PointFromText('{wkt}', 4326) <#> geom LIMIT 1;\n \"\"\".format(wkt=coords.wkt)\n return connection.execute(sql).fetchone()[0]\n"
] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
import pandas as pd
import numpy as np
from pytz import timezone
from datetime import datetime
import feedinlib.weather as weather
from . import tools
from shapely.wkt import loads as wkt_loads
def get_weather(conn, geometry, year):
r"""
Get the weather data for the given geometry and create an oemof
weather object.
"""
rename_dc = {
'ASWDIFD_S': 'dhi',
'ASWDIR_S': 'dirhi',
'PS': 'pressure',
'T_2M': 'temp_air',
'WSS_10M': 'v_wind',
'Z0': 'z0'}
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
# Create MultiWeather
# If polygon covers only one data set switch to SingleWeather
sql_part = """
SELECT sp.gid, ST_AsText(point.geom), ST_AsText(sp.geom)
FROM coastdat.cosmoclmgrid AS sp
JOIN coastdat.spatial AS point ON (sp.gid=point.gid)
WHERE st_intersects(ST_GeomFromText('{wkt}',4326), sp.geom)
""".format(wkt=geometry.wkt)
df = fetch_raw_data(sql_weather_string(year, sql_part), conn, geometry)
obj = create_multi_weather(df, rename_dc)
elif geometry.geom_type == 'Point':
# Create SingleWeather
sql_part = """
SELECT sp.gid, ST_AsText(point.geom), ST_AsText(sp.geom)
FROM coastdat.cosmoclmgrid AS sp
JOIN coastdat.spatial AS point ON (sp.gid=point.gid)
WHERE st_contains(sp.geom, ST_GeomFromText('{wkt}',4326))
""".format(wkt=geometry.wkt)
df = fetch_raw_data(sql_weather_string(year, sql_part), conn, geometry)
obj = create_single_weather(df, rename_dc)
else:
logging.error('Unknown geometry type: {0}'.format(geometry.geom_type))
obj = None
return obj
def sql_weather_string(year, sql_part):
"""
Creates an sql-string to read all datasets within a given geometry.
"""
# TODO@Günni. Replace sql-String with alchemy/GeoAlchemy
# Create string parts for where conditions
return '''
SELECT tsptyti.*, y.leap
FROM coastdat.year as y
INNER JOIN (
SELECT tsptyd.*, sc.time_id
FROM coastdat.scheduled as sc
INNER JOIN (
SELECT tspty.*, dt.name, dt.height
FROM coastdat.datatype as dt
INNER JOIN (
SELECT tsp.*, typ.type_id
FROM coastdat.typified as typ
INNER JOIN (
SELECT spl.*, t.tsarray, t.id
FROM coastdat.timeseries as t
INNER JOIN (
SELECT sps.*, l.data_id
FROM (
{sql_part}
) as sps
INNER JOIN coastdat.located as l
ON (sps.gid = l.spatial_id)) as spl
ON (spl.data_id = t.id)) as tsp
ON (tsp.id = typ.data_id)) as tspty
ON (tspty.type_id = dt.id)) as tsptyd
ON (tsptyd.id = sc.data_id))as tsptyti
ON (tsptyti.time_id = y.year)
where y.year = '{year}'
;'''.format(year=year, sql_part=sql_part)
def create_single_weather(df, rename_dc):
"""Create an oemof weather object for the given geometry"""
my_weather = weather.FeedinWeather()
data_height = {}
name = None
# Create a pandas.DataFrame with the time series of the weather data set
weather_df = pd.DataFrame(index=df.time_series.iloc[0].index)
for row in df.iterrows():
key = rename_dc[row[1].type]
weather_df[key] = row[1].time_series
data_height[key] = row[1].height if not np.isnan(row[1].height) else 0
name = row[1].gid
my_weather.data = weather_df
my_weather.timezone = weather_df.index.tz
my_weather.longitude = df.geom_point.iloc[0].x
my_weather.latitude = df.geom_point.iloc[0].y
my_weather.geometry = df.geom_point.iloc[0]
my_weather.data_height = data_height
my_weather.name = name
return my_weather
def create_multi_weather(df, rename_dc):
"""Create a list of oemof weather objects if the given geometry is a polygon
"""
weather_list = []
# Create a pandas.DataFrame with the time series of the weather data set
# for each data set and append them to a list.
for gid in df.gid.unique():
gid_df = df[df.gid == gid]
obj = create_single_weather(gid_df, rename_dc)
weather_list.append(obj)
return weather_list
|
oemof/oemof.db
|
oemof/db/coastdat.py
|
create_single_weather
|
python
|
def create_single_weather(df, rename_dc):
my_weather = weather.FeedinWeather()
data_height = {}
name = None
# Create a pandas.DataFrame with the time series of the weather data set
weather_df = pd.DataFrame(index=df.time_series.iloc[0].index)
for row in df.iterrows():
key = rename_dc[row[1].type]
weather_df[key] = row[1].time_series
data_height[key] = row[1].height if not np.isnan(row[1].height) else 0
name = row[1].gid
my_weather.data = weather_df
my_weather.timezone = weather_df.index.tz
my_weather.longitude = df.geom_point.iloc[0].x
my_weather.latitude = df.geom_point.iloc[0].y
my_weather.geometry = df.geom_point.iloc[0]
my_weather.data_height = data_height
my_weather.name = name
return my_weather
|
Create an oemof weather object for the given geometry
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/coastdat.py#L131-L150
| null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
import pandas as pd
import numpy as np
from pytz import timezone
from datetime import datetime
import feedinlib.weather as weather
from . import tools
from shapely.wkt import loads as wkt_loads
def get_weather(conn, geometry, year):
r"""
Get the weather data for the given geometry and create an oemof
weather object.
"""
rename_dc = {
'ASWDIFD_S': 'dhi',
'ASWDIR_S': 'dirhi',
'PS': 'pressure',
'T_2M': 'temp_air',
'WSS_10M': 'v_wind',
'Z0': 'z0'}
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
# Create MultiWeather
# If polygon covers only one data set switch to SingleWeather
sql_part = """
SELECT sp.gid, ST_AsText(point.geom), ST_AsText(sp.geom)
FROM coastdat.cosmoclmgrid AS sp
JOIN coastdat.spatial AS point ON (sp.gid=point.gid)
WHERE st_intersects(ST_GeomFromText('{wkt}',4326), sp.geom)
""".format(wkt=geometry.wkt)
df = fetch_raw_data(sql_weather_string(year, sql_part), conn, geometry)
obj = create_multi_weather(df, rename_dc)
elif geometry.geom_type == 'Point':
# Create SingleWeather
sql_part = """
SELECT sp.gid, ST_AsText(point.geom), ST_AsText(sp.geom)
FROM coastdat.cosmoclmgrid AS sp
JOIN coastdat.spatial AS point ON (sp.gid=point.gid)
WHERE st_contains(sp.geom, ST_GeomFromText('{wkt}',4326))
""".format(wkt=geometry.wkt)
df = fetch_raw_data(sql_weather_string(year, sql_part), conn, geometry)
obj = create_single_weather(df, rename_dc)
else:
logging.error('Unknown geometry type: {0}'.format(geometry.geom_type))
obj = None
return obj
def sql_weather_string(year, sql_part):
"""
Creates an sql-string to read all datasets within a given geometry.
"""
# TODO@Günni. Replace sql-String with alchemy/GeoAlchemy
# Create string parts for where conditions
return '''
SELECT tsptyti.*, y.leap
FROM coastdat.year as y
INNER JOIN (
SELECT tsptyd.*, sc.time_id
FROM coastdat.scheduled as sc
INNER JOIN (
SELECT tspty.*, dt.name, dt.height
FROM coastdat.datatype as dt
INNER JOIN (
SELECT tsp.*, typ.type_id
FROM coastdat.typified as typ
INNER JOIN (
SELECT spl.*, t.tsarray, t.id
FROM coastdat.timeseries as t
INNER JOIN (
SELECT sps.*, l.data_id
FROM (
{sql_part}
) as sps
INNER JOIN coastdat.located as l
ON (sps.gid = l.spatial_id)) as spl
ON (spl.data_id = t.id)) as tsp
ON (tsp.id = typ.data_id)) as tspty
ON (tspty.type_id = dt.id)) as tsptyd
ON (tsptyd.id = sc.data_id))as tsptyti
ON (tsptyti.time_id = y.year)
where y.year = '{year}'
;'''.format(year=year, sql_part=sql_part)
def fetch_raw_data(sql, connection, geometry):
"""
Fetch the coastdat2 from the database, adapt it to the local time zone
and create a time index.
"""
tmp_dc = {}
weather_df = pd.DataFrame(
connection.execute(sql).fetchall(), columns=[
'gid', 'geom_point', 'geom_polygon', 'data_id', 'time_series',
'dat_id', 'type_id', 'type', 'height', 'year', 'leap_year']).drop(
'dat_id', 1)
# Get the timezone of the geometry
tz = tools.tz_from_geom(connection, geometry)
for ix in weather_df.index:
# Convert the point of the weather location to a shapely object
weather_df.loc[ix, 'geom_point'] = wkt_loads(
weather_df['geom_point'][ix])
# Roll the dataset forward according to the timezone, because the
# dataset is based on utc (Berlin +1, Kiev +2, London +0)
utc = timezone('utc')
offset = int(utc.localize(datetime(2002, 1, 1)).astimezone(
timezone(tz)).strftime("%z")[:-2])
# Get the year and the length of the data array
db_year = weather_df.loc[ix, 'year']
db_len = len(weather_df['time_series'][ix])
# Set absolute time index for the data sets to avoid errors.
tmp_dc[ix] = pd.Series(
np.roll(np.array(weather_df['time_series'][ix]), offset),
index=pd.date_range(pd.datetime(db_year, 1, 1, 0),
periods=db_len, freq='H', tz=tz))
weather_df['time_series'] = pd.Series(tmp_dc)
return weather_df
def create_multi_weather(df, rename_dc):
"""Create a list of oemof weather objects if the given geometry is a polygon
"""
weather_list = []
# Create a pandas.DataFrame with the time series of the weather data set
# for each data set and append them to a list.
for gid in df.gid.unique():
gid_df = df[df.gid == gid]
obj = create_single_weather(gid_df, rename_dc)
weather_list.append(obj)
return weather_list
|
oemof/oemof.db
|
oemof/db/coastdat.py
|
create_multi_weather
|
python
|
def create_multi_weather(df, rename_dc):
weather_list = []
# Create a pandas.DataFrame with the time series of the weather data set
# for each data set and append them to a list.
for gid in df.gid.unique():
gid_df = df[df.gid == gid]
obj = create_single_weather(gid_df, rename_dc)
weather_list.append(obj)
return weather_list
|
Create a list of oemof weather objects if the given geometry is a polygon
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/coastdat.py#L153-L163
|
[
"def create_single_weather(df, rename_dc):\n \"\"\"Create an oemof weather object for the given geometry\"\"\"\n my_weather = weather.FeedinWeather()\n data_height = {}\n name = None\n # Create a pandas.DataFrame with the time series of the weather data set\n weather_df = pd.DataFrame(index=df.time_series.iloc[0].index)\n for row in df.iterrows():\n key = rename_dc[row[1].type]\n weather_df[key] = row[1].time_series\n data_height[key] = row[1].height if not np.isnan(row[1].height) else 0\n name = row[1].gid\n my_weather.data = weather_df\n my_weather.timezone = weather_df.index.tz\n my_weather.longitude = df.geom_point.iloc[0].x\n my_weather.latitude = df.geom_point.iloc[0].y\n my_weather.geometry = df.geom_point.iloc[0]\n my_weather.data_height = data_height\n my_weather.name = name\n return my_weather\n"
] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import logging
import pandas as pd
import numpy as np
from pytz import timezone
from datetime import datetime
import feedinlib.weather as weather
from . import tools
from shapely.wkt import loads as wkt_loads
def get_weather(conn, geometry, year):
r"""
Get the weather data for the given geometry and create an oemof
weather object.
"""
rename_dc = {
'ASWDIFD_S': 'dhi',
'ASWDIR_S': 'dirhi',
'PS': 'pressure',
'T_2M': 'temp_air',
'WSS_10M': 'v_wind',
'Z0': 'z0'}
if geometry.geom_type in ['Polygon', 'MultiPolygon']:
# Create MultiWeather
# If polygon covers only one data set switch to SingleWeather
sql_part = """
SELECT sp.gid, ST_AsText(point.geom), ST_AsText(sp.geom)
FROM coastdat.cosmoclmgrid AS sp
JOIN coastdat.spatial AS point ON (sp.gid=point.gid)
WHERE st_intersects(ST_GeomFromText('{wkt}',4326), sp.geom)
""".format(wkt=geometry.wkt)
df = fetch_raw_data(sql_weather_string(year, sql_part), conn, geometry)
obj = create_multi_weather(df, rename_dc)
elif geometry.geom_type == 'Point':
# Create SingleWeather
sql_part = """
SELECT sp.gid, ST_AsText(point.geom), ST_AsText(sp.geom)
FROM coastdat.cosmoclmgrid AS sp
JOIN coastdat.spatial AS point ON (sp.gid=point.gid)
WHERE st_contains(sp.geom, ST_GeomFromText('{wkt}',4326))
""".format(wkt=geometry.wkt)
df = fetch_raw_data(sql_weather_string(year, sql_part), conn, geometry)
obj = create_single_weather(df, rename_dc)
else:
logging.error('Unknown geometry type: {0}'.format(geometry.geom_type))
obj = None
return obj
def sql_weather_string(year, sql_part):
"""
Creates an sql-string to read all datasets within a given geometry.
"""
# TODO@Günni. Replace sql-String with alchemy/GeoAlchemy
# Create string parts for where conditions
return '''
SELECT tsptyti.*, y.leap
FROM coastdat.year as y
INNER JOIN (
SELECT tsptyd.*, sc.time_id
FROM coastdat.scheduled as sc
INNER JOIN (
SELECT tspty.*, dt.name, dt.height
FROM coastdat.datatype as dt
INNER JOIN (
SELECT tsp.*, typ.type_id
FROM coastdat.typified as typ
INNER JOIN (
SELECT spl.*, t.tsarray, t.id
FROM coastdat.timeseries as t
INNER JOIN (
SELECT sps.*, l.data_id
FROM (
{sql_part}
) as sps
INNER JOIN coastdat.located as l
ON (sps.gid = l.spatial_id)) as spl
ON (spl.data_id = t.id)) as tsp
ON (tsp.id = typ.data_id)) as tspty
ON (tspty.type_id = dt.id)) as tsptyd
ON (tsptyd.id = sc.data_id))as tsptyti
ON (tsptyti.time_id = y.year)
where y.year = '{year}'
;'''.format(year=year, sql_part=sql_part)
def fetch_raw_data(sql, connection, geometry):
"""
Fetch the coastdat2 from the database, adapt it to the local time zone
and create a time index.
"""
tmp_dc = {}
weather_df = pd.DataFrame(
connection.execute(sql).fetchall(), columns=[
'gid', 'geom_point', 'geom_polygon', 'data_id', 'time_series',
'dat_id', 'type_id', 'type', 'height', 'year', 'leap_year']).drop(
'dat_id', 1)
# Get the timezone of the geometry
tz = tools.tz_from_geom(connection, geometry)
for ix in weather_df.index:
# Convert the point of the weather location to a shapely object
weather_df.loc[ix, 'geom_point'] = wkt_loads(
weather_df['geom_point'][ix])
# Roll the dataset forward according to the timezone, because the
# dataset is based on utc (Berlin +1, Kiev +2, London +0)
utc = timezone('utc')
offset = int(utc.localize(datetime(2002, 1, 1)).astimezone(
timezone(tz)).strftime("%z")[:-2])
# Get the year and the length of the data array
db_year = weather_df.loc[ix, 'year']
db_len = len(weather_df['time_series'][ix])
# Set absolute time index for the data sets to avoid errors.
tmp_dc[ix] = pd.Series(
np.roll(np.array(weather_df['time_series'][ix]), offset),
index=pd.date_range(pd.datetime(db_year, 1, 1, 0),
periods=db_len, freq='H', tz=tz))
weather_df['time_series'] = pd.Series(tmp_dc)
return weather_df
def create_single_weather(df, rename_dc):
"""Create an oemof weather object for the given geometry"""
my_weather = weather.FeedinWeather()
data_height = {}
name = None
# Create a pandas.DataFrame with the time series of the weather data set
weather_df = pd.DataFrame(index=df.time_series.iloc[0].index)
for row in df.iterrows():
key = rename_dc[row[1].type]
weather_df[key] = row[1].time_series
data_height[key] = row[1].height if not np.isnan(row[1].height) else 0
name = row[1].gid
my_weather.data = weather_df
my_weather.timezone = weather_df.index.tz
my_weather.longitude = df.geom_point.iloc[0].x
my_weather.latitude = df.geom_point.iloc[0].y
my_weather.geometry = df.geom_point.iloc[0]
my_weather.data_height = data_height
my_weather.name = name
return my_weather
|
oemof/oemof.db
|
oemof/db/feedin_pg.py
|
Feedin.aggregate_cap_val
|
python
|
def aggregate_cap_val(self, conn, **kwargs):
'''
Returns the normalised feedin profile and installed capacity for
a given region.
Parameters
----------
region : Region instance
region.geom : shapely.geometry object
Geo-spatial data with information for location/region-shape. The
geometry can be a polygon/multi-polygon or a point.
Returns
-------
feedin_df : pandas dataframe
Dataframe containing the normalised feedin profile of the given
region. Index of the dataframe is the hour of the year; columns
are 'pv_pwr' and 'wind_pwr'.
cap : pandas series
Series containing the installed capacity (in W) of PV and wind
turbines of the given region.
'''
region = kwargs['region']
[pv_df, wind_df, cap] = self.get_timeseries(
conn,
geometry=region.geom,
**kwargs)
if kwargs.get('store', False):
self.store_full_df(pv_df, wind_df, **kwargs)
# Summerize the results to one column for pv and one for wind
cap = cap.sum()
df = pd.concat([pv_df.sum(axis=1) / cap['pv_pwr'],
wind_df.sum(axis=1) / cap['wind_pwr']], axis=1)
feedin_df = df.rename(columns={0: 'pv_pwr', 1: 'wind_pwr'})
return feedin_df, cap
|
Returns the normalised feedin profile and installed capacity for
a given region.
Parameters
----------
region : Region instance
region.geom : shapely.geometry object
Geo-spatial data with information for location/region-shape. The
geometry can be a polygon/multi-polygon or a point.
Returns
-------
feedin_df : pandas dataframe
Dataframe containing the normalised feedin profile of the given
region. Index of the dataframe is the hour of the year; columns
are 'pv_pwr' and 'wind_pwr'.
cap : pandas series
Series containing the installed capacity (in W) of PV and wind
turbines of the given region.
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/feedin_pg.py#L21-L58
|
[
"def get_timeseries(self, conn, **kwargs):\n ''\n weather = coastdat.get_weather(\n conn, kwargs['geometry'], kwargs['year'])\n\n pv_df = 0\n pv_cap = {}\n wind_df = 0\n wind_cap = {}\n\n if not isinstance(weather, list):\n weather = [weather]\n\n for w_cell in weather:\n ee_pps = pg_pp.get_energymap_pps(\n conn, geometry1=w_cell.geometry, geometry2=kwargs['geometry'])\n\n # Find type of wind turbine and its parameters according to the\n # windzone.\n wz = tools.get_windzone(conn, w_cell.geometry)\n\n kwargs['wind_conv_type'] = (kwargs['wka_model_dc'].get(\n wz, kwargs['wka_model']))\n kwargs['d_rotor'] = (kwargs['d_rotor_dc'].get(\n wz, kwargs['d_rotor']))\n kwargs['h_hub'] = (kwargs['h_hub_dc'].get(wz, kwargs['h_hub']))\n\n # Determine the feedin time series for the weather cell\n # Wind energy\n wind_peak_power = ee_pps[ee_pps.type == 'wind_power'].cap.sum()\n wind_power_plant = pp.WindPowerPlant(**kwargs)\n wind_series = wind_power_plant.feedin(\n weather=w_cell, installed_capacity=wind_peak_power)\n wind_series.name = w_cell.name\n wind_cap[w_cell.name] = wind_peak_power\n\n # PV\n pv_peak_power = ee_pps[ee_pps.type == 'solar_power'].cap.sum()\n pv_plant = pp.Photovoltaic(**kwargs)\n pv_series = pv_plant.feedin(\n weather=w_cell, peak_power=pv_peak_power)\n pv_series.name = w_cell.name\n pv_cap[w_cell.name] = pv_peak_power\n\n # Combine the results to a DataFrame\n try:\n pv_df = pd.concat([pv_df, pv_series], axis=1)\n wind_df = pd.concat([wind_df, wind_series], axis=1)\n except:\n pv_df = pv_series.to_frame()\n wind_df = wind_series.to_frame()\n\n # Write capacity into a dataframe\n capw = pd.Series(pd.DataFrame.from_dict(wind_cap, orient='index')[0])\n capw.name = 'wind_pwr'\n cappv = pd.Series(pd.DataFrame.from_dict(pv_cap, orient='index')[0])\n cappv.name = 'pv_pwr'\n cap = pd.concat([capw, cappv], axis=1)\n\n return pv_df, wind_df, cap\n",
"def store_full_df(self, pv_df, wind_df, **kwargs):\n ''\n dpath = kwargs.get(\n 'dpath', path.join(path.expanduser(\"~\"), '.oemof'))\n filename = kwargs.get('filename', 'feedin_' + kwargs['region'].name)\n fullpath = path.join(dpath, filename)\n\n if kwargs['store'] == 'hf5':\n pv_df.to_hdf(fullpath + '.hf5', 'pv_pwr')\n wind_df.to_hdf(fullpath + '.hf5', 'wind_pwr')\n\n if kwargs['store'] == 'csv':\n pv_df.to_csv(fullpath + '_pv.csv')\n wind_df.to_csv(fullpath + '_wind.csv')\n"
] |
class Feedin:
''
def __init__(self):
''
pass
def get_timeseries(self, conn, **kwargs):
''
weather = coastdat.get_weather(
conn, kwargs['geometry'], kwargs['year'])
pv_df = 0
pv_cap = {}
wind_df = 0
wind_cap = {}
if not isinstance(weather, list):
weather = [weather]
for w_cell in weather:
ee_pps = pg_pp.get_energymap_pps(
conn, geometry1=w_cell.geometry, geometry2=kwargs['geometry'])
# Find type of wind turbine and its parameters according to the
# windzone.
wz = tools.get_windzone(conn, w_cell.geometry)
kwargs['wind_conv_type'] = (kwargs['wka_model_dc'].get(
wz, kwargs['wka_model']))
kwargs['d_rotor'] = (kwargs['d_rotor_dc'].get(
wz, kwargs['d_rotor']))
kwargs['h_hub'] = (kwargs['h_hub_dc'].get(wz, kwargs['h_hub']))
# Determine the feedin time series for the weather cell
# Wind energy
wind_peak_power = ee_pps[ee_pps.type == 'wind_power'].cap.sum()
wind_power_plant = pp.WindPowerPlant(**kwargs)
wind_series = wind_power_plant.feedin(
weather=w_cell, installed_capacity=wind_peak_power)
wind_series.name = w_cell.name
wind_cap[w_cell.name] = wind_peak_power
# PV
pv_peak_power = ee_pps[ee_pps.type == 'solar_power'].cap.sum()
pv_plant = pp.Photovoltaic(**kwargs)
pv_series = pv_plant.feedin(
weather=w_cell, peak_power=pv_peak_power)
pv_series.name = w_cell.name
pv_cap[w_cell.name] = pv_peak_power
# Combine the results to a DataFrame
try:
pv_df = pd.concat([pv_df, pv_series], axis=1)
wind_df = pd.concat([wind_df, wind_series], axis=1)
except:
pv_df = pv_series.to_frame()
wind_df = wind_series.to_frame()
# Write capacity into a dataframe
capw = pd.Series(pd.DataFrame.from_dict(wind_cap, orient='index')[0])
capw.name = 'wind_pwr'
cappv = pd.Series(pd.DataFrame.from_dict(pv_cap, orient='index')[0])
cappv.name = 'pv_pwr'
cap = pd.concat([capw, cappv], axis=1)
return pv_df, wind_df, cap
def store_full_df(self, pv_df, wind_df, **kwargs):
''
dpath = kwargs.get(
'dpath', path.join(path.expanduser("~"), '.oemof'))
filename = kwargs.get('filename', 'feedin_' + kwargs['region'].name)
fullpath = path.join(dpath, filename)
if kwargs['store'] == 'hf5':
pv_df.to_hdf(fullpath + '.hf5', 'pv_pwr')
wind_df.to_hdf(fullpath + '.hf5', 'wind_pwr')
if kwargs['store'] == 'csv':
pv_df.to_csv(fullpath + '_pv.csv')
wind_df.to_csv(fullpath + '_wind.csv')
|
oemof/oemof.db
|
oemof/db/config.py
|
load_config
|
python
|
def load_config(filename):
if filename is None:
filename = ''
abs_filename = os.path.join(os.getcwd(), filename)
global FILE
# find the config file
if os.path.isfile(filename):
FILE = filename
elif os.path.isfile(abs_filename):
FILE = abs_filename
elif os.path.isfile(FILE):
pass
else:
if os.path.dirname(filename):
file_not_found = filename
else:
file_not_found = abs_filename
file_not_found_message(file_not_found)
# load config
init(FILE)
|
Load data from config file to `cfg` that can be accessed by get, set
afterwards.
Specify absolute or relative path to your config file.
:param filename: Relative or absolute path
:type filename: str
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/config.py#L48-L81
|
[
"def init(FILE):\n \"\"\"\n Read config file\n\n :param FILE: Absolute path to config file (incl. filename)\n :type FILE: str\n \"\"\"\n try:\n cfg.read(FILE)\n global _loaded\n _loaded = True\n except:\n file_not_found_message(FILE)\n"
] |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 5 12:26:40 2014
:module-author: steffen
:filename: config.py
This module provides a highlevel layer for reading and writing config files.
There must be a file called "config.ini" in the root-folder of the project.
The file has to be of the following structure to be imported correctly.
# this is a comment \n
# the filestructure is like: \n
\n
[netCDF] \n
RootFolder = c://netCDF \n
FilePrefix = cd2_ \n
\n
[mySQL] \n
host = localhost \n
user = guest \n
password = root \n
database = znes \n
\n
[SectionName] \n
OptionName = value \n
Option2 = value2 \n
"""
import os
import logging
try:
import configparser as cp
except:
# to be compatible with Python2.7
import ConfigParser as cp
FILENAME = 'config.ini'
FILE = os.path.join(os.path.expanduser("~"), '.oemof', FILENAME)
cfg = cp.RawConfigParser()
_loaded = False
def file_not_found_message(file_not_found):
"""
Show error message incl. help if file not found
:param filename:
:type filename: str
"""
logging.error(
"""
Config file {file} cannot be found. Make sure this file exists!
An exemplary section in the config file looks as follows
[database]
username = username under which to connect to the database
database = name of the database from which to read
host = host to connect to
port = port to connect to
For further advice, see in the docs (https://oemofdb.readthedocs.io)
how to format the config.
""".format(file=file_not_found))
def main():
pass
def init(FILE):
"""
Read config file
:param FILE: Absolute path to config file (incl. filename)
:type FILE: str
"""
try:
cfg.read(FILE)
global _loaded
_loaded = True
except:
file_not_found_message(FILE)
def get(section, key):
"""
returns the value of a given key of a given section of the main
config file.
:param section: the section.
:type section: str.
:param key: the key.
:type key: str.
:returns: the value which will be casted to float, int or boolean.
if no cast is successfull, the raw string will be returned.
"""
# FILE = 'config_misc'
if not _loaded:
init(FILE)
try:
return cfg.getfloat(section, key)
except Exception:
try:
return cfg.getint(section, key)
except:
try:
return cfg.getboolean(section, key)
except:
return cfg.get(section, key)
def set(section, key, value):
"""
sets a value to a [section] key - pair.
if the section doesn't exist yet, it will be created.
:param section: the section.
:type section: str.
:param key: the key.
:type key: str.
:param value: the value.
:type value: float, int, str.
"""
if not _loaded:
init()
if not cfg.has_section(section):
cfg.add_section(section)
cfg.set(section, key, value)
with open(FILE, 'w') as configfile:
cfg.write(configfile)
if __name__ == "__main__":
main()
|
oemof/oemof.db
|
oemof/db/config.py
|
init
|
python
|
def init(FILE):
try:
cfg.read(FILE)
global _loaded
_loaded = True
except:
file_not_found_message(FILE)
|
Read config file
:param FILE: Absolute path to config file (incl. filename)
:type FILE: str
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/config.py#L113-L125
|
[
"def file_not_found_message(file_not_found):\n \"\"\"\n Show error message incl. help if file not found\n\n :param filename:\n :type filename: str\n \"\"\"\n\n logging.error(\n \"\"\"\n Config file {file} cannot be found. Make sure this file exists!\n\n An exemplary section in the config file looks as follows\n\n [database]\n username = username under which to connect to the database\n database = name of the database from which to read\n host = host to connect to\n port = port to connect to\n\n For further advice, see in the docs (https://oemofdb.readthedocs.io)\n how to format the config.\n \"\"\".format(file=file_not_found))\n"
] |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 5 12:26:40 2014
:module-author: steffen
:filename: config.py
This module provides a highlevel layer for reading and writing config files.
There must be a file called "config.ini" in the root-folder of the project.
The file has to be of the following structure to be imported correctly.
# this is a comment \n
# the filestructure is like: \n
\n
[netCDF] \n
RootFolder = c://netCDF \n
FilePrefix = cd2_ \n
\n
[mySQL] \n
host = localhost \n
user = guest \n
password = root \n
database = znes \n
\n
[SectionName] \n
OptionName = value \n
Option2 = value2 \n
"""
import os
import logging
try:
import configparser as cp
except:
# to be compatible with Python2.7
import ConfigParser as cp
FILENAME = 'config.ini'
FILE = os.path.join(os.path.expanduser("~"), '.oemof', FILENAME)
cfg = cp.RawConfigParser()
_loaded = False
def load_config(filename):
"""
Load data from config file to `cfg` that can be accessed by get, set
afterwards.
Specify absolute or relative path to your config file.
:param filename: Relative or absolute path
:type filename: str
"""
if filename is None:
filename = ''
abs_filename = os.path.join(os.getcwd(), filename)
global FILE
# find the config file
if os.path.isfile(filename):
FILE = filename
elif os.path.isfile(abs_filename):
FILE = abs_filename
elif os.path.isfile(FILE):
pass
else:
if os.path.dirname(filename):
file_not_found = filename
else:
file_not_found = abs_filename
file_not_found_message(file_not_found)
# load config
init(FILE)
def file_not_found_message(file_not_found):
"""
Show error message incl. help if file not found
:param filename:
:type filename: str
"""
logging.error(
"""
Config file {file} cannot be found. Make sure this file exists!
An exemplary section in the config file looks as follows
[database]
username = username under which to connect to the database
database = name of the database from which to read
host = host to connect to
port = port to connect to
For further advice, see in the docs (https://oemofdb.readthedocs.io)
how to format the config.
""".format(file=file_not_found))
def main():
pass
def get(section, key):
"""
returns the value of a given key of a given section of the main
config file.
:param section: the section.
:type section: str.
:param key: the key.
:type key: str.
:returns: the value which will be casted to float, int or boolean.
if no cast is successfull, the raw string will be returned.
"""
# FILE = 'config_misc'
if not _loaded:
init(FILE)
try:
return cfg.getfloat(section, key)
except Exception:
try:
return cfg.getint(section, key)
except:
try:
return cfg.getboolean(section, key)
except:
return cfg.get(section, key)
def set(section, key, value):
"""
sets a value to a [section] key - pair.
if the section doesn't exist yet, it will be created.
:param section: the section.
:type section: str.
:param key: the key.
:type key: str.
:param value: the value.
:type value: float, int, str.
"""
if not _loaded:
init()
if not cfg.has_section(section):
cfg.add_section(section)
cfg.set(section, key, value)
with open(FILE, 'w') as configfile:
cfg.write(configfile)
if __name__ == "__main__":
main()
|
oemof/oemof.db
|
oemof/db/config.py
|
get
|
python
|
def get(section, key):
# FILE = 'config_misc'
if not _loaded:
init(FILE)
try:
return cfg.getfloat(section, key)
except Exception:
try:
return cfg.getint(section, key)
except:
try:
return cfg.getboolean(section, key)
except:
return cfg.get(section, key)
|
returns the value of a given key of a given section of the main
config file.
:param section: the section.
:type section: str.
:param key: the key.
:type key: str.
:returns: the value which will be casted to float, int or boolean.
if no cast is successfull, the raw string will be returned.
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/config.py#L128-L154
|
[
"def init(FILE):\n \"\"\"\n Read config file\n\n :param FILE: Absolute path to config file (incl. filename)\n :type FILE: str\n \"\"\"\n try:\n cfg.read(FILE)\n global _loaded\n _loaded = True\n except:\n file_not_found_message(FILE)\n"
] |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 5 12:26:40 2014
:module-author: steffen
:filename: config.py
This module provides a highlevel layer for reading and writing config files.
There must be a file called "config.ini" in the root-folder of the project.
The file has to be of the following structure to be imported correctly.
# this is a comment \n
# the filestructure is like: \n
\n
[netCDF] \n
RootFolder = c://netCDF \n
FilePrefix = cd2_ \n
\n
[mySQL] \n
host = localhost \n
user = guest \n
password = root \n
database = znes \n
\n
[SectionName] \n
OptionName = value \n
Option2 = value2 \n
"""
import os
import logging
try:
import configparser as cp
except:
# to be compatible with Python2.7
import ConfigParser as cp
FILENAME = 'config.ini'
FILE = os.path.join(os.path.expanduser("~"), '.oemof', FILENAME)
cfg = cp.RawConfigParser()
_loaded = False
def load_config(filename):
"""
Load data from config file to `cfg` that can be accessed by get, set
afterwards.
Specify absolute or relative path to your config file.
:param filename: Relative or absolute path
:type filename: str
"""
if filename is None:
filename = ''
abs_filename = os.path.join(os.getcwd(), filename)
global FILE
# find the config file
if os.path.isfile(filename):
FILE = filename
elif os.path.isfile(abs_filename):
FILE = abs_filename
elif os.path.isfile(FILE):
pass
else:
if os.path.dirname(filename):
file_not_found = filename
else:
file_not_found = abs_filename
file_not_found_message(file_not_found)
# load config
init(FILE)
def file_not_found_message(file_not_found):
"""
Show error message incl. help if file not found
:param filename:
:type filename: str
"""
logging.error(
"""
Config file {file} cannot be found. Make sure this file exists!
An exemplary section in the config file looks as follows
[database]
username = username under which to connect to the database
database = name of the database from which to read
host = host to connect to
port = port to connect to
For further advice, see in the docs (https://oemofdb.readthedocs.io)
how to format the config.
""".format(file=file_not_found))
def main():
pass
def init(FILE):
"""
Read config file
:param FILE: Absolute path to config file (incl. filename)
:type FILE: str
"""
try:
cfg.read(FILE)
global _loaded
_loaded = True
except:
file_not_found_message(FILE)
def set(section, key, value):
"""
sets a value to a [section] key - pair.
if the section doesn't exist yet, it will be created.
:param section: the section.
:type section: str.
:param key: the key.
:type key: str.
:param value: the value.
:type value: float, int, str.
"""
if not _loaded:
init()
if not cfg.has_section(section):
cfg.add_section(section)
cfg.set(section, key, value)
with open(FILE, 'w') as configfile:
cfg.write(configfile)
if __name__ == "__main__":
main()
|
oemof/oemof.db
|
oemof/db/__init__.py
|
url
|
python
|
def url(section="postGIS", config_file=None):
cfg.load_config(config_file)
try:
pw = keyring.get_password(cfg.get(section, "database"),
cfg.get(section, "username"))
except NoSectionError as e:
print("There is no section {section} in your config file. Please "
"choose one available section from your config file or "
"specify a new one!".format(
section=section))
exit(-1)
if pw is None:
try:
pw = cfg.get(section, "pw")
except option:
pw = getpass.getpass(prompt="No password available in your "\
"keyring for database {database}. "
"\n\nEnter your password to " \
"store it in "
"keyring:".format(database=section))
keyring.set_password(section, cfg.get(section, "username"), pw)
except NoSectionError:
print("Unable to find the 'postGIS' section in oemof's config." +
"\nExiting.")
exit(-1)
return "postgresql+psycopg2://{user}:{passwd}@{host}:{port}/{db}".format(
user=cfg.get(section, "username"),
passwd=pw,
host=cfg.get(section, "host"),
db=cfg.get(section, "database"),
port=int(cfg.get(section, "port")))
|
Retrieve the URL used to connect to the database.
Use this if you have your own means of accessing the database and do not
want to use :func:`engine` or :func:`connection`.
Parameters
----------
section : str, optional
The `config.ini` section corresponding to the targeted database.
It should contain all the details that needed to set up a connection.
Returns
-------
database URL : str
The URL with which one can connect to the database. Be careful as this
will probably contain sensitive data like the username/password
combination.
config_file : str, optional
Relative of absolute of config.ini. If not specified, it tries to read
from .oemof/config.ini in your HOME dir
Notes
-----
For documentation on config.ini see the README section on
:ref:`configuring <readme#configuration>` :mod:`oemof.db`.
|
train
|
https://github.com/oemof/oemof.db/blob/d51ac50187f03a875bd7ce5991ed4772e8b77b93/oemof/db/__init__.py#L11-L73
| null |
from configparser import NoOptionError as option, NoSectionError
from sqlalchemy import create_engine
import keyring
from . import config as cfg
from oemof.db.tools import db_table2pandas
import getpass
__version__ = '0.0.6dev'
def engine(section="postGIS", config_file=None):
"""Creates engine object for database access
If keyword argument `section` is used it requires an existing config.ini
file at the right location.
Parameters
----------
section : str, optional
Section (in config.ini) of targeted database containing connection
details that are used to set up connection
config_file : str, optional
Relative of absolute of config.ini. If not specified, it tries to read
from .oemof/config.ini in your HOME dir
Returns
-------
engine : :class:`sqlalchemy.engine.Engine`
Engine for sqlalchemy
Notes
-----
For documentation on config.ini see the README section on
:ref:`configuring <readme#configuration>` :mod:`oemof.db`.
"""
return create_engine(url(section, config_file=config_file))
def connection(section="postGIS", config_file=None):
"""Database connection method of sqlalchemy engine object
This function purely calls the `connect()` method of the engine object
returned by :py:func:`engine`.
For description of parameters see :py:func:`engine`.
"""
return engine(section=section, config_file=config_file).connect()
|
wooga/play-deliver
|
playdeliver/sync_command.py
|
SyncCommand.execute
|
python
|
def execute(self):
try:
if self.upstream:
if self.options['listings'] is True:
listing.upload(self.client, self.source_directory)
self.client.commit()
if self.options['images'] is True:
image.upload(self.client, self.source_directory)
self.client.commit()
if self.options['inapp'] is True:
inapp_product.upload(self.client, self.source_directory)
else:
if self.options['listings'] is True:
listing.download(self.client, self.source_directory)
if self.options['images'] is True:
image.download(self.client, self.source_directory)
if self.options['inapp'] is True:
inapp_product.download(self.client, self.source_directory)
except client.AccessTokenRefreshError:
print(
'The credentials have been revoked or expired, please re-run'
'the application to re-authorize')
|
Execute the command.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/sync_command.py#L43-L66
|
[
"def upload(client, source_dir):\n \"\"\"\n Upload images to play store.\n\n The function will iterate through source_dir and upload all matching\n image_types found in folder herachy.\n \"\"\"\n print('')\n print('upload images')\n print('-------------')\n base_image_folders = [\n os.path.join(source_dir, 'images', x) for x in image_types]\n\n for type_folder in base_image_folders:\n if os.path.exists(type_folder):\n image_type = os.path.basename(type_folder)\n langfolders = filter(os.path.isdir, list_dir_abspath(type_folder))\n for language_dir in langfolders:\n language = os.path.basename(language_dir)\n delete_and_upload_images(\n client, image_type, language, type_folder)\n",
"def upload(client, source_dir):\n \"\"\"Upload listing files in source_dir. folder herachy.\"\"\"\n print('')\n print('upload store listings')\n print('---------------------')\n listings_folder = os.path.join(source_dir, 'listings')\n langfolders = filter(os.path.isdir, list_dir_abspath(listings_folder))\n\n for language_dir in langfolders:\n language = os.path.basename(language_dir)\n with open(os.path.join(language_dir, 'listing.json')) as listings_file:\n listing = json.load(listings_file)\n listing_response = client.update(\n 'listings', language=language, body=listing)\n\n print(' Listing for language %s was updated.' %\n listing_response['language'])\n",
"def upload(client, source_dir):\n \"\"\"Upload inappproducts to play store.\"\"\"\n print('')\n print('upload inappproducs')\n print('---------------------')\n\n products_folder = os.path.join(source_dir, 'products')\n product_files = filter(os.path.isfile, list_dir_abspath(products_folder))\n\n current_product_skus = map(lambda product: product['sku'], client.list_inappproducts())\n print(current_product_skus)\n for product_file in product_files:\n with open(product_file) as product_file:\n product = json.load(product_file)\n #check if the product is new\n sku = product['sku']\n product['packageName'] = client.package_name\n print(sku)\n if sku in current_product_skus:\n print(\"update product {0}\".format(sku))\n client.update_inappproduct(product, sku)\n else:\n print(\"create product {0}\".format(sku))\n client.insert_inappproduct(product)\n",
"def download(client, target_dir):\n \"\"\"Download images from play store into folder herachy.\"\"\"\n print('download image previews')\n print(\n \"Warning! Downloaded images are only previews!\"\n \"They may be to small for upload.\")\n tree = {}\n listings = client.list('listings')\n languages = map(lambda listing: listing['language'], listings)\n\n parameters = [{'imageType': image_type, 'language': language}\n for image_type in image_types for language in languages]\n tree = {image_type: {language: list()\n for language in languages}\n for image_type in image_types}\n\n for params in parameters:\n result = client.list('images', **params)\n image_type = params['imageType']\n language = params['language']\n tree[image_type][language] = map(\n lambda r: r['url'], result)\n\n for image_type, language_map in tree.items():\n for language, files in language_map.items():\n if len(files) > 0:\n mkdir_p(\n os.path.join(target_dir, 'images', image_type, language))\n if image_type in single_image_types:\n if len(files) > 0:\n image_url = files[0]\n path = os.path.join(\n target_dir,\n 'images',\n image_type,\n language,\n image_type)\n load_and_save_image(image_url, path)\n else:\n for idx, image_url in enumerate(files):\n path = os.path.join(\n target_dir,\n 'images',\n image_type,\n language,\n image_type + '_' + str(idx))\n load_and_save_image(image_url, path)\n",
"def download(client, target_dir):\n \"\"\"Download listing files from play and saves them into folder herachy.\"\"\"\n print('')\n print('download store listings')\n print('---------------------')\n listings = client.list('listings')\n for listing in listings:\n path = os.path.join(target_dir, 'listings', listing['language'])\n mkdir_p(path)\n with open(os.path.join(path, 'listing.json'), 'w') as outfile:\n print(\"save listing for {0}\".format(listing['language']))\n json.dump(\n listing, outfile, sort_keys=True,\n indent=4, separators=(',', ': '))\n",
"def download(client, target_dir):\n \"\"\"Download inappproducts from play store.\"\"\"\n print('')\n print(\"download inappproducts\")\n print('---------------------')\n products = client.list_inappproducts()\n\n for product in products:\n path = os.path.join(target_dir, 'products')\n del product['packageName']\n mkdir_p(path)\n with open(os.path.join(path, product['sku'] + '.json'), 'w') as outfile:\n print(\"save product for {0}\".format(product['sku']))\n json.dump(\n product, outfile, sort_keys=True,\n indent=4, separators=(',', ': '))\n"
] |
class SyncCommand(object):
"""The Sync command executes the up-/download of items from play."""
def __init__(self, package_name, source_directory, upstream,
credentials, **options):
"""
Create new SyncCommand with given params.
package_name the app package to upload download items to
credentials = a GoogleCredentials object
source_directory = the directory to sync from/to
upstream = uplaod or download items from/to play
options = additional options
"""
super(SyncCommand, self).__init__()
self.package_name = package_name
self.credentials = credentials
self.source_directory = source_directory
self.upstream = upstream
self.options = options
self._init_credentials()
self._initialize_client()
def _initialize_client(self):
self.client = Client(self.package_name, self.service)
def _init_credentials(self):
http = httplib2.Http()
http = self.credentials.authorize(http)
self.service = build('androidpublisher', 'v2', http=http)
|
wooga/play-deliver
|
playdeliver/inapp_product.py
|
upload
|
python
|
def upload(client, source_dir):
print('')
print('upload inappproducs')
print('---------------------')
products_folder = os.path.join(source_dir, 'products')
product_files = filter(os.path.isfile, list_dir_abspath(products_folder))
current_product_skus = map(lambda product: product['sku'], client.list_inappproducts())
print(current_product_skus)
for product_file in product_files:
with open(product_file) as product_file:
product = json.load(product_file)
#check if the product is new
sku = product['sku']
product['packageName'] = client.package_name
print(sku)
if sku in current_product_skus:
print("update product {0}".format(sku))
client.update_inappproduct(product, sku)
else:
print("create product {0}".format(sku))
client.insert_inappproduct(product)
|
Upload inappproducts to play store.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/inapp_product.py#L7-L30
|
[
"def list_dir_abspath(path):\n \"\"\"\n Return a list absolute file paths.\n\n see mkdir_p os.listdir.\n \"\"\"\n return map(lambda f: os.path.join(path, f), os.listdir(path))\n"
] |
"""This module helps for uploading and downloading inappproducts from/to play."""
import os
import json
from file_util import mkdir_p
from file_util import list_dir_abspath
def download(client, target_dir):
"""Download inappproducts from play store."""
print('')
print("download inappproducts")
print('---------------------')
products = client.list_inappproducts()
for product in products:
path = os.path.join(target_dir, 'products')
del product['packageName']
mkdir_p(path)
with open(os.path.join(path, product['sku'] + '.json'), 'w') as outfile:
print("save product for {0}".format(product['sku']))
json.dump(
product, outfile, sort_keys=True,
indent=4, separators=(',', ': '))
|
wooga/play-deliver
|
playdeliver/inapp_product.py
|
download
|
python
|
def download(client, target_dir):
print('')
print("download inappproducts")
print('---------------------')
products = client.list_inappproducts()
for product in products:
path = os.path.join(target_dir, 'products')
del product['packageName']
mkdir_p(path)
with open(os.path.join(path, product['sku'] + '.json'), 'w') as outfile:
print("save product for {0}".format(product['sku']))
json.dump(
product, outfile, sort_keys=True,
indent=4, separators=(',', ': '))
|
Download inappproducts from play store.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/inapp_product.py#L33-L48
|
[
"def mkdir_p(path):\n \"\"\"Create a new directory with with all missing folders in between.\"\"\"\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n"
] |
"""This module helps for uploading and downloading inappproducts from/to play."""
import os
import json
from file_util import mkdir_p
from file_util import list_dir_abspath
def upload(client, source_dir):
"""Upload inappproducts to play store."""
print('')
print('upload inappproducs')
print('---------------------')
products_folder = os.path.join(source_dir, 'products')
product_files = filter(os.path.isfile, list_dir_abspath(products_folder))
current_product_skus = map(lambda product: product['sku'], client.list_inappproducts())
print(current_product_skus)
for product_file in product_files:
with open(product_file) as product_file:
product = json.load(product_file)
#check if the product is new
sku = product['sku']
product['packageName'] = client.package_name
print(sku)
if sku in current_product_skus:
print("update product {0}".format(sku))
client.update_inappproduct(product, sku)
else:
print("create product {0}".format(sku))
client.insert_inappproduct(product)
|
wooga/play-deliver
|
playdeliver/client.py
|
Client.list
|
python
|
def list(self, service_name, **params):
result = self._invoke_call(service_name, 'list', **params)
if result is not None:
return result.get(service_name, list())
return list()
|
convinent access method for list.
service_name describes the endpoint to call
the `list` function on.
images.list or apks.list.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/client.py#L25-L37
|
[
"def _invoke_call(self, service_name, function_name, **params):\n self.ensure_edit_id()\n params = self.build_params(params)\n service_impl = getattr(self.edits(), service_name)\n method = getattr(service_impl(), function_name)\n\n if method and callable(method):\n return method(**params).execute()\n pass\n return None\n"
] |
class Client(object):
"""
Client object which handles google api edits.
It hold the service user credentials and app package name.
"""
def __init__(self, package_name, service):
"""
create new client object.
package_name = the app package you want to access
credentials_file = path to credentials json
email = the service user email
key = the service user key
"""
super(Client, self).__init__()
self.package_name = package_name
self.service = service
self.edit_id = None
def list_inappproducts(self):
"""temp function to list inapp products."""
result = self.service.inappproducts().list(
packageName=self.package_name).execute()
if result is not None:
return result.get('inappproduct', list())
return list()
def insert_inappproduct(self, product):
return self.service.inappproducts().insert(
packageName=self.package_name, body=product).execute()
def update_inappproduct(self, product, sku):
return self.service.inappproducts().update(
packageName=self.package_name, sku=sku, body=product).execute()
def update(self, service_name, **params):
"""
convinent access method for update.
service_name describes the endpoint to call
the `update` function on.
images.update or apks.update.
"""
return self._invoke_call(service_name, 'update', **params)
def deleteall(self, service_name, **params):
"""
convinent access method for deleteall.
service_name describes the endpoint to call
the `deleteall` function on.
images.deleteall or apks.deleteall.
"""
return self._invoke_call(service_name, 'deleteall', **params)
def upload(self, service_name, **params):
"""
convinent access method for upload.
service_name describes the endpoint to call
the `upload` function on.
images.upload or apks.upload.
"""
return self._invoke_call(service_name, 'upload', **params)
def commit(self):
"""commit current edits."""
request = self.edits().commit(**self.build_params()).execute()
print 'Edit "%s" has been committed' % (request['id'])
self.edit_id = None
def _invoke_call(self, service_name, function_name, **params):
self.ensure_edit_id()
params = self.build_params(params)
service_impl = getattr(self.edits(), service_name)
method = getattr(service_impl(), function_name)
if method and callable(method):
return method(**params).execute()
pass
return None
def build_params(self, params={}):
"""
build a params dictionary with current editId and packageName.
use optional params parameter
to merge additional params into resulting dictionary.
"""
z = params.copy()
z.update({'editId': self.edit_id, 'packageName': self.package_name})
return z
def edits(self):
"""Return current edits object."""
return self.service.edits()
def ensure_edit_id(self):
"""create edit id if edit id is None."""
if self.edit_id is None:
edit_request = self.edits().insert(
body={}, packageName=self.package_name)
result = edit_request.execute()
self.edit_id = result['id']
|
wooga/play-deliver
|
playdeliver/client.py
|
Client.list_inappproducts
|
python
|
def list_inappproducts(self):
result = self.service.inappproducts().list(
packageName=self.package_name).execute()
if result is not None:
return result.get('inappproduct', list())
return list()
|
temp function to list inapp products.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/client.py#L39-L46
| null |
class Client(object):
"""
Client object which handles google api edits.
It hold the service user credentials and app package name.
"""
def __init__(self, package_name, service):
"""
create new client object.
package_name = the app package you want to access
credentials_file = path to credentials json
email = the service user email
key = the service user key
"""
super(Client, self).__init__()
self.package_name = package_name
self.service = service
self.edit_id = None
def list(self, service_name, **params):
"""
convinent access method for list.
service_name describes the endpoint to call
the `list` function on.
images.list or apks.list.
"""
result = self._invoke_call(service_name, 'list', **params)
if result is not None:
return result.get(service_name, list())
return list()
def insert_inappproduct(self, product):
return self.service.inappproducts().insert(
packageName=self.package_name, body=product).execute()
def update_inappproduct(self, product, sku):
return self.service.inappproducts().update(
packageName=self.package_name, sku=sku, body=product).execute()
def update(self, service_name, **params):
"""
convinent access method for update.
service_name describes the endpoint to call
the `update` function on.
images.update or apks.update.
"""
return self._invoke_call(service_name, 'update', **params)
def deleteall(self, service_name, **params):
"""
convinent access method for deleteall.
service_name describes the endpoint to call
the `deleteall` function on.
images.deleteall or apks.deleteall.
"""
return self._invoke_call(service_name, 'deleteall', **params)
def upload(self, service_name, **params):
"""
convinent access method for upload.
service_name describes the endpoint to call
the `upload` function on.
images.upload or apks.upload.
"""
return self._invoke_call(service_name, 'upload', **params)
def commit(self):
"""commit current edits."""
request = self.edits().commit(**self.build_params()).execute()
print 'Edit "%s" has been committed' % (request['id'])
self.edit_id = None
def _invoke_call(self, service_name, function_name, **params):
self.ensure_edit_id()
params = self.build_params(params)
service_impl = getattr(self.edits(), service_name)
method = getattr(service_impl(), function_name)
if method and callable(method):
return method(**params).execute()
pass
return None
def build_params(self, params={}):
"""
build a params dictionary with current editId and packageName.
use optional params parameter
to merge additional params into resulting dictionary.
"""
z = params.copy()
z.update({'editId': self.edit_id, 'packageName': self.package_name})
return z
def edits(self):
"""Return current edits object."""
return self.service.edits()
def ensure_edit_id(self):
"""create edit id if edit id is None."""
if self.edit_id is None:
edit_request = self.edits().insert(
body={}, packageName=self.package_name)
result = edit_request.execute()
self.edit_id = result['id']
|
wooga/play-deliver
|
playdeliver/client.py
|
Client.commit
|
python
|
def commit(self):
request = self.edits().commit(**self.build_params()).execute()
print 'Edit "%s" has been committed' % (request['id'])
self.edit_id = None
|
commit current edits.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/client.py#L89-L94
|
[
"def build_params(self, params={}):\n \"\"\"\n build a params dictionary with current editId and packageName.\n\n use optional params parameter\n to merge additional params into resulting dictionary.\n \"\"\"\n z = params.copy()\n z.update({'editId': self.edit_id, 'packageName': self.package_name})\n return z\n",
"def edits(self):\n \"\"\"Return current edits object.\"\"\"\n return self.service.edits()\n"
] |
class Client(object):
"""
Client object which handles google api edits.
It hold the service user credentials and app package name.
"""
def __init__(self, package_name, service):
"""
create new client object.
package_name = the app package you want to access
credentials_file = path to credentials json
email = the service user email
key = the service user key
"""
super(Client, self).__init__()
self.package_name = package_name
self.service = service
self.edit_id = None
def list(self, service_name, **params):
"""
convinent access method for list.
service_name describes the endpoint to call
the `list` function on.
images.list or apks.list.
"""
result = self._invoke_call(service_name, 'list', **params)
if result is not None:
return result.get(service_name, list())
return list()
def list_inappproducts(self):
"""temp function to list inapp products."""
result = self.service.inappproducts().list(
packageName=self.package_name).execute()
if result is not None:
return result.get('inappproduct', list())
return list()
def insert_inappproduct(self, product):
return self.service.inappproducts().insert(
packageName=self.package_name, body=product).execute()
def update_inappproduct(self, product, sku):
return self.service.inappproducts().update(
packageName=self.package_name, sku=sku, body=product).execute()
def update(self, service_name, **params):
"""
convinent access method for update.
service_name describes the endpoint to call
the `update` function on.
images.update or apks.update.
"""
return self._invoke_call(service_name, 'update', **params)
def deleteall(self, service_name, **params):
"""
convinent access method for deleteall.
service_name describes the endpoint to call
the `deleteall` function on.
images.deleteall or apks.deleteall.
"""
return self._invoke_call(service_name, 'deleteall', **params)
def upload(self, service_name, **params):
"""
convinent access method for upload.
service_name describes the endpoint to call
the `upload` function on.
images.upload or apks.upload.
"""
return self._invoke_call(service_name, 'upload', **params)
def _invoke_call(self, service_name, function_name, **params):
self.ensure_edit_id()
params = self.build_params(params)
service_impl = getattr(self.edits(), service_name)
method = getattr(service_impl(), function_name)
if method and callable(method):
return method(**params).execute()
pass
return None
def build_params(self, params={}):
"""
build a params dictionary with current editId and packageName.
use optional params parameter
to merge additional params into resulting dictionary.
"""
z = params.copy()
z.update({'editId': self.edit_id, 'packageName': self.package_name})
return z
def edits(self):
"""Return current edits object."""
return self.service.edits()
def ensure_edit_id(self):
"""create edit id if edit id is None."""
if self.edit_id is None:
edit_request = self.edits().insert(
body={}, packageName=self.package_name)
result = edit_request.execute()
self.edit_id = result['id']
|
wooga/play-deliver
|
playdeliver/client.py
|
Client.build_params
|
python
|
def build_params(self, params={}):
z = params.copy()
z.update({'editId': self.edit_id, 'packageName': self.package_name})
return z
|
build a params dictionary with current editId and packageName.
use optional params parameter
to merge additional params into resulting dictionary.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/client.py#L107-L116
| null |
class Client(object):
"""
Client object which handles google api edits.
It hold the service user credentials and app package name.
"""
def __init__(self, package_name, service):
"""
create new client object.
package_name = the app package you want to access
credentials_file = path to credentials json
email = the service user email
key = the service user key
"""
super(Client, self).__init__()
self.package_name = package_name
self.service = service
self.edit_id = None
def list(self, service_name, **params):
"""
convinent access method for list.
service_name describes the endpoint to call
the `list` function on.
images.list or apks.list.
"""
result = self._invoke_call(service_name, 'list', **params)
if result is not None:
return result.get(service_name, list())
return list()
def list_inappproducts(self):
"""temp function to list inapp products."""
result = self.service.inappproducts().list(
packageName=self.package_name).execute()
if result is not None:
return result.get('inappproduct', list())
return list()
def insert_inappproduct(self, product):
return self.service.inappproducts().insert(
packageName=self.package_name, body=product).execute()
def update_inappproduct(self, product, sku):
return self.service.inappproducts().update(
packageName=self.package_name, sku=sku, body=product).execute()
def update(self, service_name, **params):
"""
convinent access method for update.
service_name describes the endpoint to call
the `update` function on.
images.update or apks.update.
"""
return self._invoke_call(service_name, 'update', **params)
def deleteall(self, service_name, **params):
"""
convinent access method for deleteall.
service_name describes the endpoint to call
the `deleteall` function on.
images.deleteall or apks.deleteall.
"""
return self._invoke_call(service_name, 'deleteall', **params)
def upload(self, service_name, **params):
"""
convinent access method for upload.
service_name describes the endpoint to call
the `upload` function on.
images.upload or apks.upload.
"""
return self._invoke_call(service_name, 'upload', **params)
def commit(self):
"""commit current edits."""
request = self.edits().commit(**self.build_params()).execute()
print 'Edit "%s" has been committed' % (request['id'])
self.edit_id = None
def _invoke_call(self, service_name, function_name, **params):
self.ensure_edit_id()
params = self.build_params(params)
service_impl = getattr(self.edits(), service_name)
method = getattr(service_impl(), function_name)
if method and callable(method):
return method(**params).execute()
pass
return None
def edits(self):
"""Return current edits object."""
return self.service.edits()
def ensure_edit_id(self):
"""create edit id if edit id is None."""
if self.edit_id is None:
edit_request = self.edits().insert(
body={}, packageName=self.package_name)
result = edit_request.execute()
self.edit_id = result['id']
|
wooga/play-deliver
|
playdeliver/client.py
|
Client.ensure_edit_id
|
python
|
def ensure_edit_id(self):
if self.edit_id is None:
edit_request = self.edits().insert(
body={}, packageName=self.package_name)
result = edit_request.execute()
self.edit_id = result['id']
|
create edit id if edit id is None.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/client.py#L122-L128
|
[
"def edits(self):\n \"\"\"Return current edits object.\"\"\"\n return self.service.edits()\n"
] |
class Client(object):
"""
Client object which handles google api edits.
It hold the service user credentials and app package name.
"""
def __init__(self, package_name, service):
"""
create new client object.
package_name = the app package you want to access
credentials_file = path to credentials json
email = the service user email
key = the service user key
"""
super(Client, self).__init__()
self.package_name = package_name
self.service = service
self.edit_id = None
def list(self, service_name, **params):
"""
convinent access method for list.
service_name describes the endpoint to call
the `list` function on.
images.list or apks.list.
"""
result = self._invoke_call(service_name, 'list', **params)
if result is not None:
return result.get(service_name, list())
return list()
def list_inappproducts(self):
"""temp function to list inapp products."""
result = self.service.inappproducts().list(
packageName=self.package_name).execute()
if result is not None:
return result.get('inappproduct', list())
return list()
def insert_inappproduct(self, product):
return self.service.inappproducts().insert(
packageName=self.package_name, body=product).execute()
def update_inappproduct(self, product, sku):
return self.service.inappproducts().update(
packageName=self.package_name, sku=sku, body=product).execute()
def update(self, service_name, **params):
"""
convinent access method for update.
service_name describes the endpoint to call
the `update` function on.
images.update or apks.update.
"""
return self._invoke_call(service_name, 'update', **params)
def deleteall(self, service_name, **params):
"""
convinent access method for deleteall.
service_name describes the endpoint to call
the `deleteall` function on.
images.deleteall or apks.deleteall.
"""
return self._invoke_call(service_name, 'deleteall', **params)
def upload(self, service_name, **params):
"""
convinent access method for upload.
service_name describes the endpoint to call
the `upload` function on.
images.upload or apks.upload.
"""
return self._invoke_call(service_name, 'upload', **params)
def commit(self):
"""commit current edits."""
request = self.edits().commit(**self.build_params()).execute()
print 'Edit "%s" has been committed' % (request['id'])
self.edit_id = None
def _invoke_call(self, service_name, function_name, **params):
self.ensure_edit_id()
params = self.build_params(params)
service_impl = getattr(self.edits(), service_name)
method = getattr(service_impl(), function_name)
if method and callable(method):
return method(**params).execute()
pass
return None
def build_params(self, params={}):
"""
build a params dictionary with current editId and packageName.
use optional params parameter
to merge additional params into resulting dictionary.
"""
z = params.copy()
z.update({'editId': self.edit_id, 'packageName': self.package_name})
return z
def edits(self):
"""Return current edits object."""
return self.service.edits()
|
wooga/play-deliver
|
playdeliver/file_util.py
|
list_dir_abspath
|
python
|
def list_dir_abspath(path):
return map(lambda f: os.path.join(path, f), os.listdir(path))
|
Return a list absolute file paths.
see mkdir_p os.listdir.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/file_util.py#L7-L13
| null |
"""module with utility functions for the file system."""
import os
import errno
def mkdir_p(path):
"""Create a new directory with with all missing folders in between."""
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
|
wooga/play-deliver
|
playdeliver/playdeliver.py
|
execute
|
python
|
def execute(options):
# Load the key in PKCS 12 format that you downloaded from the Google APIs
# Console when you created your Service account.
package_name = options['<package>']
source_directory = options['<output_dir>']
if options['upload'] is True:
upstream = True
else:
upstream = False
sub_tasks = {'images': options['--images'], 'listings': options['--listings'], 'inapp': options['--inapp']}
if sub_tasks == {'images': False, 'listings': False, 'inapp': False}:
sub_tasks = {'images': True, 'listings': True, 'inapp': True}
credentials = create_credentials(credentials_file=options['--credentials'],
service_email=options['--service-email'],
service_key=options['--key'])
command = SyncCommand(
package_name, source_directory, upstream, credentials, **sub_tasks)
command.execute()
|
execute the tool with given options.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/playdeliver.py#L20-L42
|
[
"def create_credentials(credentials_file=None,\n service_email=None,\n service_key=None,\n scope='https://www.googleapis.com/auth/androidpublisher'):\n \"\"\"\n Create Google credentials object.\n\n If given credentials_file is None, try to retrieve file path from environment \n or look up file in homefolder.\n \"\"\"\n credentials = None\n if service_email is None and service_key is None:\n print(credentials_file)\n if credentials_file is None:\n # try load file from env\n key = 'PLAY_DELIVER_CREDENTIALS'\n if key in os.environ:\n credentials_file = os.environ[key]\n\n if credentials_file is None:\n # try to find the file in home\n path = os.path.expanduser('~/.playdeliver/credentials.json')\n if os.path.exists(path):\n credentials_file = path\n\n if credentials_file is not None:\n credentials = client.GoogleCredentials.from_stream(\n credentials_file)\n credentials = credentials.create_scoped(scope)\n else:\n sys.exit(\"no credentials\")\n else:\n credentials = client.SignedJwtAssertionCredentials(\n service_email, _load_key(service_key), scope=scope)\n return credentials\n",
"def execute(self):\n \"\"\"Execute the command.\"\"\"\n try:\n if self.upstream:\n if self.options['listings'] is True:\n listing.upload(self.client, self.source_directory)\n self.client.commit()\n if self.options['images'] is True:\n image.upload(self.client, self.source_directory)\n self.client.commit()\n if self.options['inapp'] is True:\n inapp_product.upload(self.client, self.source_directory)\n else:\n if self.options['listings'] is True:\n listing.download(self.client, self.source_directory)\n if self.options['images'] is True:\n image.download(self.client, self.source_directory)\n if self.options['inapp'] is True:\n inapp_product.download(self.client, self.source_directory)\n\n except client.AccessTokenRefreshError:\n print(\n 'The credentials have been revoked or expired, please re-run'\n 'the application to re-authorize')\n"
] |
#!/usr/bin/env python
"""starting point for the playdeliver tool."""
import os
import sys
from sync_command import SyncCommand
from oauth2client import client
def _load_key(location):
if location is not None and os.path.isfile(location):
f = open(location, 'rb')
key = f.read()
f.close()
return key
else:
sys.exit("no key file found")
def create_credentials(credentials_file=None,
service_email=None,
service_key=None,
scope='https://www.googleapis.com/auth/androidpublisher'):
"""
Create Google credentials object.
If given credentials_file is None, try to retrieve file path from environment
or look up file in homefolder.
"""
credentials = None
if service_email is None and service_key is None:
print(credentials_file)
if credentials_file is None:
# try load file from env
key = 'PLAY_DELIVER_CREDENTIALS'
if key in os.environ:
credentials_file = os.environ[key]
if credentials_file is None:
# try to find the file in home
path = os.path.expanduser('~/.playdeliver/credentials.json')
if os.path.exists(path):
credentials_file = path
if credentials_file is not None:
credentials = client.GoogleCredentials.from_stream(
credentials_file)
credentials = credentials.create_scoped(scope)
else:
sys.exit("no credentials")
else:
credentials = client.SignedJwtAssertionCredentials(
service_email, _load_key(service_key), scope=scope)
return credentials
|
wooga/play-deliver
|
playdeliver/playdeliver.py
|
create_credentials
|
python
|
def create_credentials(credentials_file=None,
service_email=None,
service_key=None,
scope='https://www.googleapis.com/auth/androidpublisher'):
credentials = None
if service_email is None and service_key is None:
print(credentials_file)
if credentials_file is None:
# try load file from env
key = 'PLAY_DELIVER_CREDENTIALS'
if key in os.environ:
credentials_file = os.environ[key]
if credentials_file is None:
# try to find the file in home
path = os.path.expanduser('~/.playdeliver/credentials.json')
if os.path.exists(path):
credentials_file = path
if credentials_file is not None:
credentials = client.GoogleCredentials.from_stream(
credentials_file)
credentials = credentials.create_scoped(scope)
else:
sys.exit("no credentials")
else:
credentials = client.SignedJwtAssertionCredentials(
service_email, _load_key(service_key), scope=scope)
return credentials
|
Create Google credentials object.
If given credentials_file is None, try to retrieve file path from environment
or look up file in homefolder.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/playdeliver.py#L45-L79
|
[
"def _load_key(location):\n if location is not None and os.path.isfile(location):\n f = open(location, 'rb')\n key = f.read()\n f.close()\n return key\n else:\n sys.exit(\"no key file found\")\n"
] |
#!/usr/bin/env python
"""starting point for the playdeliver tool."""
import os
import sys
from sync_command import SyncCommand
from oauth2client import client
def _load_key(location):
if location is not None and os.path.isfile(location):
f = open(location, 'rb')
key = f.read()
f.close()
return key
else:
sys.exit("no key file found")
def execute(options):
"""execute the tool with given options."""
# Load the key in PKCS 12 format that you downloaded from the Google APIs
# Console when you created your Service account.
package_name = options['<package>']
source_directory = options['<output_dir>']
if options['upload'] is True:
upstream = True
else:
upstream = False
sub_tasks = {'images': options['--images'], 'listings': options['--listings'], 'inapp': options['--inapp']}
if sub_tasks == {'images': False, 'listings': False, 'inapp': False}:
sub_tasks = {'images': True, 'listings': True, 'inapp': True}
credentials = create_credentials(credentials_file=options['--credentials'],
service_email=options['--service-email'],
service_key=options['--key'])
command = SyncCommand(
package_name, source_directory, upstream, credentials, **sub_tasks)
command.execute()
|
wooga/play-deliver
|
playdeliver/listing.py
|
upload
|
python
|
def upload(client, source_dir):
print('')
print('upload store listings')
print('---------------------')
listings_folder = os.path.join(source_dir, 'listings')
langfolders = filter(os.path.isdir, list_dir_abspath(listings_folder))
for language_dir in langfolders:
language = os.path.basename(language_dir)
with open(os.path.join(language_dir, 'listing.json')) as listings_file:
listing = json.load(listings_file)
listing_response = client.update(
'listings', language=language, body=listing)
print(' Listing for language %s was updated.' %
listing_response['language'])
|
Upload listing files in source_dir. folder herachy.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/listing.py#L8-L24
|
[
"def list_dir_abspath(path):\n \"\"\"\n Return a list absolute file paths.\n\n see mkdir_p os.listdir.\n \"\"\"\n return map(lambda f: os.path.join(path, f), os.listdir(path))\n"
] |
"""This module helps for uploading and downloading listings from/to play."""
import os
import json
from file_util import mkdir_p
from file_util import list_dir_abspath
def download(client, target_dir):
"""Download listing files from play and saves them into folder herachy."""
print('')
print('download store listings')
print('---------------------')
listings = client.list('listings')
for listing in listings:
path = os.path.join(target_dir, 'listings', listing['language'])
mkdir_p(path)
with open(os.path.join(path, 'listing.json'), 'w') as outfile:
print("save listing for {0}".format(listing['language']))
json.dump(
listing, outfile, sort_keys=True,
indent=4, separators=(',', ': '))
|
wooga/play-deliver
|
playdeliver/listing.py
|
download
|
python
|
def download(client, target_dir):
print('')
print('download store listings')
print('---------------------')
listings = client.list('listings')
for listing in listings:
path = os.path.join(target_dir, 'listings', listing['language'])
mkdir_p(path)
with open(os.path.join(path, 'listing.json'), 'w') as outfile:
print("save listing for {0}".format(listing['language']))
json.dump(
listing, outfile, sort_keys=True,
indent=4, separators=(',', ': '))
|
Download listing files from play and saves them into folder herachy.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/listing.py#L27-L40
|
[
"def mkdir_p(path):\n \"\"\"Create a new directory with with all missing folders in between.\"\"\"\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n"
] |
"""This module helps for uploading and downloading listings from/to play."""
import os
import json
from file_util import mkdir_p
from file_util import list_dir_abspath
def upload(client, source_dir):
"""Upload listing files in source_dir. folder herachy."""
print('')
print('upload store listings')
print('---------------------')
listings_folder = os.path.join(source_dir, 'listings')
langfolders = filter(os.path.isdir, list_dir_abspath(listings_folder))
for language_dir in langfolders:
language = os.path.basename(language_dir)
with open(os.path.join(language_dir, 'listing.json')) as listings_file:
listing = json.load(listings_file)
listing_response = client.update(
'listings', language=language, body=listing)
print(' Listing for language %s was updated.' %
listing_response['language'])
|
wooga/play-deliver
|
playdeliver/image.py
|
upload
|
python
|
def upload(client, source_dir):
print('')
print('upload images')
print('-------------')
base_image_folders = [
os.path.join(source_dir, 'images', x) for x in image_types]
for type_folder in base_image_folders:
if os.path.exists(type_folder):
image_type = os.path.basename(type_folder)
langfolders = filter(os.path.isdir, list_dir_abspath(type_folder))
for language_dir in langfolders:
language = os.path.basename(language_dir)
delete_and_upload_images(
client, image_type, language, type_folder)
|
Upload images to play store.
The function will iterate through source_dir and upload all matching
image_types found in folder herachy.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/image.py#L23-L43
|
[
"def list_dir_abspath(path):\n \"\"\"\n Return a list absolute file paths.\n\n see mkdir_p os.listdir.\n \"\"\"\n return map(lambda f: os.path.join(path, f), os.listdir(path))\n",
"def delete_and_upload_images(client, image_type, language, base_dir):\n \"\"\"\n Delete and upload images with given image_type and language.\n\n Function will stage delete and stage upload all\n found images in matching folders.\n \"\"\"\n print('{0} {1}'.format(image_type, language))\n files_in_dir = os.listdir(os.path.join(base_dir, language))\n delete_result = client.deleteall(\n 'images', imageType=image_type, language=language)\n\n deleted = delete_result.get('deleted', list())\n for deleted_files in deleted:\n print(' delete image: {0}'.format(deleted_files['id']))\n\n for image_file in files_in_dir[:8]:\n image_file_path = os.path.join(base_dir, language, image_file)\n image_response = client.upload(\n 'images',\n imageType=image_type,\n language=language,\n media_body=image_file_path)\n print(\" upload image {0} new id {1}\".format(image_file, image_response['image']['id']))\n"
] |
"""This module helps for uploading and downloading images from/to play."""
import imghdr
import httplib2
import os
from file_util import mkdir_p
from file_util import list_dir_abspath
image_types = ["featureGraphic",
"icon",
"phoneScreenshots",
"promoGraphic",
"sevenInchScreenshots",
"tenInchScreenshots",
"tvBanner",
"tvScreenshots"]
single_image_types = ['tvBanner',
'promoGraphic',
'icon',
'featureGraphic']
def delete_and_upload_images(client, image_type, language, base_dir):
"""
Delete and upload images with given image_type and language.
Function will stage delete and stage upload all
found images in matching folders.
"""
print('{0} {1}'.format(image_type, language))
files_in_dir = os.listdir(os.path.join(base_dir, language))
delete_result = client.deleteall(
'images', imageType=image_type, language=language)
deleted = delete_result.get('deleted', list())
for deleted_files in deleted:
print(' delete image: {0}'.format(deleted_files['id']))
for image_file in files_in_dir[:8]:
image_file_path = os.path.join(base_dir, language, image_file)
image_response = client.upload(
'images',
imageType=image_type,
language=language,
media_body=image_file_path)
print(" upload image {0} new id {1}".format(image_file, image_response['image']['id']))
def download(client, target_dir):
"""Download images from play store into folder herachy."""
print('download image previews')
print(
"Warning! Downloaded images are only previews!"
"They may be to small for upload.")
tree = {}
listings = client.list('listings')
languages = map(lambda listing: listing['language'], listings)
parameters = [{'imageType': image_type, 'language': language}
for image_type in image_types for language in languages]
tree = {image_type: {language: list()
for language in languages}
for image_type in image_types}
for params in parameters:
result = client.list('images', **params)
image_type = params['imageType']
language = params['language']
tree[image_type][language] = map(
lambda r: r['url'], result)
for image_type, language_map in tree.items():
for language, files in language_map.items():
if len(files) > 0:
mkdir_p(
os.path.join(target_dir, 'images', image_type, language))
if image_type in single_image_types:
if len(files) > 0:
image_url = files[0]
path = os.path.join(
target_dir,
'images',
image_type,
language,
image_type)
load_and_save_image(image_url, path)
else:
for idx, image_url in enumerate(files):
path = os.path.join(
target_dir,
'images',
image_type,
language,
image_type + '_' + str(idx))
load_and_save_image(image_url, path)
def load_and_save_image(url, destination):
"""Download image from given url and saves it to destination."""
from urllib2 import Request, urlopen, URLError, HTTPError
# create the url and the request
req = Request(url)
# Open the url
try:
f = urlopen(req)
print "downloading " + url
# Open our local file for writing
local_file = open(destination, "wb")
# Write to our local file
local_file.write(f.read())
local_file.close()
file_type = imghdr.what(destination)
local_file = open(destination, "rb")
data = local_file.read()
local_file.close()
final_file = open(destination + '.' + file_type, "wb")
final_file.write(data)
final_file.close()
print('save image preview {0}'.format(destination + '.' + file_type))
os.remove(destination)
# handle errors
except HTTPError, e:
print "HTTP Error:", e.code, url
except URLError, e:
print "URL Error:", e.reason, url
|
wooga/play-deliver
|
playdeliver/image.py
|
delete_and_upload_images
|
python
|
def delete_and_upload_images(client, image_type, language, base_dir):
print('{0} {1}'.format(image_type, language))
files_in_dir = os.listdir(os.path.join(base_dir, language))
delete_result = client.deleteall(
'images', imageType=image_type, language=language)
deleted = delete_result.get('deleted', list())
for deleted_files in deleted:
print(' delete image: {0}'.format(deleted_files['id']))
for image_file in files_in_dir[:8]:
image_file_path = os.path.join(base_dir, language, image_file)
image_response = client.upload(
'images',
imageType=image_type,
language=language,
media_body=image_file_path)
print(" upload image {0} new id {1}".format(image_file, image_response['image']['id']))
|
Delete and upload images with given image_type and language.
Function will stage delete and stage upload all
found images in matching folders.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/image.py#L46-L69
| null |
"""This module helps for uploading and downloading images from/to play."""
import imghdr
import httplib2
import os
from file_util import mkdir_p
from file_util import list_dir_abspath
image_types = ["featureGraphic",
"icon",
"phoneScreenshots",
"promoGraphic",
"sevenInchScreenshots",
"tenInchScreenshots",
"tvBanner",
"tvScreenshots"]
single_image_types = ['tvBanner',
'promoGraphic',
'icon',
'featureGraphic']
def upload(client, source_dir):
"""
Upload images to play store.
The function will iterate through source_dir and upload all matching
image_types found in folder herachy.
"""
print('')
print('upload images')
print('-------------')
base_image_folders = [
os.path.join(source_dir, 'images', x) for x in image_types]
for type_folder in base_image_folders:
if os.path.exists(type_folder):
image_type = os.path.basename(type_folder)
langfolders = filter(os.path.isdir, list_dir_abspath(type_folder))
for language_dir in langfolders:
language = os.path.basename(language_dir)
delete_and_upload_images(
client, image_type, language, type_folder)
def download(client, target_dir):
"""Download images from play store into folder herachy."""
print('download image previews')
print(
"Warning! Downloaded images are only previews!"
"They may be to small for upload.")
tree = {}
listings = client.list('listings')
languages = map(lambda listing: listing['language'], listings)
parameters = [{'imageType': image_type, 'language': language}
for image_type in image_types for language in languages]
tree = {image_type: {language: list()
for language in languages}
for image_type in image_types}
for params in parameters:
result = client.list('images', **params)
image_type = params['imageType']
language = params['language']
tree[image_type][language] = map(
lambda r: r['url'], result)
for image_type, language_map in tree.items():
for language, files in language_map.items():
if len(files) > 0:
mkdir_p(
os.path.join(target_dir, 'images', image_type, language))
if image_type in single_image_types:
if len(files) > 0:
image_url = files[0]
path = os.path.join(
target_dir,
'images',
image_type,
language,
image_type)
load_and_save_image(image_url, path)
else:
for idx, image_url in enumerate(files):
path = os.path.join(
target_dir,
'images',
image_type,
language,
image_type + '_' + str(idx))
load_and_save_image(image_url, path)
def load_and_save_image(url, destination):
"""Download image from given url and saves it to destination."""
from urllib2 import Request, urlopen, URLError, HTTPError
# create the url and the request
req = Request(url)
# Open the url
try:
f = urlopen(req)
print "downloading " + url
# Open our local file for writing
local_file = open(destination, "wb")
# Write to our local file
local_file.write(f.read())
local_file.close()
file_type = imghdr.what(destination)
local_file = open(destination, "rb")
data = local_file.read()
local_file.close()
final_file = open(destination + '.' + file_type, "wb")
final_file.write(data)
final_file.close()
print('save image preview {0}'.format(destination + '.' + file_type))
os.remove(destination)
# handle errors
except HTTPError, e:
print "HTTP Error:", e.code, url
except URLError, e:
print "URL Error:", e.reason, url
|
wooga/play-deliver
|
playdeliver/image.py
|
download
|
python
|
def download(client, target_dir):
print('download image previews')
print(
"Warning! Downloaded images are only previews!"
"They may be to small for upload.")
tree = {}
listings = client.list('listings')
languages = map(lambda listing: listing['language'], listings)
parameters = [{'imageType': image_type, 'language': language}
for image_type in image_types for language in languages]
tree = {image_type: {language: list()
for language in languages}
for image_type in image_types}
for params in parameters:
result = client.list('images', **params)
image_type = params['imageType']
language = params['language']
tree[image_type][language] = map(
lambda r: r['url'], result)
for image_type, language_map in tree.items():
for language, files in language_map.items():
if len(files) > 0:
mkdir_p(
os.path.join(target_dir, 'images', image_type, language))
if image_type in single_image_types:
if len(files) > 0:
image_url = files[0]
path = os.path.join(
target_dir,
'images',
image_type,
language,
image_type)
load_and_save_image(image_url, path)
else:
for idx, image_url in enumerate(files):
path = os.path.join(
target_dir,
'images',
image_type,
language,
image_type + '_' + str(idx))
load_and_save_image(image_url, path)
|
Download images from play store into folder herachy.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/image.py#L72-L118
|
[
"def mkdir_p(path):\n \"\"\"Create a new directory with with all missing folders in between.\"\"\"\n try:\n os.makedirs(path)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n",
"def load_and_save_image(url, destination):\n \"\"\"Download image from given url and saves it to destination.\"\"\"\n from urllib2 import Request, urlopen, URLError, HTTPError\n # create the url and the request\n req = Request(url)\n\n # Open the url\n try:\n f = urlopen(req)\n print \"downloading \" + url\n\n # Open our local file for writing\n\n local_file = open(destination, \"wb\")\n # Write to our local file\n local_file.write(f.read())\n local_file.close()\n\n file_type = imghdr.what(destination)\n local_file = open(destination, \"rb\")\n data = local_file.read()\n local_file.close()\n\n final_file = open(destination + '.' + file_type, \"wb\")\n final_file.write(data)\n final_file.close()\n print('save image preview {0}'.format(destination + '.' + file_type))\n os.remove(destination)\n\n # handle errors\n except HTTPError, e:\n print \"HTTP Error:\", e.code, url\n except URLError, e:\n print \"URL Error:\", e.reason, url\n"
] |
"""This module helps for uploading and downloading images from/to play."""
import imghdr
import httplib2
import os
from file_util import mkdir_p
from file_util import list_dir_abspath
image_types = ["featureGraphic",
"icon",
"phoneScreenshots",
"promoGraphic",
"sevenInchScreenshots",
"tenInchScreenshots",
"tvBanner",
"tvScreenshots"]
single_image_types = ['tvBanner',
'promoGraphic',
'icon',
'featureGraphic']
def upload(client, source_dir):
"""
Upload images to play store.
The function will iterate through source_dir and upload all matching
image_types found in folder herachy.
"""
print('')
print('upload images')
print('-------------')
base_image_folders = [
os.path.join(source_dir, 'images', x) for x in image_types]
for type_folder in base_image_folders:
if os.path.exists(type_folder):
image_type = os.path.basename(type_folder)
langfolders = filter(os.path.isdir, list_dir_abspath(type_folder))
for language_dir in langfolders:
language = os.path.basename(language_dir)
delete_and_upload_images(
client, image_type, language, type_folder)
def delete_and_upload_images(client, image_type, language, base_dir):
"""
Delete and upload images with given image_type and language.
Function will stage delete and stage upload all
found images in matching folders.
"""
print('{0} {1}'.format(image_type, language))
files_in_dir = os.listdir(os.path.join(base_dir, language))
delete_result = client.deleteall(
'images', imageType=image_type, language=language)
deleted = delete_result.get('deleted', list())
for deleted_files in deleted:
print(' delete image: {0}'.format(deleted_files['id']))
for image_file in files_in_dir[:8]:
image_file_path = os.path.join(base_dir, language, image_file)
image_response = client.upload(
'images',
imageType=image_type,
language=language,
media_body=image_file_path)
print(" upload image {0} new id {1}".format(image_file, image_response['image']['id']))
def load_and_save_image(url, destination):
"""Download image from given url and saves it to destination."""
from urllib2 import Request, urlopen, URLError, HTTPError
# create the url and the request
req = Request(url)
# Open the url
try:
f = urlopen(req)
print "downloading " + url
# Open our local file for writing
local_file = open(destination, "wb")
# Write to our local file
local_file.write(f.read())
local_file.close()
file_type = imghdr.what(destination)
local_file = open(destination, "rb")
data = local_file.read()
local_file.close()
final_file = open(destination + '.' + file_type, "wb")
final_file.write(data)
final_file.close()
print('save image preview {0}'.format(destination + '.' + file_type))
os.remove(destination)
# handle errors
except HTTPError, e:
print "HTTP Error:", e.code, url
except URLError, e:
print "URL Error:", e.reason, url
|
wooga/play-deliver
|
playdeliver/image.py
|
load_and_save_image
|
python
|
def load_and_save_image(url, destination):
from urllib2 import Request, urlopen, URLError, HTTPError
# create the url and the request
req = Request(url)
# Open the url
try:
f = urlopen(req)
print "downloading " + url
# Open our local file for writing
local_file = open(destination, "wb")
# Write to our local file
local_file.write(f.read())
local_file.close()
file_type = imghdr.what(destination)
local_file = open(destination, "rb")
data = local_file.read()
local_file.close()
final_file = open(destination + '.' + file_type, "wb")
final_file.write(data)
final_file.close()
print('save image preview {0}'.format(destination + '.' + file_type))
os.remove(destination)
# handle errors
except HTTPError, e:
print "HTTP Error:", e.code, url
except URLError, e:
print "URL Error:", e.reason, url
|
Download image from given url and saves it to destination.
|
train
|
https://github.com/wooga/play-deliver/blob/9de0f35376f5342720b3a90bd3ca296b1f3a3f4c/playdeliver/image.py#L121-L154
| null |
"""This module helps for uploading and downloading images from/to play."""
import imghdr
import httplib2
import os
from file_util import mkdir_p
from file_util import list_dir_abspath
image_types = ["featureGraphic",
"icon",
"phoneScreenshots",
"promoGraphic",
"sevenInchScreenshots",
"tenInchScreenshots",
"tvBanner",
"tvScreenshots"]
single_image_types = ['tvBanner',
'promoGraphic',
'icon',
'featureGraphic']
def upload(client, source_dir):
"""
Upload images to play store.
The function will iterate through source_dir and upload all matching
image_types found in folder herachy.
"""
print('')
print('upload images')
print('-------------')
base_image_folders = [
os.path.join(source_dir, 'images', x) for x in image_types]
for type_folder in base_image_folders:
if os.path.exists(type_folder):
image_type = os.path.basename(type_folder)
langfolders = filter(os.path.isdir, list_dir_abspath(type_folder))
for language_dir in langfolders:
language = os.path.basename(language_dir)
delete_and_upload_images(
client, image_type, language, type_folder)
def delete_and_upload_images(client, image_type, language, base_dir):
"""
Delete and upload images with given image_type and language.
Function will stage delete and stage upload all
found images in matching folders.
"""
print('{0} {1}'.format(image_type, language))
files_in_dir = os.listdir(os.path.join(base_dir, language))
delete_result = client.deleteall(
'images', imageType=image_type, language=language)
deleted = delete_result.get('deleted', list())
for deleted_files in deleted:
print(' delete image: {0}'.format(deleted_files['id']))
for image_file in files_in_dir[:8]:
image_file_path = os.path.join(base_dir, language, image_file)
image_response = client.upload(
'images',
imageType=image_type,
language=language,
media_body=image_file_path)
print(" upload image {0} new id {1}".format(image_file, image_response['image']['id']))
def download(client, target_dir):
"""Download images from play store into folder herachy."""
print('download image previews')
print(
"Warning! Downloaded images are only previews!"
"They may be to small for upload.")
tree = {}
listings = client.list('listings')
languages = map(lambda listing: listing['language'], listings)
parameters = [{'imageType': image_type, 'language': language}
for image_type in image_types for language in languages]
tree = {image_type: {language: list()
for language in languages}
for image_type in image_types}
for params in parameters:
result = client.list('images', **params)
image_type = params['imageType']
language = params['language']
tree[image_type][language] = map(
lambda r: r['url'], result)
for image_type, language_map in tree.items():
for language, files in language_map.items():
if len(files) > 0:
mkdir_p(
os.path.join(target_dir, 'images', image_type, language))
if image_type in single_image_types:
if len(files) > 0:
image_url = files[0]
path = os.path.join(
target_dir,
'images',
image_type,
language,
image_type)
load_and_save_image(image_url, path)
else:
for idx, image_url in enumerate(files):
path = os.path.join(
target_dir,
'images',
image_type,
language,
image_type + '_' + str(idx))
load_and_save_image(image_url, path)
|
Kopachris/seshet
|
seshet/bot.py
|
_add_channel_names
|
python
|
def _add_channel_names(client, e):
chan = IRCstr(e.channel)
names = set([IRCstr(n) for n in e.name_list])
client.channels[chan] = SeshetChannel(chan, names)
|
Add a new channel to self.channels and initialize its user list.
Called as event handler for RPL_NAMES events. Do not call directly.
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/bot.py#L517-L525
| null |
"""Implement SeshetBot as subclass of ircutils3.bot.SimpleBot."""
import logging
import os
from io import StringIO
from datetime import datetime
from ircutils3 import bot, client
from .utils import KVStore, Storage, IRCstr
class SeshetUser(object):
"""Represent one IRC user."""
def __init__(self, nick, user, host):
logging.debug("Building new SeshetUser, %s", nick)
self.nick = IRCstr(nick)
self.user = user
self.host = host
self.channels = []
def join(self, channel):
"""Add this user to the channel's user list and add the channel to this
user's list of joined channels.
"""
if channel not in self.channels:
channel.users.add(self.nick)
self.channels.append(channel)
def part(self, channel):
"""Remove this user from the channel's user list and remove the channel
from this user's list of joined channels.
"""
if channel in self.channels:
channel.users.remove(self.nick)
self.channels.remove(channel)
def quit(self):
"""Remove this user from all channels and reinitialize the user's list
of joined channels.
"""
for c in self.channels:
c.users.remove(self.nick)
self.channels = []
def change_nick(self, nick):
"""Update this user's nick in all joined channels."""
old_nick = self.nick
self.nick = IRCstr(nick)
for c in self.channels:
c.users.remove(old_nick)
c.users.add(self.nick)
def __str__(self):
return "{}!{}@{}".format(self.nick, self.user, self.host)
def __repr__(self):
temp = "<SeshetUser {}!{}@{} in channels {}>"
return temp.format(self.nick, self.user, self.host, self.channels)
class SeshetChannel(object):
"""Represent one IRC channel."""
def __init__(self, name, users, log_size=100):
self.name = IRCstr(name)
self.users = users
self.message_log = []
self._log_size = log_size
def log_message(self, user, message):
"""Log a channel message.
This log acts as a sort of cache so that recent activity can be searched
by the bot and command modules without querying the database.
"""
if isinstance(user, SeshetUser):
user = user.nick
elif not isinstance(user, IRCstr):
user = IRCstr(user)
time = datetime.utcnow()
self.message_log.append((time, user, message))
while len(self.message_log) > self._log_size:
del self.message_log[0]
def __str__(self):
return str(self.name)
def __repr__(self):
temp = "<SeshetChannel {} with {} users>"
return temp.format(self.name, len(self.users))
class SeshetBot(bot.SimpleBot):
"""Extend `ircutils3.bot.SimpleBot`.
Each instance represents one bot, connected to one IRC network.
Each instance should have its own database, but can make use of
any shared command modules. The modules may have to be added to
the bot's database if the bot wasn't created using the
`seshet --config` or `seshet --new` commands.
"""
def __init__(self, nick='Seshet', db=None, debug_file=None, verbosity=99):
"""Extend `ircutils3.bot.SimpleBot.__init__()`.
Keyword argument `db` is required for running commands other
than core commands and should be an instance of pydal.DAL.
"""
# initialize debug logging
if debug_file is None:
logging.basicConfig(level=verbosity)
else:
logging.basicConfig(filename=os.path.expanduser(debug_file),
level=verbosity
)
logging.debug("Running `SimpleBot.__init__`...")
bot.SimpleBot.__init__(self, nick, auto_handle=False)
# define defaults
self.session = Storage()
self.log_file = 'seshet.log'
self.log_formats = {}
self.locale = {}
self.channels = {}
self.users = {}
if db is None:
# no database connection, only log to file and run
# core command modules
logging.info("No db, IRC logging will be done to file")
self.log = self._log_to_file
self.run_modules = self._run_only_core
# dummy KV store since no db
self.storage = Storage()
else:
logging.info("Using database %s", db)
self.db = db
self.storage = KVStore(db)
# Add default handlers
logging.debug("Adding default handlers...")
self.events["any"].add_handler(client._update_client_info)
self.events["ctcp_version"].add_handler(client._reply_to_ctcp_version)
self.events["name_reply"].add_handler(_add_channel_names)
def log(self, etype, source, msg='', target='', hostmask='', params=''):
"""Log an event in the database.
Required:
`etype` - event type. One of 'PRIVMSG', 'QUIT', 'PART', 'ACTION',
'NICK', 'JOIN', 'MODE', 'KICK', 'CTCP', or 'ERROR'. Enforced
by database model.
`source` - source of the event. Usually a user. For NICK events,
the user's original nickname. For ERROR events, this should be
the exception name, the module name, and the line number. The
full traceback will be logged in `msg`.
Optional:
`msg` - a message associated with the event.
`target` - the target the message was directed to. For MODE and KICK
events, this will be the user the event was performed on. For
NICK events, this will be channel the event was seen in (an event
will be created for each channel the user is seen by the bot in).
`hostmask` - a hostmask associated with the event.
`parms` - any additional parameters associated with the event, such as
a new nickname (for NICK events), mode switches (for MODE events),
or a dump of local variables (for ERROR events).
"""
self.db.event_log.insert(event_type=etype,
event_time=datetime.utcnow(),
source=source,
target=target,
message=msg,
host=hostmask,
params=params,
)
self.db.commit()
def run_modules(self, e):
# grab local pointer to self.db for faster lookup
db = self.db
# get initial list of modules handling this event type
event_types = db.modules.event_types
mod_enabled = db.modules.enabled
init_mods = db(event_types.contains(e.command) & mod_enabled).select()
logging.debug(("Running modules for {} command. "
"Initial module list:\n{}").format(e.command, init_mods)
)
if e.command in ('PRIVMSG', 'CTCP_ACTION', 'NOTICE'):
# narrow down list of modules to run based on event parameters
# lowercase for non-caps comparisons
m_low = e.message.lower()
bot_n = self.nickname.lower()
bot_u = self.user.lower()
bot_r = self.real_name.lower()
# indicates whether or not name has already been stripped from
# original message
for_us = False
if e.target.startswith('#'):
chan_msg = True
chan_nicks = self.channels[e.target].users
else:
chan_msg = False
fin_mods = list() # final list of modules to run
for mod in init_mods:
if e.source in mod.whitelist:
fin_mods.append(mod)
elif e.source in mod.blacklist:
pass
if self.nickname in mod.enicks:
if e.target == self.nickname or for_us:
fin_mods.append(mod)
elif m_low.startswith(bot_n):
# strip nickname from original message so modules can
# process it correctly
e.message = e.message[len(bot_n):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_u):
e.message = e.message[len(bot_u):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_r):
e.message = e.message[len(bot_r):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
if chan_msg:
if e.target in mod.dchannels:
pass
elif set(mod.dnicks) & chan_nicks:
pass
elif e.target in mod.echannels:
fin_mods.append(mod)
elif set(mod.enicks) & chan_nicks:
fin_mods.append(mod)
argv = m_low.split()
for mod in fin_mods:
# run each module
m = __import__(mod.name) # TODO: use importlib
# TODO: add authentication and rate limiting
for cmd, fun in m.commands.items():
if (mod.cmd_prefix + cmd) == argv[0]:
fun(self, e)
break
def get_unique_users(self, chan):
"""Get the set of users that are unique to the given channel (i.e. not
present in any other channel the bot is in).
"""
chan = IRCstr(chan)
these_users = self.channels[chan].users
other_users = set()
for c in self.channels.values():
if c.name != chan:
other_users |= c.users
return these_users - other_users
def on_message(self, e):
self.log('privmsg',
source=e.source,
msg=e.message,
target=e.target,
)
if e.target in self.channels:
# TODO: move this to self.log() so we don't have to get time twice?
self.channels[e.target].log_message(e.source, e.message)
self.run_modules(e)
def on_join(self, e):
self.log('join',
source=e.source,
target=e.target,
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
if e.source != self.nickname:
if nick not in self.users:
self.users[nick] = SeshetUser(nick, e.user, e.host)
self.users[nick].join(self.channels[chan])
self.run_modules(e)
def on_part(self, e):
self.log('part',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params[1:]),
target=e.target,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in self.users.values():
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_quit(self, e):
nick = IRCstr(e.source)
for chan in self.channels.values():
if nick in chan.users:
self.log('quit',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params),
target=chan.name,
)
self.users[nick].quit()
del self.users[nick]
def on_disconnect(self, e):
pass
def on_kick(self, e):
self.log('kick',
source=e.source,
target=e.target,
params=e.params[0],
msg=' '.join(e.params[1:]),
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in self.users.values():
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_nick_change(self, e):
new_nick = IRCstr(e.target)
old_nick = IRCstr(e.source)
for chan in self.channels.values():
if e.source in chan.user_list:
self.log('nick',
source=e.source,
hostmask=e.user+'@'+e.host,
target=chan.name,
params=e.target,
)
self.users[old_nick].change_nick(new_nick)
self.users[new_nick] = self.users[old_nick]
del self.users[old_nick]
def on_ctcp_action(self, e):
self.log('action',
source=e.source,
target=e.target,
msg=' '.join(e.params),
)
def on_welcome(self, e):
pass
def on_mode(self, e):
self.log('mode',
source=e.source,
msg=' '.join(e.params),
target=e.target,
)
def before_poll(self):
"""Called each loop before polling sockets for I/O."""
pass
def after_poll(self):
"""Called each loop after polling sockets for I/O and
handling any queued events.
"""
pass
def connect(self, *args, **kwargs):
"""Extend `client.SimpleClient.connect()` with defaults"""
defaults = {}
for i, k in enumerate(('host', 'port', 'channel', 'use_ssl', 'password')):
if i < len(args):
defaults[k] = args[i]
elif k in kwargs:
defaults[k] = kwargs[k]
else:
def_k = 'default_' + k
defaults[k] = getattr(self, def_k, None)
if defaults['use_ssl'] is None:
defaults['use_ssl'] = False
if defaults['host'] is None:
raise TypeError("missing 1 required positional argument: 'host'")
logging.info("Connecting to %s:%s and joining channels %s",
defaults['host'],
defaults['port'],
defaults['channel'],
)
client.SimpleClient.connect(self, **defaults)
def start(self):
logging.debug("Beginning poll loop")
self._loop(self.conn._map)
def _log_to_file(self, etype, source, msg='', target='', hostmask='', params=''):
"""Override `log()` if bot is not initialized with a database
connection. Do not call this method directly.
"""
today = datetime.utcnow()
# TODO: Use self.locale['timezone'] for changing time
date = today.strftime(self.locale['date_fmt'])
time = today.strftime(self.locale['time_fmt'])
datetime_s = today.strftime(self.locale['short_datetime_fmt'])
datetime_l = today.strftime(self.locale['long_datetime_fmt'])
if target == self.nickname and etype in ('privmsg', 'action'):
target = source
if etype in self.log_formats:
file_path = os.path.expanduser(self.log_file.format(**locals()))
file_dir = os.path.dirname(file_path)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
line = self.log_formats[etype].format(**locals())
with open(file_path, 'a') as log:
log.write(line+'\n')
# else do nothing
def _run_only_core(self, *args, **kwargs):
"""Override `_run_commands()` if bot is not initialized with a
database connection. Do not call this method directly.
Rather than getting a list of enabled modules from the database,
Seshet will only run the commands defined by `core` in this package.
The bot will only run commands given in private message ("query")
by either an authenticated user defined in the instance's config file,
or by any user with the same hostmask if authentication isn't set up
in the instance's config file.
The `core` command module from this package can only be overridden if
the bot is initialized with a database connection and a new `core`
module is entered into the database.
"""
pass
def _loop(self, map):
"""The main loop. Poll sockets for I/O and run any other functions
that need to be run every loop.
"""
try:
from asyncore import poll
except ImportError:
raise Exception("Couldn't find poll function. Cannot start bot.")
while map:
self.before_poll()
poll(timeout=30.0, map=map)
self.after_poll()
|
Kopachris/seshet
|
seshet/bot.py
|
SeshetUser.join
|
python
|
def join(self, channel):
    """Register this user in *channel* and record the channel as joined.

    Does nothing if the channel is already in this user's joined list.
    """
    if channel in self.channels:
        return
    channel.users.add(self.nick)
    self.channels.append(channel)
|
Add this user to the channel's user list and add the channel to this
user's list of joined channels.
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/bot.py#L23-L30
| null |
class SeshetUser(object):
"""Represent one IRC user."""
def __init__(self, nick, user, host):
logging.debug("Building new SeshetUser, %s", nick)
self.nick = IRCstr(nick)
self.user = user
self.host = host
self.channels = []
def join(self, channel):
"""Add this user to the channel's user list and add the channel to this
user's list of joined channels.
"""
if channel not in self.channels:
channel.users.add(self.nick)
self.channels.append(channel)
def part(self, channel):
"""Remove this user from the channel's user list and remove the channel
from this user's list of joined channels.
"""
if channel in self.channels:
channel.users.remove(self.nick)
self.channels.remove(channel)
def quit(self):
"""Remove this user from all channels and reinitialize the user's list
of joined channels.
"""
for c in self.channels:
c.users.remove(self.nick)
self.channels = []
def change_nick(self, nick):
"""Update this user's nick in all joined channels."""
old_nick = self.nick
self.nick = IRCstr(nick)
for c in self.channels:
c.users.remove(old_nick)
c.users.add(self.nick)
def __str__(self):
return "{}!{}@{}".format(self.nick, self.user, self.host)
def __repr__(self):
temp = "<SeshetUser {}!{}@{} in channels {}>"
return temp.format(self.nick, self.user, self.host, self.channels)
|
Kopachris/seshet
|
seshet/bot.py
|
SeshetUser.part
|
python
|
def part(self, channel):
    """Drop this user's nick from *channel*'s user set and forget the channel.

    Does nothing if the user is not tracked as a member of the channel.
    """
    if channel not in self.channels:
        return
    channel.users.remove(self.nick)
    self.channels.remove(channel)
|
Remove this user from the channel's user list and remove the channel
from this user's list of joined channels.
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/bot.py#L32-L39
| null |
class SeshetUser(object):
"""Represent one IRC user."""
def __init__(self, nick, user, host):
logging.debug("Building new SeshetUser, %s", nick)
self.nick = IRCstr(nick)
self.user = user
self.host = host
self.channels = []
def join(self, channel):
"""Add this user to the channel's user list and add the channel to this
user's list of joined channels.
"""
if channel not in self.channels:
channel.users.add(self.nick)
self.channels.append(channel)
def part(self, channel):
"""Remove this user from the channel's user list and remove the channel
from this user's list of joined channels.
"""
if channel in self.channels:
channel.users.remove(self.nick)
self.channels.remove(channel)
def quit(self):
"""Remove this user from all channels and reinitialize the user's list
of joined channels.
"""
for c in self.channels:
c.users.remove(self.nick)
self.channels = []
def change_nick(self, nick):
"""Update this user's nick in all joined channels."""
old_nick = self.nick
self.nick = IRCstr(nick)
for c in self.channels:
c.users.remove(old_nick)
c.users.add(self.nick)
def __str__(self):
return "{}!{}@{}".format(self.nick, self.user, self.host)
def __repr__(self):
temp = "<SeshetUser {}!{}@{} in channels {}>"
return temp.format(self.nick, self.user, self.host, self.channels)
|
Kopachris/seshet
|
seshet/bot.py
|
SeshetUser.quit
|
python
|
def quit(self):
    """Withdraw this user from every joined channel and reset the joined list."""
    nick = self.nick
    for joined in self.channels:
        joined.users.remove(nick)
    self.channels = []
|
Remove this user from all channels and reinitialize the user's list
of joined channels.
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/bot.py#L41-L48
| null |
class SeshetUser(object):
"""Represent one IRC user."""
def __init__(self, nick, user, host):
logging.debug("Building new SeshetUser, %s", nick)
self.nick = IRCstr(nick)
self.user = user
self.host = host
self.channels = []
def join(self, channel):
"""Add this user to the channel's user list and add the channel to this
user's list of joined channels.
"""
if channel not in self.channels:
channel.users.add(self.nick)
self.channels.append(channel)
def part(self, channel):
"""Remove this user from the channel's user list and remove the channel
from this user's list of joined channels.
"""
if channel in self.channels:
channel.users.remove(self.nick)
self.channels.remove(channel)
def quit(self):
"""Remove this user from all channels and reinitialize the user's list
of joined channels.
"""
for c in self.channels:
c.users.remove(self.nick)
self.channels = []
def change_nick(self, nick):
"""Update this user's nick in all joined channels."""
old_nick = self.nick
self.nick = IRCstr(nick)
for c in self.channels:
c.users.remove(old_nick)
c.users.add(self.nick)
def __str__(self):
return "{}!{}@{}".format(self.nick, self.user, self.host)
def __repr__(self):
temp = "<SeshetUser {}!{}@{} in channels {}>"
return temp.format(self.nick, self.user, self.host, self.channels)
|
Kopachris/seshet
|
seshet/bot.py
|
SeshetUser.change_nick
|
python
|
def change_nick(self, nick):
    """Replace this user's nick with *nick* and propagate the rename to the
    user set of every channel the user has joined.
    """
    previous, self.nick = self.nick, IRCstr(nick)
    for joined in self.channels:
        joined.users.remove(previous)
        joined.users.add(self.nick)
|
Update this user's nick in all joined channels.
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/bot.py#L50-L58
| null |
class SeshetUser(object):
"""Represent one IRC user."""
def __init__(self, nick, user, host):
logging.debug("Building new SeshetUser, %s", nick)
self.nick = IRCstr(nick)
self.user = user
self.host = host
self.channels = []
def join(self, channel):
"""Add this user to the channel's user list and add the channel to this
user's list of joined channels.
"""
if channel not in self.channels:
channel.users.add(self.nick)
self.channels.append(channel)
def part(self, channel):
"""Remove this user from the channel's user list and remove the channel
from this user's list of joined channels.
"""
if channel in self.channels:
channel.users.remove(self.nick)
self.channels.remove(channel)
def quit(self):
"""Remove this user from all channels and reinitialize the user's list
of joined channels.
"""
for c in self.channels:
c.users.remove(self.nick)
self.channels = []
def change_nick(self, nick):
"""Update this user's nick in all joined channels."""
old_nick = self.nick
self.nick = IRCstr(nick)
for c in self.channels:
c.users.remove(old_nick)
c.users.add(self.nick)
def __str__(self):
return "{}!{}@{}".format(self.nick, self.user, self.host)
def __repr__(self):
temp = "<SeshetUser {}!{}@{} in channels {}>"
return temp.format(self.nick, self.user, self.host, self.channels)
|
Kopachris/seshet
|
seshet/bot.py
|
SeshetChannel.log_message
|
python
|
def log_message(self, user, message):
    """Append one channel message to the bounded in-memory log.

    The log acts as a cache of recent activity so the bot and command
    modules can search it without querying the database. Entries are
    ``(utc_datetime, nick, message)`` tuples.

    *user* may be a SeshetUser, an IRCstr, or a plain string; it is
    normalized to an IRCstr nick before logging.
    """
    if isinstance(user, SeshetUser):
        user = user.nick
    elif not isinstance(user, IRCstr):
        user = IRCstr(user)
    time = datetime.utcnow()
    self.message_log.append((time, user, message))
    # Trim the overflow with one slice deletion; the original popped the
    # head one element at a time, shifting the whole list per pop (O(n^2)
    # when many entries must be dropped). Behavior is identical.
    overflow = len(self.message_log) - self._log_size
    if overflow > 0:
        del self.message_log[:overflow]
|
Log a channel message.
This log acts as a sort of cache so that recent activity can be searched
by the bot and command modules without querying the database.
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/bot.py#L77-L94
| null |
class SeshetChannel(object):
"""Represent one IRC channel."""
def __init__(self, name, users, log_size=100):
self.name = IRCstr(name)
self.users = users
self.message_log = []
self._log_size = log_size
def log_message(self, user, message):
"""Log a channel message.
This log acts as a sort of cache so that recent activity can be searched
by the bot and command modules without querying the database.
"""
if isinstance(user, SeshetUser):
user = user.nick
elif not isinstance(user, IRCstr):
user = IRCstr(user)
time = datetime.utcnow()
self.message_log.append((time, user, message))
while len(self.message_log) > self._log_size:
del self.message_log[0]
def __str__(self):
return str(self.name)
def __repr__(self):
temp = "<SeshetChannel {} with {} users>"
return temp.format(self.name, len(self.users))
|
Kopachris/seshet
|
seshet/bot.py
|
SeshetBot.log
|
python
|
def log(self, etype, source, msg='', target='', hostmask='', params=''):
    """Insert one IRC event into the database's event_log table and commit.

    See the class-level event logging conventions for the meaning of each
    field; *etype* is the event type and *source* its originator.
    """
    row = dict(
        event_type=etype,
        event_time=datetime.utcnow(),
        source=source,
        target=target,
        message=msg,
        host=hostmask,
        params=params,
    )
    self.db.event_log.insert(**row)
    self.db.commit()
|
Log an event in the database.
Required:
`etype` - event type. One of 'PRIVMSG', 'QUIT', 'PART', 'ACTION',
'NICK', 'JOIN', 'MODE', 'KICK', 'CTCP', or 'ERROR'. Enforced
by database model.
`source` - source of the event. Usually a user. For NICK events,
the user's original nickname. For ERROR events, this should be
the exception name, the module name, and the line number. The
full traceback will be logged in `msg`.
Optional:
`msg` - a message associated with the event.
`target` - the target the message was directed to. For MODE and KICK
events, this will be the user the event was performed on. For
NICK events, this will be channel the event was seen in (an event
will be created for each channel the user is seen by the bot in).
`hostmask` - a hostmask associated with the event.
`params` - any additional parameters associated with the event, such as
a new nickname (for NICK events), mode switches (for MODE events),
or a dump of local variables (for ERROR events).
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/bot.py#L163-L194
| null |
class SeshetBot(bot.SimpleBot):
"""Extend `ircutils3.bot.SimpleBot`.
Each instance represents one bot, connected to one IRC network.
Each instance should have its own database, but can make use of
any shared command modules. The modules may have to be added to
the bot's database if the bot wasn't created using the
`seshet --config` or `seshet --new` commands.
"""
def __init__(self, nick='Seshet', db=None, debug_file=None, verbosity=99):
"""Extend `ircutils3.bot.SimpleBot.__init__()`.
Keyword argument `db` is required for running commands other
than core commands and should be an instance of pydal.DAL.
"""
# initialize debug logging
if debug_file is None:
logging.basicConfig(level=verbosity)
else:
logging.basicConfig(filename=os.path.expanduser(debug_file),
level=verbosity
)
logging.debug("Running `SimpleBot.__init__`...")
bot.SimpleBot.__init__(self, nick, auto_handle=False)
# define defaults
self.session = Storage()
self.log_file = 'seshet.log'
self.log_formats = {}
self.locale = {}
self.channels = {}
self.users = {}
if db is None:
# no database connection, only log to file and run
# core command modules
logging.info("No db, IRC logging will be done to file")
self.log = self._log_to_file
self.run_modules = self._run_only_core
# dummy KV store since no db
self.storage = Storage()
else:
logging.info("Using database %s", db)
self.db = db
self.storage = KVStore(db)
# Add default handlers
logging.debug("Adding default handlers...")
self.events["any"].add_handler(client._update_client_info)
self.events["ctcp_version"].add_handler(client._reply_to_ctcp_version)
self.events["name_reply"].add_handler(_add_channel_names)
def log(self, etype, source, msg='', target='', hostmask='', params=''):
"""Log an event in the database.
Required:
`etype` - event type. One of 'PRIVMSG', 'QUIT', 'PART', 'ACTION',
'NICK', 'JOIN', 'MODE', 'KICK', 'CTCP', or 'ERROR'. Enforced
by database model.
`source` - source of the event. Usually a user. For NICK events,
the user's original nickname. For ERROR events, this should be
the exception name, the module name, and the line number. The
full traceback will be logged in `msg`.
Optional:
`msg` - a message associated with the event.
`target` - the target the message was directed to. For MODE and KICK
events, this will be the user the event was performed on. For
NICK events, this will be channel the event was seen in (an event
will be created for each channel the user is seen by the bot in).
`hostmask` - a hostmask associated with the event.
`parms` - any additional parameters associated with the event, such as
a new nickname (for NICK events), mode switches (for MODE events),
or a dump of local variables (for ERROR events).
"""
self.db.event_log.insert(event_type=etype,
event_time=datetime.utcnow(),
source=source,
target=target,
message=msg,
host=hostmask,
params=params,
)
self.db.commit()
def run_modules(self, e):
# grab local pointer to self.db for faster lookup
db = self.db
# get initial list of modules handling this event type
event_types = db.modules.event_types
mod_enabled = db.modules.enabled
init_mods = db(event_types.contains(e.command) & mod_enabled).select()
logging.debug(("Running modules for {} command. "
"Initial module list:\n{}").format(e.command, init_mods)
)
if e.command in ('PRIVMSG', 'CTCP_ACTION', 'NOTICE'):
# narrow down list of modules to run based on event parameters
# lowercase for non-caps comparisons
m_low = e.message.lower()
bot_n = self.nickname.lower()
bot_u = self.user.lower()
bot_r = self.real_name.lower()
# indicates whether or not name has already been stripped from
# original message
for_us = False
if e.target.startswith('#'):
chan_msg = True
chan_nicks = self.channels[e.target].users
else:
chan_msg = False
fin_mods = list() # final list of modules to run
for mod in init_mods:
if e.source in mod.whitelist:
fin_mods.append(mod)
elif e.source in mod.blacklist:
pass
if self.nickname in mod.enicks:
if e.target == self.nickname or for_us:
fin_mods.append(mod)
elif m_low.startswith(bot_n):
# strip nickname from original message so modules can
# process it correctly
e.message = e.message[len(bot_n):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_u):
e.message = e.message[len(bot_u):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_r):
e.message = e.message[len(bot_r):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
if chan_msg:
if e.target in mod.dchannels:
pass
elif set(mod.dnicks) & chan_nicks:
pass
elif e.target in mod.echannels:
fin_mods.append(mod)
elif set(mod.enicks) & chan_nicks:
fin_mods.append(mod)
argv = m_low.split()
for mod in fin_mods:
# run each module
m = __import__(mod.name) # TODO: use importlib
# TODO: add authentication and rate limiting
for cmd, fun in m.commands.items():
if (mod.cmd_prefix + cmd) == argv[0]:
fun(self, e)
break
def get_unique_users(self, chan):
"""Get the set of users that are unique to the given channel (i.e. not
present in any other channel the bot is in).
"""
chan = IRCstr(chan)
these_users = self.channels[chan].users
other_users = set()
for c in self.channels.values():
if c.name != chan:
other_users |= c.users
return these_users - other_users
def on_message(self, e):
self.log('privmsg',
source=e.source,
msg=e.message,
target=e.target,
)
if e.target in self.channels:
# TODO: move this to self.log() so we don't have to get time twice?
self.channels[e.target].log_message(e.source, e.message)
self.run_modules(e)
def on_join(self, e):
self.log('join',
source=e.source,
target=e.target,
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
if e.source != self.nickname:
if nick not in self.users:
self.users[nick] = SeshetUser(nick, e.user, e.host)
self.users[nick].join(self.channels[chan])
self.run_modules(e)
def on_part(self, e):
self.log('part',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params[1:]),
target=e.target,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in self.users.values():
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_quit(self, e):
nick = IRCstr(e.source)
for chan in self.channels.values():
if nick in chan.users:
self.log('quit',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params),
target=chan.name,
)
self.users[nick].quit()
del self.users[nick]
def on_disconnect(self, e):
pass
def on_kick(self, e):
self.log('kick',
source=e.source,
target=e.target,
params=e.params[0],
msg=' '.join(e.params[1:]),
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in self.users.values():
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_nick_change(self, e):
new_nick = IRCstr(e.target)
old_nick = IRCstr(e.source)
for chan in self.channels.values():
if e.source in chan.user_list:
self.log('nick',
source=e.source,
hostmask=e.user+'@'+e.host,
target=chan.name,
params=e.target,
)
self.users[old_nick].change_nick(new_nick)
self.users[new_nick] = self.users[old_nick]
del self.users[old_nick]
def on_ctcp_action(self, e):
self.log('action',
source=e.source,
target=e.target,
msg=' '.join(e.params),
)
def on_welcome(self, e):
pass
def on_mode(self, e):
self.log('mode',
source=e.source,
msg=' '.join(e.params),
target=e.target,
)
def before_poll(self):
"""Called each loop before polling sockets for I/O."""
pass
def after_poll(self):
"""Called each loop after polling sockets for I/O and
handling any queued events.
"""
pass
def connect(self, *args, **kwargs):
"""Extend `client.SimpleClient.connect()` with defaults"""
defaults = {}
for i, k in enumerate(('host', 'port', 'channel', 'use_ssl', 'password')):
if i < len(args):
defaults[k] = args[i]
elif k in kwargs:
defaults[k] = kwargs[k]
else:
def_k = 'default_' + k
defaults[k] = getattr(self, def_k, None)
if defaults['use_ssl'] is None:
defaults['use_ssl'] = False
if defaults['host'] is None:
raise TypeError("missing 1 required positional argument: 'host'")
logging.info("Connecting to %s:%s and joining channels %s",
defaults['host'],
defaults['port'],
defaults['channel'],
)
client.SimpleClient.connect(self, **defaults)
def start(self):
logging.debug("Beginning poll loop")
self._loop(self.conn._map)
def _log_to_file(self, etype, source, msg='', target='', hostmask='', params=''):
"""Override `log()` if bot is not initialized with a database
connection. Do not call this method directly.
"""
today = datetime.utcnow()
# TODO: Use self.locale['timezone'] for changing time
date = today.strftime(self.locale['date_fmt'])
time = today.strftime(self.locale['time_fmt'])
datetime_s = today.strftime(self.locale['short_datetime_fmt'])
datetime_l = today.strftime(self.locale['long_datetime_fmt'])
if target == self.nickname and etype in ('privmsg', 'action'):
target = source
if etype in self.log_formats:
file_path = os.path.expanduser(self.log_file.format(**locals()))
file_dir = os.path.dirname(file_path)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
line = self.log_formats[etype].format(**locals())
with open(file_path, 'a') as log:
log.write(line+'\n')
# else do nothing
def _run_only_core(self, *args, **kwargs):
"""Override `_run_commands()` if bot is not initialized with a
database connection. Do not call this method directly.
Rather than getting a list of enabled modules from the database,
Seshet will only run the commands defined by `core` in this package.
The bot will only run commands given in private message ("query")
by either an authenticated user defined in the instance's config file,
or by any user with the same hostmask if authentication isn't set up
in the instance's config file.
The `core` command module from this package can only be overridden if
the bot is initialized with a database connection and a new `core`
module is entered into the database.
"""
pass
def _loop(self, map):
"""The main loop. Poll sockets for I/O and run any other functions
that need to be run every loop.
"""
try:
from asyncore import poll
except ImportError:
raise Exception("Couldn't find poll function. Cannot start bot.")
while map:
self.before_poll()
poll(timeout=30.0, map=map)
self.after_poll()
|
Kopachris/seshet
|
seshet/bot.py
|
SeshetBot.get_unique_users
|
python
|
def get_unique_users(self, chan):
    """Return the users present in *chan* and in no other channel the bot is in."""
    chan = IRCstr(chan)
    here = self.channels[chan].users
    # Union of every other channel's user set; set().union() of an empty
    # sequence is the empty set, so a single-channel bot works too.
    elsewhere = set().union(
        *(c.users for c in self.channels.values() if c.name != chan)
    )
    return here - elsewhere
|
Get the set of users that are unique to the given channel (i.e. not
present in any other channel the bot is in).
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/bot.py#L279-L292
| null |
class SeshetBot(bot.SimpleBot):
"""Extend `ircutils3.bot.SimpleBot`.
Each instance represents one bot, connected to one IRC network.
Each instance should have its own database, but can make use of
any shared command modules. The modules may have to be added to
the bot's database if the bot wasn't created using the
`seshet --config` or `seshet --new` commands.
"""
def __init__(self, nick='Seshet', db=None, debug_file=None, verbosity=99):
"""Extend `ircutils3.bot.SimpleBot.__init__()`.
Keyword argument `db` is required for running commands other
than core commands and should be an instance of pydal.DAL.
"""
# initialize debug logging
if debug_file is None:
logging.basicConfig(level=verbosity)
else:
logging.basicConfig(filename=os.path.expanduser(debug_file),
level=verbosity
)
logging.debug("Running `SimpleBot.__init__`...")
bot.SimpleBot.__init__(self, nick, auto_handle=False)
# define defaults
self.session = Storage()
self.log_file = 'seshet.log'
self.log_formats = {}
self.locale = {}
self.channels = {}
self.users = {}
if db is None:
# no database connection, only log to file and run
# core command modules
logging.info("No db, IRC logging will be done to file")
self.log = self._log_to_file
self.run_modules = self._run_only_core
# dummy KV store since no db
self.storage = Storage()
else:
logging.info("Using database %s", db)
self.db = db
self.storage = KVStore(db)
# Add default handlers
logging.debug("Adding default handlers...")
self.events["any"].add_handler(client._update_client_info)
self.events["ctcp_version"].add_handler(client._reply_to_ctcp_version)
self.events["name_reply"].add_handler(_add_channel_names)
def log(self, etype, source, msg='', target='', hostmask='', params=''):
"""Log an event in the database.
Required:
`etype` - event type. One of 'PRIVMSG', 'QUIT', 'PART', 'ACTION',
'NICK', 'JOIN', 'MODE', 'KICK', 'CTCP', or 'ERROR'. Enforced
by database model.
`source` - source of the event. Usually a user. For NICK events,
the user's original nickname. For ERROR events, this should be
the exception name, the module name, and the line number. The
full traceback will be logged in `msg`.
Optional:
`msg` - a message associated with the event.
`target` - the target the message was directed to. For MODE and KICK
events, this will be the user the event was performed on. For
NICK events, this will be channel the event was seen in (an event
will be created for each channel the user is seen by the bot in).
`hostmask` - a hostmask associated with the event.
`parms` - any additional parameters associated with the event, such as
a new nickname (for NICK events), mode switches (for MODE events),
or a dump of local variables (for ERROR events).
"""
self.db.event_log.insert(event_type=etype,
event_time=datetime.utcnow(),
source=source,
target=target,
message=msg,
host=hostmask,
params=params,
)
self.db.commit()
def run_modules(self, e):
# grab local pointer to self.db for faster lookup
db = self.db
# get initial list of modules handling this event type
event_types = db.modules.event_types
mod_enabled = db.modules.enabled
init_mods = db(event_types.contains(e.command) & mod_enabled).select()
logging.debug(("Running modules for {} command. "
"Initial module list:\n{}").format(e.command, init_mods)
)
if e.command in ('PRIVMSG', 'CTCP_ACTION', 'NOTICE'):
# narrow down list of modules to run based on event parameters
# lowercase for non-caps comparisons
m_low = e.message.lower()
bot_n = self.nickname.lower()
bot_u = self.user.lower()
bot_r = self.real_name.lower()
# indicates whether or not name has already been stripped from
# original message
for_us = False
if e.target.startswith('#'):
chan_msg = True
chan_nicks = self.channels[e.target].users
else:
chan_msg = False
fin_mods = list() # final list of modules to run
for mod in init_mods:
if e.source in mod.whitelist:
fin_mods.append(mod)
elif e.source in mod.blacklist:
pass
if self.nickname in mod.enicks:
if e.target == self.nickname or for_us:
fin_mods.append(mod)
elif m_low.startswith(bot_n):
# strip nickname from original message so modules can
# process it correctly
e.message = e.message[len(bot_n):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_u):
e.message = e.message[len(bot_u):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_r):
e.message = e.message[len(bot_r):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
if chan_msg:
if e.target in mod.dchannels:
pass
elif set(mod.dnicks) & chan_nicks:
pass
elif e.target in mod.echannels:
fin_mods.append(mod)
elif set(mod.enicks) & chan_nicks:
fin_mods.append(mod)
argv = m_low.split()
for mod in fin_mods:
# run each module
m = __import__(mod.name) # TODO: use importlib
# TODO: add authentication and rate limiting
for cmd, fun in m.commands.items():
if (mod.cmd_prefix + cmd) == argv[0]:
fun(self, e)
break
def get_unique_users(self, chan):
"""Get the set of users that are unique to the given channel (i.e. not
present in any other channel the bot is in).
"""
chan = IRCstr(chan)
these_users = self.channels[chan].users
other_users = set()
for c in self.channels.values():
if c.name != chan:
other_users |= c.users
return these_users - other_users
def on_message(self, e):
self.log('privmsg',
source=e.source,
msg=e.message,
target=e.target,
)
if e.target in self.channels:
# TODO: move this to self.log() so we don't have to get time twice?
self.channels[e.target].log_message(e.source, e.message)
self.run_modules(e)
def on_join(self, e):
self.log('join',
source=e.source,
target=e.target,
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
if e.source != self.nickname:
if nick not in self.users:
self.users[nick] = SeshetUser(nick, e.user, e.host)
self.users[nick].join(self.channels[chan])
self.run_modules(e)
def on_part(self, e):
self.log('part',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params[1:]),
target=e.target,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in self.users.values():
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_quit(self, e):
nick = IRCstr(e.source)
for chan in self.channels.values():
if nick in chan.users:
self.log('quit',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params),
target=chan.name,
)
self.users[nick].quit()
del self.users[nick]
def on_disconnect(self, e):
pass
def on_kick(self, e):
self.log('kick',
source=e.source,
target=e.target,
params=e.params[0],
msg=' '.join(e.params[1:]),
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in self.users.values():
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_nick_change(self, e):
new_nick = IRCstr(e.target)
old_nick = IRCstr(e.source)
for chan in self.channels.values():
if e.source in chan.user_list:
self.log('nick',
source=e.source,
hostmask=e.user+'@'+e.host,
target=chan.name,
params=e.target,
)
self.users[old_nick].change_nick(new_nick)
self.users[new_nick] = self.users[old_nick]
del self.users[old_nick]
def on_ctcp_action(self, e):
self.log('action',
source=e.source,
target=e.target,
msg=' '.join(e.params),
)
def on_welcome(self, e):
pass
def on_mode(self, e):
self.log('mode',
source=e.source,
msg=' '.join(e.params),
target=e.target,
)
def before_poll(self):
"""Called each loop before polling sockets for I/O."""
pass
def after_poll(self):
"""Called each loop after polling sockets for I/O and
handling any queued events.
"""
pass
def connect(self, *args, **kwargs):
"""Extend `client.SimpleClient.connect()` with defaults"""
defaults = {}
for i, k in enumerate(('host', 'port', 'channel', 'use_ssl', 'password')):
if i < len(args):
defaults[k] = args[i]
elif k in kwargs:
defaults[k] = kwargs[k]
else:
def_k = 'default_' + k
defaults[k] = getattr(self, def_k, None)
if defaults['use_ssl'] is None:
defaults['use_ssl'] = False
if defaults['host'] is None:
raise TypeError("missing 1 required positional argument: 'host'")
logging.info("Connecting to %s:%s and joining channels %s",
defaults['host'],
defaults['port'],
defaults['channel'],
)
client.SimpleClient.connect(self, **defaults)
def start(self):
logging.debug("Beginning poll loop")
self._loop(self.conn._map)
def _log_to_file(self, etype, source, msg='', target='', hostmask='', params=''):
"""Override `log()` if bot is not initialized with a database
connection. Do not call this method directly.
"""
today = datetime.utcnow()
# TODO: Use self.locale['timezone'] for changing time
date = today.strftime(self.locale['date_fmt'])
time = today.strftime(self.locale['time_fmt'])
datetime_s = today.strftime(self.locale['short_datetime_fmt'])
datetime_l = today.strftime(self.locale['long_datetime_fmt'])
if target == self.nickname and etype in ('privmsg', 'action'):
target = source
if etype in self.log_formats:
file_path = os.path.expanduser(self.log_file.format(**locals()))
file_dir = os.path.dirname(file_path)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
line = self.log_formats[etype].format(**locals())
with open(file_path, 'a') as log:
log.write(line+'\n')
# else do nothing
def _run_only_core(self, *args, **kwargs):
"""Override `_run_commands()` if bot is not initialized with a
database connection. Do not call this method directly.
Rather than getting a list of enabled modules from the database,
Seshet will only run the commands defined by `core` in this package.
The bot will only run commands given in private message ("query")
by either an authenticated user defined in the instance's config file,
or by any user with the same hostmask if authentication isn't set up
in the instance's config file.
The `core` command module from this package can only be overridden if
the bot is initialized with a database connection and a new `core`
module is entered into the database.
"""
pass
def _loop(self, map):
"""The main loop. Poll sockets for I/O and run any other functions
that need to be run every loop.
"""
try:
from asyncore import poll
except ImportError:
raise Exception("Couldn't find poll function. Cannot start bot.")
while map:
self.before_poll()
poll(timeout=30.0, map=map)
self.after_poll()
|
Kopachris/seshet
|
seshet/bot.py
|
SeshetBot.connect
|
python
|
def connect(self, *args, **kwargs):
defaults = {}
for i, k in enumerate(('host', 'port', 'channel', 'use_ssl', 'password')):
if i < len(args):
defaults[k] = args[i]
elif k in kwargs:
defaults[k] = kwargs[k]
else:
def_k = 'default_' + k
defaults[k] = getattr(self, def_k, None)
if defaults['use_ssl'] is None:
defaults['use_ssl'] = False
if defaults['host'] is None:
raise TypeError("missing 1 required positional argument: 'host'")
logging.info("Connecting to %s:%s and joining channels %s",
defaults['host'],
defaults['port'],
defaults['channel'],
)
client.SimpleClient.connect(self, **defaults)
|
Extend `client.SimpleClient.connect()` with defaults
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/bot.py#L430-L454
| null |
class SeshetBot(bot.SimpleBot):
"""Extend `ircutils3.bot.SimpleBot`.
Each instance represents one bot, connected to one IRC network.
Each instance should have its own database, but can make use of
any shared command modules. The modules may have to be added to
the bot's database if the bot wasn't created using the
`seshet --config` or `seshet --new` commands.
"""
def __init__(self, nick='Seshet', db=None, debug_file=None, verbosity=99):
"""Extend `ircutils3.bot.SimpleBot.__init__()`.
Keyword argument `db` is required for running commands other
than core commands and should be an instance of pydal.DAL.
"""
# initialize debug logging
if debug_file is None:
logging.basicConfig(level=verbosity)
else:
logging.basicConfig(filename=os.path.expanduser(debug_file),
level=verbosity
)
logging.debug("Running `SimpleBot.__init__`...")
bot.SimpleBot.__init__(self, nick, auto_handle=False)
# define defaults
self.session = Storage()
self.log_file = 'seshet.log'
self.log_formats = {}
self.locale = {}
self.channels = {}
self.users = {}
if db is None:
# no database connection, only log to file and run
# core command modules
logging.info("No db, IRC logging will be done to file")
self.log = self._log_to_file
self.run_modules = self._run_only_core
# dummy KV store since no db
self.storage = Storage()
else:
logging.info("Using database %s", db)
self.db = db
self.storage = KVStore(db)
# Add default handlers
logging.debug("Adding default handlers...")
self.events["any"].add_handler(client._update_client_info)
self.events["ctcp_version"].add_handler(client._reply_to_ctcp_version)
self.events["name_reply"].add_handler(_add_channel_names)
def log(self, etype, source, msg='', target='', hostmask='', params=''):
"""Log an event in the database.
Required:
`etype` - event type. One of 'PRIVMSG', 'QUIT', 'PART', 'ACTION',
'NICK', 'JOIN', 'MODE', 'KICK', 'CTCP', or 'ERROR'. Enforced
by database model.
`source` - source of the event. Usually a user. For NICK events,
the user's original nickname. For ERROR events, this should be
the exception name, the module name, and the line number. The
full traceback will be logged in `msg`.
Optional:
`msg` - a message associated with the event.
`target` - the target the message was directed to. For MODE and KICK
events, this will be the user the event was performed on. For
NICK events, this will be channel the event was seen in (an event
will be created for each channel the user is seen by the bot in).
`hostmask` - a hostmask associated with the event.
`parms` - any additional parameters associated with the event, such as
a new nickname (for NICK events), mode switches (for MODE events),
or a dump of local variables (for ERROR events).
"""
self.db.event_log.insert(event_type=etype,
event_time=datetime.utcnow(),
source=source,
target=target,
message=msg,
host=hostmask,
params=params,
)
self.db.commit()
def run_modules(self, e):
# grab local pointer to self.db for faster lookup
db = self.db
# get initial list of modules handling this event type
event_types = db.modules.event_types
mod_enabled = db.modules.enabled
init_mods = db(event_types.contains(e.command) & mod_enabled).select()
logging.debug(("Running modules for {} command. "
"Initial module list:\n{}").format(e.command, init_mods)
)
if e.command in ('PRIVMSG', 'CTCP_ACTION', 'NOTICE'):
# narrow down list of modules to run based on event parameters
# lowercase for non-caps comparisons
m_low = e.message.lower()
bot_n = self.nickname.lower()
bot_u = self.user.lower()
bot_r = self.real_name.lower()
# indicates whether or not name has already been stripped from
# original message
for_us = False
if e.target.startswith('#'):
chan_msg = True
chan_nicks = self.channels[e.target].users
else:
chan_msg = False
fin_mods = list() # final list of modules to run
for mod in init_mods:
if e.source in mod.whitelist:
fin_mods.append(mod)
elif e.source in mod.blacklist:
pass
if self.nickname in mod.enicks:
if e.target == self.nickname or for_us:
fin_mods.append(mod)
elif m_low.startswith(bot_n):
# strip nickname from original message so modules can
# process it correctly
e.message = e.message[len(bot_n):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_u):
e.message = e.message[len(bot_u):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_r):
e.message = e.message[len(bot_r):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
if chan_msg:
if e.target in mod.dchannels:
pass
elif set(mod.dnicks) & chan_nicks:
pass
elif e.target in mod.echannels:
fin_mods.append(mod)
elif set(mod.enicks) & chan_nicks:
fin_mods.append(mod)
argv = m_low.split()
for mod in fin_mods:
# run each module
m = __import__(mod.name) # TODO: use importlib
# TODO: add authentication and rate limiting
for cmd, fun in m.commands.items():
if (mod.cmd_prefix + cmd) == argv[0]:
fun(self, e)
break
def get_unique_users(self, chan):
"""Get the set of users that are unique to the given channel (i.e. not
present in any other channel the bot is in).
"""
chan = IRCstr(chan)
these_users = self.channels[chan].users
other_users = set()
for c in self.channels.values():
if c.name != chan:
other_users |= c.users
return these_users - other_users
def on_message(self, e):
self.log('privmsg',
source=e.source,
msg=e.message,
target=e.target,
)
if e.target in self.channels:
# TODO: move this to self.log() so we don't have to get time twice?
self.channels[e.target].log_message(e.source, e.message)
self.run_modules(e)
def on_join(self, e):
self.log('join',
source=e.source,
target=e.target,
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
if e.source != self.nickname:
if nick not in self.users:
self.users[nick] = SeshetUser(nick, e.user, e.host)
self.users[nick].join(self.channels[chan])
self.run_modules(e)
def on_part(self, e):
self.log('part',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params[1:]),
target=e.target,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in self.users.values():
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_quit(self, e):
nick = IRCstr(e.source)
for chan in self.channels.values():
if nick in chan.users:
self.log('quit',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params),
target=chan.name,
)
self.users[nick].quit()
del self.users[nick]
def on_disconnect(self, e):
pass
def on_kick(self, e):
self.log('kick',
source=e.source,
target=e.target,
params=e.params[0],
msg=' '.join(e.params[1:]),
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in self.users.values():
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_nick_change(self, e):
new_nick = IRCstr(e.target)
old_nick = IRCstr(e.source)
for chan in self.channels.values():
if e.source in chan.user_list:
self.log('nick',
source=e.source,
hostmask=e.user+'@'+e.host,
target=chan.name,
params=e.target,
)
self.users[old_nick].change_nick(new_nick)
self.users[new_nick] = self.users[old_nick]
del self.users[old_nick]
def on_ctcp_action(self, e):
self.log('action',
source=e.source,
target=e.target,
msg=' '.join(e.params),
)
def on_welcome(self, e):
pass
def on_mode(self, e):
self.log('mode',
source=e.source,
msg=' '.join(e.params),
target=e.target,
)
def before_poll(self):
"""Called each loop before polling sockets for I/O."""
pass
def after_poll(self):
"""Called each loop after polling sockets for I/O and
handling any queued events.
"""
pass
def start(self):
logging.debug("Beginning poll loop")
self._loop(self.conn._map)
def _log_to_file(self, etype, source, msg='', target='', hostmask='', params=''):
"""Override `log()` if bot is not initialized with a database
connection. Do not call this method directly.
"""
today = datetime.utcnow()
# TODO: Use self.locale['timezone'] for changing time
date = today.strftime(self.locale['date_fmt'])
time = today.strftime(self.locale['time_fmt'])
datetime_s = today.strftime(self.locale['short_datetime_fmt'])
datetime_l = today.strftime(self.locale['long_datetime_fmt'])
if target == self.nickname and etype in ('privmsg', 'action'):
target = source
if etype in self.log_formats:
file_path = os.path.expanduser(self.log_file.format(**locals()))
file_dir = os.path.dirname(file_path)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
line = self.log_formats[etype].format(**locals())
with open(file_path, 'a') as log:
log.write(line+'\n')
# else do nothing
def _run_only_core(self, *args, **kwargs):
"""Override `_run_commands()` if bot is not initialized with a
database connection. Do not call this method directly.
Rather than getting a list of enabled modules from the database,
Seshet will only run the commands defined by `core` in this package.
The bot will only run commands given in private message ("query")
by either an authenticated user defined in the instance's config file,
or by any user with the same hostmask if authentication isn't set up
in the instance's config file.
The `core` command module from this package can only be overridden if
the bot is initialized with a database connection and a new `core`
module is entered into the database.
"""
pass
def _loop(self, map):
"""The main loop. Poll sockets for I/O and run any other functions
that need to be run every loop.
"""
try:
from asyncore import poll
except ImportError:
raise Exception("Couldn't find poll function. Cannot start bot.")
while map:
self.before_poll()
poll(timeout=30.0, map=map)
self.after_poll()
|
Kopachris/seshet
|
seshet/bot.py
|
SeshetBot._log_to_file
|
python
|
def _log_to_file(self, etype, source, msg='', target='', hostmask='', params=''):
today = datetime.utcnow()
# TODO: Use self.locale['timezone'] for changing time
date = today.strftime(self.locale['date_fmt'])
time = today.strftime(self.locale['time_fmt'])
datetime_s = today.strftime(self.locale['short_datetime_fmt'])
datetime_l = today.strftime(self.locale['long_datetime_fmt'])
if target == self.nickname and etype in ('privmsg', 'action'):
target = source
if etype in self.log_formats:
file_path = os.path.expanduser(self.log_file.format(**locals()))
file_dir = os.path.dirname(file_path)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
line = self.log_formats[etype].format(**locals())
with open(file_path, 'a') as log:
log.write(line+'\n')
|
Override `log()` if bot is not initialized with a database
connection. Do not call this method directly.
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/bot.py#L460-L482
| null |
class SeshetBot(bot.SimpleBot):
"""Extend `ircutils3.bot.SimpleBot`.
Each instance represents one bot, connected to one IRC network.
Each instance should have its own database, but can make use of
any shared command modules. The modules may have to be added to
the bot's database if the bot wasn't created using the
`seshet --config` or `seshet --new` commands.
"""
def __init__(self, nick='Seshet', db=None, debug_file=None, verbosity=99):
"""Extend `ircutils3.bot.SimpleBot.__init__()`.
Keyword argument `db` is required for running commands other
than core commands and should be an instance of pydal.DAL.
"""
# initialize debug logging
if debug_file is None:
logging.basicConfig(level=verbosity)
else:
logging.basicConfig(filename=os.path.expanduser(debug_file),
level=verbosity
)
logging.debug("Running `SimpleBot.__init__`...")
bot.SimpleBot.__init__(self, nick, auto_handle=False)
# define defaults
self.session = Storage()
self.log_file = 'seshet.log'
self.log_formats = {}
self.locale = {}
self.channels = {}
self.users = {}
if db is None:
# no database connection, only log to file and run
# core command modules
logging.info("No db, IRC logging will be done to file")
self.log = self._log_to_file
self.run_modules = self._run_only_core
# dummy KV store since no db
self.storage = Storage()
else:
logging.info("Using database %s", db)
self.db = db
self.storage = KVStore(db)
# Add default handlers
logging.debug("Adding default handlers...")
self.events["any"].add_handler(client._update_client_info)
self.events["ctcp_version"].add_handler(client._reply_to_ctcp_version)
self.events["name_reply"].add_handler(_add_channel_names)
def log(self, etype, source, msg='', target='', hostmask='', params=''):
"""Log an event in the database.
Required:
`etype` - event type. One of 'PRIVMSG', 'QUIT', 'PART', 'ACTION',
'NICK', 'JOIN', 'MODE', 'KICK', 'CTCP', or 'ERROR'. Enforced
by database model.
`source` - source of the event. Usually a user. For NICK events,
the user's original nickname. For ERROR events, this should be
the exception name, the module name, and the line number. The
full traceback will be logged in `msg`.
Optional:
`msg` - a message associated with the event.
`target` - the target the message was directed to. For MODE and KICK
events, this will be the user the event was performed on. For
NICK events, this will be channel the event was seen in (an event
will be created for each channel the user is seen by the bot in).
`hostmask` - a hostmask associated with the event.
`parms` - any additional parameters associated with the event, such as
a new nickname (for NICK events), mode switches (for MODE events),
or a dump of local variables (for ERROR events).
"""
self.db.event_log.insert(event_type=etype,
event_time=datetime.utcnow(),
source=source,
target=target,
message=msg,
host=hostmask,
params=params,
)
self.db.commit()
def run_modules(self, e):
# grab local pointer to self.db for faster lookup
db = self.db
# get initial list of modules handling this event type
event_types = db.modules.event_types
mod_enabled = db.modules.enabled
init_mods = db(event_types.contains(e.command) & mod_enabled).select()
logging.debug(("Running modules for {} command. "
"Initial module list:\n{}").format(e.command, init_mods)
)
if e.command in ('PRIVMSG', 'CTCP_ACTION', 'NOTICE'):
# narrow down list of modules to run based on event parameters
# lowercase for non-caps comparisons
m_low = e.message.lower()
bot_n = self.nickname.lower()
bot_u = self.user.lower()
bot_r = self.real_name.lower()
# indicates whether or not name has already been stripped from
# original message
for_us = False
if e.target.startswith('#'):
chan_msg = True
chan_nicks = self.channels[e.target].users
else:
chan_msg = False
fin_mods = list() # final list of modules to run
for mod in init_mods:
if e.source in mod.whitelist:
fin_mods.append(mod)
elif e.source in mod.blacklist:
pass
if self.nickname in mod.enicks:
if e.target == self.nickname or for_us:
fin_mods.append(mod)
elif m_low.startswith(bot_n):
# strip nickname from original message so modules can
# process it correctly
e.message = e.message[len(bot_n):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_u):
e.message = e.message[len(bot_u):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_r):
e.message = e.message[len(bot_r):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
if chan_msg:
if e.target in mod.dchannels:
pass
elif set(mod.dnicks) & chan_nicks:
pass
elif e.target in mod.echannels:
fin_mods.append(mod)
elif set(mod.enicks) & chan_nicks:
fin_mods.append(mod)
argv = m_low.split()
for mod in fin_mods:
# run each module
m = __import__(mod.name) # TODO: use importlib
# TODO: add authentication and rate limiting
for cmd, fun in m.commands.items():
if (mod.cmd_prefix + cmd) == argv[0]:
fun(self, e)
break
def get_unique_users(self, chan):
"""Get the set of users that are unique to the given channel (i.e. not
present in any other channel the bot is in).
"""
chan = IRCstr(chan)
these_users = self.channels[chan].users
other_users = set()
for c in self.channels.values():
if c.name != chan:
other_users |= c.users
return these_users - other_users
def on_message(self, e):
self.log('privmsg',
source=e.source,
msg=e.message,
target=e.target,
)
if e.target in self.channels:
# TODO: move this to self.log() so we don't have to get time twice?
self.channels[e.target].log_message(e.source, e.message)
self.run_modules(e)
def on_join(self, e):
self.log('join',
source=e.source,
target=e.target,
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
if e.source != self.nickname:
if nick not in self.users:
self.users[nick] = SeshetUser(nick, e.user, e.host)
self.users[nick].join(self.channels[chan])
self.run_modules(e)
def on_part(self, e):
self.log('part',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params[1:]),
target=e.target,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in self.users.values():
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_quit(self, e):
nick = IRCstr(e.source)
for chan in self.channels.values():
if nick in chan.users:
self.log('quit',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params),
target=chan.name,
)
self.users[nick].quit()
del self.users[nick]
def on_disconnect(self, e):
pass
def on_kick(self, e):
self.log('kick',
source=e.source,
target=e.target,
params=e.params[0],
msg=' '.join(e.params[1:]),
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in self.users.values():
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_nick_change(self, e):
new_nick = IRCstr(e.target)
old_nick = IRCstr(e.source)
for chan in self.channels.values():
if e.source in chan.user_list:
self.log('nick',
source=e.source,
hostmask=e.user+'@'+e.host,
target=chan.name,
params=e.target,
)
self.users[old_nick].change_nick(new_nick)
self.users[new_nick] = self.users[old_nick]
del self.users[old_nick]
def on_ctcp_action(self, e):
self.log('action',
source=e.source,
target=e.target,
msg=' '.join(e.params),
)
def on_welcome(self, e):
pass
def on_mode(self, e):
self.log('mode',
source=e.source,
msg=' '.join(e.params),
target=e.target,
)
def before_poll(self):
"""Called each loop before polling sockets for I/O."""
pass
def after_poll(self):
"""Called each loop after polling sockets for I/O and
handling any queued events.
"""
pass
def connect(self, *args, **kwargs):
"""Extend `client.SimpleClient.connect()` with defaults"""
defaults = {}
for i, k in enumerate(('host', 'port', 'channel', 'use_ssl', 'password')):
if i < len(args):
defaults[k] = args[i]
elif k in kwargs:
defaults[k] = kwargs[k]
else:
def_k = 'default_' + k
defaults[k] = getattr(self, def_k, None)
if defaults['use_ssl'] is None:
defaults['use_ssl'] = False
if defaults['host'] is None:
raise TypeError("missing 1 required positional argument: 'host'")
logging.info("Connecting to %s:%s and joining channels %s",
defaults['host'],
defaults['port'],
defaults['channel'],
)
client.SimpleClient.connect(self, **defaults)
def start(self):
logging.debug("Beginning poll loop")
self._loop(self.conn._map)
def _log_to_file(self, etype, source, msg='', target='', hostmask='', params=''):
"""Override `log()` if bot is not initialized with a database
connection. Do not call this method directly.
"""
today = datetime.utcnow()
# TODO: Use self.locale['timezone'] for changing time
date = today.strftime(self.locale['date_fmt'])
time = today.strftime(self.locale['time_fmt'])
datetime_s = today.strftime(self.locale['short_datetime_fmt'])
datetime_l = today.strftime(self.locale['long_datetime_fmt'])
if target == self.nickname and etype in ('privmsg', 'action'):
target = source
if etype in self.log_formats:
file_path = os.path.expanduser(self.log_file.format(**locals()))
file_dir = os.path.dirname(file_path)
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
line = self.log_formats[etype].format(**locals())
with open(file_path, 'a') as log:
log.write(line+'\n')
# else do nothing
def _run_only_core(self, *args, **kwargs):
"""Override `_run_commands()` if bot is not initialized with a
database connection. Do not call this method directly.
Rather than getting a list of enabled modules from the database,
Seshet will only run the commands defined by `core` in this package.
The bot will only run commands given in private message ("query")
by either an authenticated user defined in the instance's config file,
or by any user with the same hostmask if authentication isn't set up
in the instance's config file.
The `core` command module from this package can only be overridden if
the bot is initialized with a database connection and a new `core`
module is entered into the database.
"""
pass
def _loop(self, map):
"""The main loop. Poll sockets for I/O and run any other functions
that need to be run every loop.
"""
try:
from asyncore import poll
except ImportError:
raise Exception("Couldn't find poll function. Cannot start bot.")
while map:
self.before_poll()
poll(timeout=30.0, map=map)
self.after_poll()
|
Kopachris/seshet
|
seshet/bot.py
|
SeshetBot._loop
|
python
|
def _loop(self, map):
try:
from asyncore import poll
except ImportError:
raise Exception("Couldn't find poll function. Cannot start bot.")
while map:
self.before_poll()
poll(timeout=30.0, map=map)
self.after_poll()
|
The main loop. Poll sockets for I/O and run any other functions
that need to be run every loop.
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/bot.py#L502-L514
|
[
"def before_poll(self):\n \"\"\"Called each loop before polling sockets for I/O.\"\"\"\n pass\n",
"def after_poll(self):\n \"\"\"Called each loop after polling sockets for I/O and\n handling any queued events.\n \"\"\"\n pass\n"
] |
class SeshetBot(bot.SimpleBot):
"""Extend `ircutils3.bot.SimpleBot`.
Each instance represents one bot, connected to one IRC network.
Each instance should have its own database, but can make use of
any shared command modules. The modules may have to be added to
the bot's database if the bot wasn't created using the
`seshet --config` or `seshet --new` commands.
"""
def __init__(self, nick='Seshet', db=None, debug_file=None, verbosity=99):
"""Extend `ircutils3.bot.SimpleBot.__init__()`.
Keyword argument `db` is required for running commands other
than core commands and should be an instance of pydal.DAL.
"""
# initialize debug logging
if debug_file is None:
logging.basicConfig(level=verbosity)
else:
logging.basicConfig(filename=os.path.expanduser(debug_file),
level=verbosity
)
logging.debug("Running `SimpleBot.__init__`...")
bot.SimpleBot.__init__(self, nick, auto_handle=False)
# define defaults
self.session = Storage()
self.log_file = 'seshet.log'
self.log_formats = {}
self.locale = {}
self.channels = {}
self.users = {}
if db is None:
# no database connection, only log to file and run
# core command modules
logging.info("No db, IRC logging will be done to file")
self.log = self._log_to_file
self.run_modules = self._run_only_core
# dummy KV store since no db
self.storage = Storage()
else:
logging.info("Using database %s", db)
self.db = db
self.storage = KVStore(db)
# Add default handlers
logging.debug("Adding default handlers...")
self.events["any"].add_handler(client._update_client_info)
self.events["ctcp_version"].add_handler(client._reply_to_ctcp_version)
self.events["name_reply"].add_handler(_add_channel_names)
def log(self, etype, source, msg='', target='', hostmask='', params=''):
"""Log an event in the database.
Required:
`etype` - event type. One of 'PRIVMSG', 'QUIT', 'PART', 'ACTION',
'NICK', 'JOIN', 'MODE', 'KICK', 'CTCP', or 'ERROR'. Enforced
by database model.
`source` - source of the event. Usually a user. For NICK events,
the user's original nickname. For ERROR events, this should be
the exception name, the module name, and the line number. The
full traceback will be logged in `msg`.
Optional:
`msg` - a message associated with the event.
`target` - the target the message was directed to. For MODE and KICK
events, this will be the user the event was performed on. For
NICK events, this will be channel the event was seen in (an event
will be created for each channel the user is seen by the bot in).
`hostmask` - a hostmask associated with the event.
`parms` - any additional parameters associated with the event, such as
a new nickname (for NICK events), mode switches (for MODE events),
or a dump of local variables (for ERROR events).
"""
self.db.event_log.insert(event_type=etype,
event_time=datetime.utcnow(),
source=source,
target=target,
message=msg,
host=hostmask,
params=params,
)
self.db.commit()
def run_modules(self, e):
# grab local pointer to self.db for faster lookup
db = self.db
# get initial list of modules handling this event type
event_types = db.modules.event_types
mod_enabled = db.modules.enabled
init_mods = db(event_types.contains(e.command) & mod_enabled).select()
logging.debug(("Running modules for {} command. "
"Initial module list:\n{}").format(e.command, init_mods)
)
if e.command in ('PRIVMSG', 'CTCP_ACTION', 'NOTICE'):
# narrow down list of modules to run based on event parameters
# lowercase for non-caps comparisons
m_low = e.message.lower()
bot_n = self.nickname.lower()
bot_u = self.user.lower()
bot_r = self.real_name.lower()
# indicates whether or not name has already been stripped from
# original message
for_us = False
if e.target.startswith('#'):
chan_msg = True
chan_nicks = self.channels[e.target].users
else:
chan_msg = False
fin_mods = list() # final list of modules to run
for mod in init_mods:
if e.source in mod.whitelist:
fin_mods.append(mod)
elif e.source in mod.blacklist:
pass
if self.nickname in mod.enicks:
if e.target == self.nickname or for_us:
fin_mods.append(mod)
elif m_low.startswith(bot_n):
# strip nickname from original message so modules can
# process it correctly
e.message = e.message[len(bot_n):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_u):
e.message = e.message[len(bot_u):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
elif m_low.startswith(bot_r):
e.message = e.message[len(bot_r):].lstrip(',: ')
fin_mods.append(mod)
for_us = True
if chan_msg:
if e.target in mod.dchannels:
pass
elif set(mod.dnicks) & chan_nicks:
pass
elif e.target in mod.echannels:
fin_mods.append(mod)
elif set(mod.enicks) & chan_nicks:
fin_mods.append(mod)
argv = m_low.split()
for mod in fin_mods:
# run each module
m = __import__(mod.name) # TODO: use importlib
# TODO: add authentication and rate limiting
for cmd, fun in m.commands.items():
if (mod.cmd_prefix + cmd) == argv[0]:
fun(self, e)
break
def get_unique_users(self, chan):
"""Get the set of users that are unique to the given channel (i.e. not
present in any other channel the bot is in).
"""
chan = IRCstr(chan)
these_users = self.channels[chan].users
other_users = set()
for c in self.channels.values():
if c.name != chan:
other_users |= c.users
return these_users - other_users
def on_message(self, e):
self.log('privmsg',
source=e.source,
msg=e.message,
target=e.target,
)
if e.target in self.channels:
# TODO: move this to self.log() so we don't have to get time twice?
self.channels[e.target].log_message(e.source, e.message)
self.run_modules(e)
def on_join(self, e):
self.log('join',
source=e.source,
target=e.target,
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
if e.source != self.nickname:
if nick not in self.users:
self.users[nick] = SeshetUser(nick, e.user, e.host)
self.users[nick].join(self.channels[chan])
self.run_modules(e)
def on_part(self, e):
self.log('part',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params[1:]),
target=e.target,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in self.users.values():
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_quit(self, e):
nick = IRCstr(e.source)
for chan in self.channels.values():
if nick in chan.users:
self.log('quit',
source=e.source,
hostmask=e.user+'@'+e.host,
msg=' '.join(e.params),
target=chan.name,
)
self.users[nick].quit()
del self.users[nick]
def on_disconnect(self, e):
pass
def on_kick(self, e):
self.log('kick',
source=e.source,
target=e.target,
params=e.params[0],
msg=' '.join(e.params[1:]),
hostmask=e.user+'@'+e.host,
)
chan = IRCstr(e.target)
nick = IRCstr(e.source)
channel = self.channels[chan]
user = self.users[nick]
user.part(channel)
if nick == self.nickname:
# bot parted, remove that channel from all users and
# remove any users with empty channel lists
for u in self.users.values():
if channel in u.channels:
u.channels.remove(channel)
if len(u.channels) == 0:
del self.users[u.nick]
def on_nick_change(self, e):
new_nick = IRCstr(e.target)
old_nick = IRCstr(e.source)
for chan in self.channels.values():
if e.source in chan.user_list:
self.log('nick',
source=e.source,
hostmask=e.user+'@'+e.host,
target=chan.name,
params=e.target,
)
self.users[old_nick].change_nick(new_nick)
self.users[new_nick] = self.users[old_nick]
del self.users[old_nick]
def on_ctcp_action(self, e):
self.log('action',
source=e.source,
target=e.target,
msg=' '.join(e.params),
)
def on_welcome(self, e):
pass
def on_mode(self, e):
self.log('mode',
source=e.source,
msg=' '.join(e.params),
target=e.target,
)
def before_poll(self):
"""Called each loop before polling sockets for I/O."""
pass
def after_poll(self):
"""Called each loop after polling sockets for I/O and
handling any queued events.
"""
pass
def connect(self, *args, **kwargs):
    """Extend `client.SimpleClient.connect()` with per-instance defaults.

    Each of host/port/channel/use_ssl/password is resolved in order:
    positional arg, keyword arg, then the instance's ``default_*``
    attribute (None if unset).
    """
    param_names = ('host', 'port', 'channel', 'use_ssl', 'password')
    defaults = {}
    for position, name in enumerate(param_names):
        if position < len(args):
            value = args[position]
        elif name in kwargs:
            value = kwargs[name]
        else:
            value = getattr(self, 'default_' + name, None)
        defaults[name] = value
    if defaults['use_ssl'] is None:
        defaults['use_ssl'] = False
    if defaults['host'] is None:
        raise TypeError("missing 1 required positional argument: 'host'")
    logging.info("Connecting to %s:%s and joining channels %s",
                 defaults['host'],
                 defaults['port'],
                 defaults['channel'],
                 )
    client.SimpleClient.connect(self, **defaults)
def start(self):
    """Enter the blocking poll loop; returns when the socket map empties."""
    logging.debug("Beginning poll loop")
    self._loop(self.conn._map)
def _log_to_file(self, etype, source, msg='', target='', hostmask='', params=''):
    """Override `log()` if bot is not initialized with a database
    connection. Do not call this method directly.

    Formats the event with the template for `etype` from
    `self.log_formats` and appends it to the file named by
    `self.log_file`.  Events with no registered format are dropped.
    """
    today = datetime.utcnow()
    # TODO: Use self.locale['timezone'] for changing time
    date = today.strftime(self.locale['date_fmt'])
    time = today.strftime(self.locale['time_fmt'])
    datetime_s = today.strftime(self.locale['short_datetime_fmt'])
    datetime_l = today.strftime(self.locale['long_datetime_fmt'])
    # Private messages/actions addressed to the bot are filed under the sender.
    if target == self.nickname and etype in ('privmsg', 'action'):
        target = source
    if etype in self.log_formats:
        # Both the file-name pattern and the line template are filled from
        # locals(), so renaming any local above changes the {placeholders}
        # available to the config ({date}, {time}, {source}, {target},
        # {msg}, {params}, {hostmask}, ...).
        file_path = os.path.expanduser(self.log_file.format(**locals()))
        file_dir = os.path.dirname(file_path)
        if not os.path.isdir(file_dir):
            os.makedirs(file_dir)
        line = self.log_formats[etype].format(**locals())
        with open(file_path, 'a') as log:
            log.write(line+'\n')
    # else do nothing
def _run_only_core(self, *args, **kwargs):
    """Override `_run_commands()` if bot is not initialized with a
    database connection. Do not call this method directly.
    Rather than getting a list of enabled modules from the database,
    Seshet will only run the commands defined by `core` in this package.
    The bot will only run commands given in private message ("query")
    by either an authenticated user defined in the instance's config file,
    or by any user with the same hostmask if authentication isn't set up
    in the instance's config file.
    The `core` command module from this package can only be overridden if
    the bot is initialized with a database connection and a new `core`
    module is entered into the database.
    """
    # TODO: not yet implemented -- currently a stub that ignores events.
    pass
def _loop(self, map):
    """The main loop. Poll sockets for I/O and run any other functions
    that need to be run every loop.
    """
    # NOTE(review): asyncore was removed from the standard library in
    # Python 3.12 -- this import fails there; port to selectors/asyncio.
    try:
        from asyncore import poll
    except ImportError:
        raise Exception("Couldn't find poll function. Cannot start bot.")
    # Runs until the socket map is empty (every connection closed).
    while map:
        self.before_poll()
        poll(timeout=30.0, map=map)
        self.after_poll()
|
Kopachris/seshet
|
seshet/config.py
|
build_db_tables
|
python
|
def build_db_tables(db):
    """Define Seshet's core tables (`event_log`, `modules`) on *db*.

    *db* must be a connected `pydal.DAL` instance; raises Exception
    otherwise.
    """
    if not isinstance(db, DAL) or not db._uri:
        raise Exception("Need valid DAL object to define tables")
    # event log - self-explanatory, logs all events
    db.define_table('event_log',
                    Field('event_type'),
                    Field('event_time', 'datetime'),
                    Field('source'),
                    Field('target'),
                    Field('message', 'text'),
                    Field('host'),
                    Field('params', 'list:string'),
                    )
    # per-module registry: enable/disable flags, event filters and ACLs
    db.define_table('modules',
                    Field('name', notnull=True, unique=True, length=256),
                    Field('enabled', 'boolean'),
                    Field('event_types', 'list:string'),
                    Field('description', 'text'),
                    Field('echannels', 'list:string'),
                    Field('dchannels', 'list:string'),
                    Field('enicks', 'list:string'),
                    Field('dnicks', 'list:string'),
                    Field('whitelist', 'list:string'),
                    Field('blacklist', 'list:string'),
                    Field('cmd_prefix', length=1, default='!', notnull=True),
                    Field('acl', 'json'),
                    Field('rate_limit', 'json'),
                    )
|
Build Seshet's basic database schema. Requires one parameter,
`db` as `pydal.DAL` instance.
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/config.py#L121-L153
| null |
"""Define default configuration, read configuration file, and apply
configuration to SeshetBot instance.
"""
from configparser import ConfigParser
from pydal import DAL, Field
# Built-in default configuration, parsed when build_bot() is given no file.
# The [logging] templates are filled by SeshetBot._log_to_file() via
# str.format(**locals()), so only placeholder names that exist as locals
# there are valid ({date}, {time}, {source}, {target}, {msg}, {params},
# {hostmask}, ...); str.format raises KeyError on unknown names, so the
# old "{parms}" placeholders, the %()s-style file pattern, and the kick
# line (which named the channel instead of the kicked user) were bugs.
default_config = """
[connection]
# passed to SeshetBot.connect()
server: chat.freenode.net
port: 6667
channels: #botwar
ssl: False
[client]
nickname: Seshet
user: seshet
realname: seshetbot
[welcome]
# stuff sent by the bot after connecting
use_nickserv: False
nickserv_pass:
user_mode: -x
[locale]
timezone: UTC
locale: en_US
# see docs for datetime.strftime
date_fmt: %m%d%y
# 071415
time_fmt: %H:%M:%S
# 11:49:57
short_datetime_fmt: %Y-%m-%d %H:%M:%S
# 2015-07-14 11:49:57
long_datetime_fmt: %A %d %B %Y at %H:%M:%S %Z
# Tuesday 14 July 2015 at 11:49:57 UTC
[database]
use_db: True
db_string: sqlite://seshet.db
[logging]
# if using db, this will be ignored
file: logs/{target}_{date}.log
privmsg: [{time}] <{source}> {msg}
join: [{time}] -- {source} ({hostmask}) has joined
part: [{time}] -- {source} ({hostmask}) has left ({msg})
quit: [{time}] -- {source} ({hostmask}) has quit ({msg})
kick: [{time}] -- {params} has been kicked by {source} ({msg})
mode: [{time}] -- {source} ({hostmask}) has set mode {msg} on {target}
nick: [{time}] -- {source} is now known as {params}
action: [{time}] * {source} {msg}
[debug]
use_debug: False
# corresponds to levels in logging module
verbosity: warning
file: seshet-debug.log
"""
# Variant of the default configuration used by the test suite: database
# logging is disabled, so events go to per-channel files under
# ~/.seshet/logs and debug output is verbose.
testing_config = """
[connection]
# passed to SeshetBot.connect()
server: chat.freenode.net
port: 6667
channels: #botwar
ssl: False
[client]
nickname: Seshet
user: seshet
realname: seshetbot
[welcome]
# stuff sent by the bot after connecting
use_nickserv: False
nickserv_pass:
user_mode: -x
[locale]
timezone: UTC
locale: en_US
# see docs for datetime.strftime
date_fmt: %m%d%y
# 071415
time_fmt: %H:%M:%S
# 11:49:57
short_datetime_fmt: %Y-%m-%d %H:%M:%S
# 2015-07-14 11:49:57
long_datetime_fmt: %A %d %B %Y at %H:%M:%S %Z
# Tuesday 14 July 2015 at 11:49:57 UTC
[database]
# no db connection for testing
use_db: False
[logging]
# if using db, this will be ignored
file: ~/.seshet/logs/{target}_{date}.log
privmsg: [{time}] <{source}> {msg}
join: [{time}] -- {source} ({hostmask}) has joined
part: [{time}] -- {source} ({hostmask}) has left ({msg})
quit: [{time}] -- {source} ({hostmask}) has quit ({msg})
kick: [{time}] -- {params} has been kicked by {source} ({msg})
mode: [{time}] -- {source} ({hostmask}) has set mode {msg} on {target}
nick: [{time}] -- {source} is now known as {params}
action: [{time}] * {source} {msg}
[debug]
# corresponds to levels in logging module
verbosity: debug
file: ~/.seshet/logs/debug.log
"""
def build_db_tables(db):
    """Build Seshet's basic database schema. Requires one parameter,
    `db` as `pydal.DAL` instance; raises Exception otherwise.
    """
    if not isinstance(db, DAL) or not db._uri:
        raise Exception("Need valid DAL object to define tables")
    # event log - self-explanatory, logs all events
    db.define_table('event_log',
                    Field('event_type'),
                    Field('event_time', 'datetime'),
                    Field('source'),
                    Field('target'),
                    Field('message', 'text'),
                    Field('host'),
                    Field('params', 'list:string'),
                    )
    # per-module registry: enable/disable flags, event filters and ACLs
    db.define_table('modules',
                    Field('name', notnull=True, unique=True, length=256),
                    Field('enabled', 'boolean'),
                    Field('event_types', 'list:string'),
                    Field('description', 'text'),
                    Field('echannels', 'list:string'),
                    Field('dchannels', 'list:string'),
                    Field('enicks', 'list:string'),
                    Field('dnicks', 'list:string'),
                    Field('whitelist', 'list:string'),
                    Field('blacklist', 'list:string'),
                    Field('cmd_prefix', length=1, default='!', notnull=True),
                    Field('acl', 'json'),
                    Field('rate_limit', 'json'),
                    )
def build_bot(config_file=None):
    """Parse a config and return a SeshetBot instance. After, the bot can be
    run simply by calling .connect() and then .start()

    Optional arguments:
        config_file - valid file path or ConfigParser instance

    If config_file is None, will read default config defined in this module.
    """
    from . import bot

    config = ConfigParser(interpolation=None)
    if config_file is None:
        config.read_string(default_config)
    elif isinstance(config_file, ConfigParser):
        config = config_file
    else:
        config.read(config_file)

    # Section shorthands.
    db_conf = config['database']
    conn_conf = config['connection']
    client_conf = config['client']
    log_conf = config['logging']
    verbosity = config['debug']['verbosity'].lower() or 'notset'
    debug_file = config['debug']['file'] or None

    if db_conf.getboolean('use_db'):
        # Database-backed logging: file settings are unused.
        db = DAL(db_conf['db_string'])
        build_db_tables(db)
        log_file = None
        log_fmts = {}
    else:
        # File-backed logging: pop 'file' so the remaining [logging]
        # options are all line templates.
        db = None
        log_file = log_conf.pop('file')
        log_fmts = dict(log_conf)

    # Map config verbosity names onto logging-module level numbers.
    level_by_name = {
        'notset': 0,
        'debug': 10,
        'info': 20,
        'warning': 30,
        'error': 40,
        'critical': 50,
    }
    lvl = int(level_by_name[verbosity])

    seshetbot = bot.SeshetBot(client_conf['nickname'], db, debug_file, lvl)

    # Connection defaults picked up by connect().
    seshetbot.default_host = conn_conf['server']
    seshetbot.default_port = int(conn_conf['port'])
    seshetbot.default_channel = conn_conf['channels'].split(',')
    seshetbot.default_use_ssl = conn_conf.getboolean('ssl')

    # Client identity.
    seshetbot.user = client_conf['user']
    seshetbot.real_name = client_conf['realname']

    # Logging configuration.
    seshetbot.log_file = log_file
    seshetbot.log_formats = log_fmts
    seshetbot.locale = dict(config['locale'])
    return seshetbot
|
Kopachris/seshet
|
seshet/config.py
|
build_bot
|
python
|
def build_bot(config_file=None):
    """Parse a config and return a SeshetBot instance. After, the bot can be
    run simply by calling .connect() and then .start()

    Optional arguments:
        config_file - valid file path or ConfigParser instance

    If config_file is None, will read default config defined in this module.
    """
    from . import bot
    config = ConfigParser(interpolation=None)
    if config_file is None:
        config.read_string(default_config)
    elif isinstance(config_file, ConfigParser):
        config = config_file
    else:
        config.read(config_file)
    # shorter names
    db_conf = config['database']
    conn_conf = config['connection']
    client_conf = config['client']
    log_conf = config['logging']
    verbosity = config['debug']['verbosity'].lower() or 'notset'
    debug_file = config['debug']['file'] or None
    # add more as they're used
    if db_conf.getboolean('use_db'):
        # database-backed logging: file settings are unused
        db = DAL(db_conf['db_string'])
        build_db_tables(db)
        log_file = None
        log_fmts = {}
    else:
        db = None
        # pop() so the remaining [logging] options are all line templates
        log_file = log_conf.pop('file')
        log_fmts = dict(log_conf)
    # debug logging: map verbosity names onto logging-module level numbers
    debug_lvls = {'notset': 0,
                  'debug': 10,
                  'info': 20,
                  'warning': 30,
                  'error': 40,
                  'critical': 50,
                  }
    lvl = int(debug_lvls[verbosity])
    seshetbot = bot.SeshetBot(client_conf['nickname'], db, debug_file, lvl)
    # connection info for connect()
    seshetbot.default_host = conn_conf['server']
    seshetbot.default_port = int(conn_conf['port'])
    seshetbot.default_channel = conn_conf['channels'].split(',')
    seshetbot.default_use_ssl = conn_conf.getboolean('ssl')
    # client info
    seshetbot.user = client_conf['user']
    seshetbot.real_name = client_conf['realname']
    # logging info
    seshetbot.log_file = log_file
    seshetbot.log_formats = log_fmts
    seshetbot.locale = dict(config['locale'])
    return seshetbot
|
Parse a config and return a SeshetBot instance. After, the bot can be run
simply by calling .connect() and then .start()
Optional arguments:
config_file - valid file path or ConfigParser instance
If config_file is None, will read default config defined in this module.
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/config.py#L156-L222
|
[
"def build_db_tables(db):\n \"\"\"Build Seshet's basic database schema. Requires one parameter,\n `db` as `pydal.DAL` instance.\n \"\"\"\n\n if not isinstance(db, DAL) or not db._uri:\n raise Exception(\"Need valid DAL object to define tables\")\n\n # event log - self-explanatory, logs all events\n db.define_table('event_log',\n Field('event_type'),\n Field('event_time', 'datetime'),\n Field('source'),\n Field('target'),\n Field('message', 'text'),\n Field('host'),\n Field('params', 'list:string'),\n )\n db.define_table('modules',\n Field('name', notnull=True, unique=True, length=256),\n Field('enabled', 'boolean'),\n Field('event_types', 'list:string'),\n Field('description', 'text'),\n Field('echannels', 'list:string'),\n Field('dchannels', 'list:string'),\n Field('enicks', 'list:string'),\n Field('dnicks', 'list:string'),\n Field('whitelist', 'list:string'),\n Field('blacklist', 'list:string'),\n Field('cmd_prefix', length=1, default='!', notnull=True),\n Field('acl', 'json'),\n Field('rate_limit', 'json'),\n )\n"
] |
"""Define default configuration, read configuration file, and apply
configuration to SeshetBot instance.
"""
from configparser import ConfigParser
from pydal import DAL, Field
# Built-in default configuration, parsed when build_bot() is given no file.
# The [logging] templates are filled by SeshetBot._log_to_file() via
# str.format(**locals()), so only placeholder names that exist as locals
# there are valid ({date}, {time}, {source}, {target}, {msg}, {params},
# {hostmask}, ...); str.format raises KeyError on unknown names, so the
# old "{parms}" placeholders, the %()s-style file pattern, and the kick
# line (which named the channel instead of the kicked user) were bugs.
default_config = """
[connection]
# passed to SeshetBot.connect()
server: chat.freenode.net
port: 6667
channels: #botwar
ssl: False
[client]
nickname: Seshet
user: seshet
realname: seshetbot
[welcome]
# stuff sent by the bot after connecting
use_nickserv: False
nickserv_pass:
user_mode: -x
[locale]
timezone: UTC
locale: en_US
# see docs for datetime.strftime
date_fmt: %m%d%y
# 071415
time_fmt: %H:%M:%S
# 11:49:57
short_datetime_fmt: %Y-%m-%d %H:%M:%S
# 2015-07-14 11:49:57
long_datetime_fmt: %A %d %B %Y at %H:%M:%S %Z
# Tuesday 14 July 2015 at 11:49:57 UTC
[database]
use_db: True
db_string: sqlite://seshet.db
[logging]
# if using db, this will be ignored
file: logs/{target}_{date}.log
privmsg: [{time}] <{source}> {msg}
join: [{time}] -- {source} ({hostmask}) has joined
part: [{time}] -- {source} ({hostmask}) has left ({msg})
quit: [{time}] -- {source} ({hostmask}) has quit ({msg})
kick: [{time}] -- {params} has been kicked by {source} ({msg})
mode: [{time}] -- {source} ({hostmask}) has set mode {msg} on {target}
nick: [{time}] -- {source} is now known as {params}
action: [{time}] * {source} {msg}
[debug]
use_debug: False
# corresponds to levels in logging module
verbosity: warning
file: seshet-debug.log
"""
# Variant of the default configuration used by the test suite: database
# logging is disabled, so events go to per-channel files under
# ~/.seshet/logs and debug output is verbose.
testing_config = """
[connection]
# passed to SeshetBot.connect()
server: chat.freenode.net
port: 6667
channels: #botwar
ssl: False
[client]
nickname: Seshet
user: seshet
realname: seshetbot
[welcome]
# stuff sent by the bot after connecting
use_nickserv: False
nickserv_pass:
user_mode: -x
[locale]
timezone: UTC
locale: en_US
# see docs for datetime.strftime
date_fmt: %m%d%y
# 071415
time_fmt: %H:%M:%S
# 11:49:57
short_datetime_fmt: %Y-%m-%d %H:%M:%S
# 2015-07-14 11:49:57
long_datetime_fmt: %A %d %B %Y at %H:%M:%S %Z
# Tuesday 14 July 2015 at 11:49:57 UTC
[database]
# no db connection for testing
use_db: False
[logging]
# if using db, this will be ignored
file: ~/.seshet/logs/{target}_{date}.log
privmsg: [{time}] <{source}> {msg}
join: [{time}] -- {source} ({hostmask}) has joined
part: [{time}] -- {source} ({hostmask}) has left ({msg})
quit: [{time}] -- {source} ({hostmask}) has quit ({msg})
kick: [{time}] -- {params} has been kicked by {source} ({msg})
mode: [{time}] -- {source} ({hostmask}) has set mode {msg} on {target}
nick: [{time}] -- {source} is now known as {params}
action: [{time}] * {source} {msg}
[debug]
# corresponds to levels in logging module
verbosity: debug
file: ~/.seshet/logs/debug.log
"""
def build_db_tables(db):
    """Build Seshet's basic database schema. Requires one parameter,
    `db` as `pydal.DAL` instance; raises Exception otherwise.
    """
    if not isinstance(db, DAL) or not db._uri:
        raise Exception("Need valid DAL object to define tables")
    # event log - self-explanatory, logs all events
    db.define_table('event_log',
                    Field('event_type'),
                    Field('event_time', 'datetime'),
                    Field('source'),
                    Field('target'),
                    Field('message', 'text'),
                    Field('host'),
                    Field('params', 'list:string'),
                    )
    # per-module registry: enable/disable flags, event filters and ACLs
    db.define_table('modules',
                    Field('name', notnull=True, unique=True, length=256),
                    Field('enabled', 'boolean'),
                    Field('event_types', 'list:string'),
                    Field('description', 'text'),
                    Field('echannels', 'list:string'),
                    Field('dchannels', 'list:string'),
                    Field('enicks', 'list:string'),
                    Field('dnicks', 'list:string'),
                    Field('whitelist', 'list:string'),
                    Field('blacklist', 'list:string'),
                    Field('cmd_prefix', length=1, default='!', notnull=True),
                    Field('acl', 'json'),
                    Field('rate_limit', 'json'),
                    )
def build_bot(config_file=None):
    """Parse a config and return a SeshetBot instance. After, the bot can be
    run simply by calling .connect() and then .start()

    Optional arguments:
        config_file - valid file path or ConfigParser instance

    If config_file is None, will read default config defined in this module.
    """
    from . import bot
    config = ConfigParser(interpolation=None)
    if config_file is None:
        config.read_string(default_config)
    elif isinstance(config_file, ConfigParser):
        config = config_file
    else:
        config.read(config_file)
    # shorter names
    db_conf = config['database']
    conn_conf = config['connection']
    client_conf = config['client']
    log_conf = config['logging']
    verbosity = config['debug']['verbosity'].lower() or 'notset'
    debug_file = config['debug']['file'] or None
    # add more as they're used
    if db_conf.getboolean('use_db'):
        # database-backed logging: file settings are unused
        db = DAL(db_conf['db_string'])
        build_db_tables(db)
        log_file = None
        log_fmts = {}
    else:
        db = None
        # pop() so the remaining [logging] options are all line templates
        log_file = log_conf.pop('file')
        log_fmts = dict(log_conf)
    # debug logging: map verbosity names onto logging-module level numbers
    debug_lvls = {'notset': 0,
                  'debug': 10,
                  'info': 20,
                  'warning': 30,
                  'error': 40,
                  'critical': 50,
                  }
    lvl = int(debug_lvls[verbosity])
    seshetbot = bot.SeshetBot(client_conf['nickname'], db, debug_file, lvl)
    # connection info for connect()
    seshetbot.default_host = conn_conf['server']
    seshetbot.default_port = int(conn_conf['port'])
    seshetbot.default_channel = conn_conf['channels'].split(',')
    seshetbot.default_use_ssl = conn_conf.getboolean('ssl')
    # client info
    seshetbot.user = client_conf['user']
    seshetbot.real_name = client_conf['realname']
    # logging info
    seshetbot.log_file = log_file
    seshetbot.log_formats = log_fmts
    seshetbot.locale = dict(config['locale'])
    return seshetbot
|
Kopachris/seshet
|
seshet/utils.py
|
Storage.getlist
|
python
|
def getlist(self, key):
    """Return the stored value for *key* as a list.

    Lists/tuples and a stored None pass through unchanged; a missing
    key yields an empty list; any other value is wrapped in a
    one-element list.
    """
    stored = self.get(key, [])
    if stored is not None and not isinstance(stored, (list, tuple)):
        return [stored]
    return stored
|
Returns a Storage value as a list.
If the value is a list it will be returned as-is.
If object is None, an empty list will be returned.
Otherwise, `[value]` will be returned.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlist('x')
['abc']
>>> request.vars.getlist('y')
['abc', 'def']
>>> request.vars.getlist('z')
[]
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/utils.py#L94-L120
| null |
class Storage(dict):
    """Dictionary subclass whose items are also reachable as attributes.

    ``obj.foo`` mirrors ``obj['foo']``; reads of missing keys or
    attributes return None instead of raising.

    >>> o = Storage(a=1)
    >>> o.a
    1
    >>> o['a']
    1
    >>> o.a = 2
    >>> o['a']
    2
    >>> o.missing is None
    True
    """
    __slots__ = ()
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
    __getitem__ = dict.get
    __getattr__ = dict.get
    __getnewargs__ = lambda self: getattr(dict,self).__getnewargs__(self)
    __repr__ = lambda self: '<Storage %s>' % dict.__repr__(self)
    __getstate__ = lambda self: None
    __copy__ = lambda self: Storage(self)

    def getlist(self, key):
        """Return the stored value for *key* as a list.

        Lists/tuples and a stored None pass through unchanged; a
        missing key yields an empty list; any other value is wrapped
        in a one-element list.
        """
        stored = self.get(key, [])
        if stored is not None and not isinstance(stored, (list, tuple)):
            return [stored]
        return stored

    def getfirst(self, key, default=None):
        """Return the first element of ``getlist(key)``, or *default*."""
        found = self.getlist(key)
        if found:
            return found[0]
        return default

    def getlast(self, key, default=None):
        """Return the last element of ``getlist(key)``, or *default*."""
        found = self.getlist(key)
        if found:
            return found[-1]
        return default
|
Kopachris/seshet
|
seshet/utils.py
|
Storage.getfirst
|
python
|
def getfirst(self, key, default=None):
    """Return the first element of ``self.getlist(key)``, or *default*
    when the key is missing/empty.
    """
    found = self.getlist(key)
    if found:
        return found[0]
    return default
|
Returns the first value of a list or the value itself when given a
`request.vars` style key.
If the value is a list, its first item will be returned;
otherwise, the value will be returned as-is.
Example output for a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getfirst('x')
'abc'
>>> request.vars.getfirst('y')
'abc'
>>> request.vars.getfirst('z')
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/utils.py#L122-L144
|
[
"def getlist(self, key):\n \"\"\"Returns a Storage value as a list.\n\n If the value is a list it will be returned as-is.\n If object is None, an empty list will be returned.\n Otherwise, `[value]` will be returned.\n\n Example output for a query string of `?x=abc&y=abc&y=def`::\n\n >>> request = Storage()\n >>> request.vars = Storage()\n >>> request.vars.x = 'abc'\n >>> request.vars.y = ['abc', 'def']\n >>> request.vars.getlist('x')\n ['abc']\n >>> request.vars.getlist('y')\n ['abc', 'def']\n >>> request.vars.getlist('z')\n []\n\n \"\"\"\n\n value = self.get(key, [])\n if value is None or isinstance(value, (list, tuple)):\n return value\n else:\n return [value]\n"
] |
class Storage(dict):
    """A Storage object is like a dictionary except `obj.foo` can be used
    in addition to `obj['foo']`, and setting obj.foo = None deletes item foo.
    Example:
    >>> o = Storage(a=1)
    >>> print o.a
    1
    >>> o['a']
    1
    >>> o.a = 2
    >>> print o['a']
    2
    >>> del o.a
    >>> print o.a
    None
    """
    __slots__ = ()
    # Attribute access maps straight onto item access; reads of missing
    # keys/attributes return None (dict.get) instead of raising.
    # NOTE(review): __setattr__ = dict.__setitem__ means obj.foo = None
    # stores None rather than deleting the key, despite the docstring.
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
    __getitem__ = dict.get
    __getattr__ = dict.get
    # NOTE(review): getattr(dict, self) requires a string attribute name,
    # so this lambda raises TypeError if ever invoked (e.g. by pickle).
    __getnewargs__ = lambda self: getattr(dict,self).__getnewargs__(self)
    __repr__ = lambda self: '<Storage %s>' % dict.__repr__(self)
    __getstate__ = lambda self: None
    __copy__ = lambda self: Storage(self)
    def getlist(self, key):
        """Returns a Storage value as a list.
        If the value is a list it will be returned as-is.
        If object is None, an empty list will be returned.
        Otherwise, `[value]` will be returned.
        Example output for a query string of `?x=abc&y=abc&y=def`::
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getlist('x')
        ['abc']
        >>> request.vars.getlist('y')
        ['abc', 'def']
        >>> request.vars.getlist('z')
        []
        """
        value = self.get(key, [])
        # NOTE(review): contrary to the docstring, a stored None is
        # returned as None (not []); only a *missing* key yields [].
        if value is None or isinstance(value, (list, tuple)):
            return value
        else:
            return [value]
    def getfirst(self, key, default=None):
        """Returns the first value of a list or the value itself when given a
        `request.vars` style key.
        If the value is a list, its first item will be returned;
        otherwise, the value will be returned as-is.
        Example output for a query string of `?x=abc&y=abc&y=def`::
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getfirst('x')
        'abc'
        >>> request.vars.getfirst('y')
        'abc'
        >>> request.vars.getfirst('z')
        """
        values = self.getlist(key)
        return values[0] if values else default
    def getlast(self, key, default=None):
        """Returns the last value of a list or value itself when given a
        `request.vars` style key.
        If the value is a list, the last item will be returned;
        otherwise, the value will be returned as-is.
        Simulated output with a query string of `?x=abc&y=abc&y=def`::
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getlast('x')
        'abc'
        >>> request.vars.getlast('y')
        'def'
        >>> request.vars.getlast('z')
        """
        values = self.getlist(key)
        return values[-1] if values else default
|
Kopachris/seshet
|
seshet/utils.py
|
Storage.getlast
|
python
|
def getlast(self, key, default=None):
    """Return the last element of ``self.getlist(key)``, or *default*
    when the key is missing/empty.
    """
    found = self.getlist(key)
    if found:
        return found[-1]
    return default
|
Returns the last value of a list or value itself when given a
`request.vars` style key.
If the value is a list, the last item will be returned;
otherwise, the value will be returned as-is.
Simulated output with a query string of `?x=abc&y=abc&y=def`::
>>> request = Storage()
>>> request.vars = Storage()
>>> request.vars.x = 'abc'
>>> request.vars.y = ['abc', 'def']
>>> request.vars.getlast('x')
'abc'
>>> request.vars.getlast('y')
'def'
>>> request.vars.getlast('z')
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/utils.py#L146-L168
|
[
"def getlist(self, key):\n \"\"\"Returns a Storage value as a list.\n\n If the value is a list it will be returned as-is.\n If object is None, an empty list will be returned.\n Otherwise, `[value]` will be returned.\n\n Example output for a query string of `?x=abc&y=abc&y=def`::\n\n >>> request = Storage()\n >>> request.vars = Storage()\n >>> request.vars.x = 'abc'\n >>> request.vars.y = ['abc', 'def']\n >>> request.vars.getlist('x')\n ['abc']\n >>> request.vars.getlist('y')\n ['abc', 'def']\n >>> request.vars.getlist('z')\n []\n\n \"\"\"\n\n value = self.get(key, [])\n if value is None or isinstance(value, (list, tuple)):\n return value\n else:\n return [value]\n"
] |
class Storage(dict):
    """A Storage object is like a dictionary except `obj.foo` can be used
    in addition to `obj['foo']`, and setting obj.foo = None deletes item foo.
    Example:
    >>> o = Storage(a=1)
    >>> print o.a
    1
    >>> o['a']
    1
    >>> o.a = 2
    >>> print o['a']
    2
    >>> del o.a
    >>> print o.a
    None
    """
    __slots__ = ()
    # Attribute access maps straight onto item access; reads of missing
    # keys/attributes return None (dict.get) instead of raising.
    # NOTE(review): __setattr__ = dict.__setitem__ means obj.foo = None
    # stores None rather than deleting the key, despite the docstring.
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
    __getitem__ = dict.get
    __getattr__ = dict.get
    # NOTE(review): getattr(dict, self) requires a string attribute name,
    # so this lambda raises TypeError if ever invoked (e.g. by pickle).
    __getnewargs__ = lambda self: getattr(dict,self).__getnewargs__(self)
    __repr__ = lambda self: '<Storage %s>' % dict.__repr__(self)
    __getstate__ = lambda self: None
    __copy__ = lambda self: Storage(self)
    def getlist(self, key):
        """Returns a Storage value as a list.
        If the value is a list it will be returned as-is.
        If object is None, an empty list will be returned.
        Otherwise, `[value]` will be returned.
        Example output for a query string of `?x=abc&y=abc&y=def`::
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getlist('x')
        ['abc']
        >>> request.vars.getlist('y')
        ['abc', 'def']
        >>> request.vars.getlist('z')
        []
        """
        value = self.get(key, [])
        # NOTE(review): contrary to the docstring, a stored None is
        # returned as None (not []); only a *missing* key yields [].
        if value is None or isinstance(value, (list, tuple)):
            return value
        else:
            return [value]
    def getfirst(self, key, default=None):
        """Returns the first value of a list or the value itself when given a
        `request.vars` style key.
        If the value is a list, its first item will be returned;
        otherwise, the value will be returned as-is.
        Example output for a query string of `?x=abc&y=abc&y=def`::
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getfirst('x')
        'abc'
        >>> request.vars.getfirst('y')
        'abc'
        >>> request.vars.getfirst('z')
        """
        values = self.getlist(key)
        return values[0] if values else default
    def getlast(self, key, default=None):
        """Returns the last value of a list or value itself when given a
        `request.vars` style key.
        If the value is a list, the last item will be returned;
        otherwise, the value will be returned as-is.
        Simulated output with a query string of `?x=abc&y=abc&y=def`::
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getlast('x')
        'abc'
        >>> request.vars.getlast('y')
        'def'
        >>> request.vars.getlast('z')
        """
        values = self.getlist(key)
        return values[-1] if values else default
|
Kopachris/seshet
|
seshet/utils.py
|
KVStore.popitem
|
python
|
def popitem(self):
    """Remove and return a random (key, value) pair.

    Unlike `dict.popitem()`, the pair is chosen uniformly at random.
    `self.items()` returns a lazy zip iterator here, so it must be
    materialized before `random.choice` (which requires a sequence) --
    the original call raised TypeError on Python 3.  Assigning None to
    the key deletes it under this class's storage contract.
    """
    all_items = list(self.items())
    removed_item = random.choice(all_items)
    self[removed_item[0]] = None
    return removed_item
|
Unlike `dict.popitem()`, this is actually random
|
train
|
https://github.com/Kopachris/seshet/blob/d55bae01cff56762c5467138474145a2c17d1932/seshet/utils.py#L381-L386
|
[
"def items(self):\n return zip(self.keys(), self.values())\n"
] |
class KVStore:
    """Create a key/value store in the bot's database for each
    command module to use for persistent storage. Can be accessed
    either like a class:
    >>> store = KVStore(db)
    >>> store.foo = 'bar'
    >>> store.foo
    'bar'
    Or like a dict:
    >>> store['spam'] = 'eggs'
    >>> store['spam']
    'eggs'
    The KVStore object uses `inspect` to determine which module
    the object is being accessed from and will automatically create
    a database table as needed or determine which one to use if it
    already exists, so that each module the object is used from has
    its own namespace.
    KVStore has most of the same interfaces as an ordinary `dict`, but
    is not a subclass of `dict` or `collections.UserDict` because
    so many functions had to be completely rewritten to work with
    KVStore's database model.
    """
    def __init__(self, db):
        # make sure some tables are defined:
        if 'namespaces' not in db:
            # list of registered modules
            db.define_table('namespaces', Field('name'))
        for m in db().select(db.namespaces.ALL):
            # these are modules' own "namespaces"
            tbl_name = 'kv_' + m.name
            if tbl_name not in db:
                db.define_table(tbl_name,
                                Field('k', 'string', unique=True),
                                Field('v', 'text'),
                                )
        self._db = db  # pydal DAL instance
        # It's recommended to use a separate database
        # for the bot and for the KV store to avoid
        # accidental or malicious name collisions
        #
        # (Then why doesn't the default implementation?)
    def __getattr__(self, k):
        # Names beginning with '_' are real instance attributes; anything
        # else is looked up in the calling module's kv_ table.
        if k.startswith('_'):
            return self.__dict__[k]
        db = self._db
        tbl = self._get_calling_module()
        tbl_name = 'kv_' + tbl if tbl is not None else None
        if tbl is None or tbl_name not in db:
            # table doesn't exist
            return None
        r = db(db[tbl_name].k == k)
        if r.isempty():
            # no db entry for this key
            return None
        r = r.select().first()
        # db should return string, pickle expects bytes
        return pickle.loads(r.v.encode(errors='ignore'))
    def __setattr__(self, k, v):
        # Assigning None deletes the key; values are pickled to text.
        if k.startswith('_'):
            self.__dict__[k] = v
            return
        elif k in self.__dict__:
            # instance attributes should be read-only-ish
            raise AttributeError("Name already in use: %s" % k)
        db = self._db
        if v is not None:
            v = pickle.dumps(v).decode(errors='ignore')
        tbl = self._get_calling_module()
        tbl_name = 'kv_' + tbl if tbl is not None else None
        if tbl is None or tbl_name not in db:
            if v is not None:
                # module not registered, need to create
                # a new table
                self._register_module(tbl)
                # NOTE(review): this first insert stores repr(v) while the
                # update path below stores v itself; __getattr__ unpickles
                # the raw text, so the repr-wrapped first write looks
                # inconsistent -- verify intended behavior.
                db[tbl_name].insert(k=k, v=repr(v))
            else:
                # no need to delete a non-existent key
                return None
        else:
            if v is not None:
                db[tbl_name].update_or_insert(db[tbl_name].k == k, k=k, v=v)
            else:
                db(db[tbl_name].k == k).delete()
        db.commit()
        self._db = db
    def __delattr__(self, k):
        self.__setattr__(k, None)
    def __getitem__(self, k):
        return self.__getattr__(k)
    def __setitem__(self, k, v):
        self.__setattr__(k, v)
    def __delitem__(self, k):
        self.__setattr__(k, None)
    def _register_module(self, name):
        # Record the module in `namespaces` and create its kv_ table.
        db = self._db
        tbl_name = 'kv_' + name
        if db(db.namespaces.name == name).isempty():
            db.namespaces.insert(name=name)
            db.commit()
        if tbl_name not in db:
            db.define_table(tbl_name,
                            Field('k', 'string', unique=True),
                            Field('v', 'text'),
                            )
        self._db = db
    def _get_calling_module(self):
        # in theory, bot modules will be registered with register_module
        # when they're uploaded and installed
        curfrm = inspect.currentframe()
        # Walk outward past frames belonging to this module to find the
        # first caller from somewhere else.
        for f in inspect.getouterframes(curfrm)[1:]:
            if self.__module__.split('.')[-1] not in f[1]:
                calling_file = f[1]
                break
        caller_mod = inspect.getmodulename(calling_file)
        db = self._db
        mod = db(db.namespaces.name == caller_mod)
        if mod.isempty():
            return None
        else:
            return caller_mod
    def keys(self):
        # All keys in the calling module's namespace ([] if unregistered).
        db = self._db
        tbl = self._get_calling_module()
        tbl_name = 'kv_' + tbl if tbl is not None else None
        if tbl is None or tbl_name not in db:
            return []
        all_items = db().select(db[tbl_name].ALL)
        all_keys = [r.k for r in all_items]
        return all_keys
    def values(self):
        all_keys = self.keys()
        all_vals = list()
        for k in all_keys:
            all_vals.append(self[k])
        return all_vals
    def update(self, other):
        for k, v in other.items():
            self[k] = v
        return None
    def items(self):
        # Lazy zip iterator, not a list (unlike Python 2's dict.items()).
        return zip(self.keys(), self.values())
    def iterkeys(self):
        return iter(self.keys())
    def itervalues(self):
        return iter(self.values())
    def iteritems(self):
        return iter(self.items())
    def __iter__(self):
        return iter(self.keys())
    def __contains__(self, k):
        # A key whose stored value is None is indistinguishable from absent.
        if self[k] is not None:
            return True
        else:
            return False
    def __copy__(self):
        """Return a dict representing the current table"""
        d = dict()
        d.update(self.items())
        return d
    def copy(self):
        """Return a dict representing the current table"""
        return self.__copy__()
    def pop(self, k):
        v = self[k]
        self[k] = None
        return v
    def setdefault(self, k, v=None):
        existing_v = self[k]
        if existing_v is None:
            self[k] = v
            return v
        return existing_v
    def has_key(self, k):
        return k in self
    def get(self, k, v=None):
        existing_v = self[k]
        if existing_v is None:
            return v
        else:
            return existing_v
    def clear(self):
        # Assigning None per key deletes it from the backing table.
        for k in self.keys():
            self[k] = None
|
zlobspb/txtarantool
|
txtarantool.py
|
Request.pack_int_base128
|
python
|
def pack_int_base128(cls, value):
assert isinstance(value, int)
if value < 1 << 14:
return cls._int_base128[value]
if value < 1 << 21:
return struct_BBB.pack(
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
if value < 1 << 28:
return struct_BBBB.pack(
value >> 21 & 0xff | 0x80,
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
if value < 1 << 35:
return struct_BBBBB.pack(
value >> 28 & 0xff | 0x80,
value >> 21 & 0xff | 0x80,
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
raise OverflowError("Number is too large to be packed")
|
Pack integer value using LEB128 encoding
:param value: integer value to encode
:type value: int
:return: encoded value
:rtype: bytes
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L156-L193
| null |
class Request(object):
"""
Represents a single request to the server in compliance with the Tarantool protocol.
Responsible for data encapsulation and builds binary packet to be sent to the server.
This is the abstract base class. Specific request types are implemented by the inherited classes.
"""
TNT_OP_INSERT = 13
TNT_OP_SELECT = 17
TNT_OP_UPDATE = 19
TNT_OP_DELETE = 21
TNT_OP_CALL = 22
TNT_OP_PING = 65280
TNT_FLAG_RETURN = 0x01
TNT_FLAG_ADD = 0x02
TNT_FLAG_REPLACE = 0x04
# Pre-generated results of pack_int_base128() for small arguments (0..16383)
_int_base128 = tuple(
(
struct_B.pack(val) if val < 128 else struct_BB.pack(val >> 7 & 0xff | 0x80, val & 0x7F)
for val in xrange(0x4000)
)
)
def __init__(self, charset="utf-8", errors="strict"):
self.charset = charset
self.errors = errors
self._bytes = None
def __bytes__(self):
return self._bytes
__str__ = __bytes__
@staticmethod
def header(request_type, body_length, request_id):
return struct_LLL.pack(request_type, body_length, request_id)
@staticmethod
def pack_int(value):
"""
Pack integer field
<field> ::= <int32_varint><data>
:param value: integer value to be packed
:type value: int
:return: packed value
:rtype: bytes
"""
assert isinstance(value, (int, long))
return struct_BL.pack(4, value)
@staticmethod
def pack_long(value):
"""
Pack integer field
<field> ::= <int32_varint><data>
:param value: integer value to be packed
:type value: long
:return: packed value
:rtype: bytes
"""
assert isinstance(value, (int, long))
return struct_BQ.pack(8, value)
@classmethod
@classmethod
def pack_str(cls, value):
"""
Pack string field
<field> ::= <int32_varint><data>
:param value: string to be packed
:type value: bytes or str
:return: packed value
:rtype: bytes
"""
assert isinstance(value, str)
value_len_packed = cls.pack_int_base128(len(value))
return struct.pack("<%ds%ds" % (len(value_len_packed), len(value)), value_len_packed, value)
@classmethod
def pack_unicode(cls, value, charset="utf-8", errors="strict"):
"""
Pack string field
<field> ::= <int32_varint><data>
:param value: string to be packed
:type value: unicode
:return: packed value
:rtype: bytes
"""
assert isinstance(value, unicode)
try:
value = value.encode(charset, errors)
except UnicodeEncodeError as e:
raise InvalidData("Error encoding unicode value '%s': %s" % (repr(value), e))
value_len_packed = cls.pack_int_base128(len(value))
return struct.pack("<%ds%ds" % (len(value_len_packed), len(value)), value_len_packed, value)
def pack_field(self, value):
"""
Pack single field (string or integer value)
<field> ::= <int32_varint><data>
:param value: value to be packed
:type value: bytes, str, int or long
:return: packed value
:rtype: bytes
"""
if isinstance(value, str):
return self.pack_str(value)
elif isinstance(value, unicode):
return self.pack_unicode(value, self.charset, self.errors)
elif isinstance(value, int):
return self.pack_int(value)
elif isinstance(value, long):
return self.pack_long(value)
else:
raise TypeError("Invalid argument type '%s'. Only 'str', 'int' or long expected" % (type(value).__name__))
def pack_tuple(self, values):
"""
Pack tuple of values
<tuple> ::= <cardinality><field>+
:param value: tuple to be packed
:type value: tuple of scalar values (bytes, str or int)
:return: packed tuple
:rtype: bytes
"""
assert isinstance(values, (tuple, list))
cardinality = [struct_L.pack(len(values))]
packed_items = [self.pack_field(v) for v in values]
return b''.join(itertools.chain(cardinality, packed_items))
|
zlobspb/txtarantool
|
txtarantool.py
|
Request.pack_str
|
python
|
def pack_str(cls, value):
assert isinstance(value, str)
value_len_packed = cls.pack_int_base128(len(value))
return struct.pack("<%ds%ds" % (len(value_len_packed), len(value)), value_len_packed, value)
|
Pack string field
<field> ::= <int32_varint><data>
:param value: string to be packed
:type value: bytes or str
:return: packed value
:rtype: bytes
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L196-L209
|
[
"def pack_int_base128(cls, value):\n \"\"\"\n Pack integer value using LEB128 encoding\n :param value: integer value to encode\n :type value: int\n\n :return: encoded value\n :rtype: bytes\n \"\"\"\n assert isinstance(value, int)\n if value < 1 << 14:\n return cls._int_base128[value]\n\n if value < 1 << 21:\n return struct_BBB.pack(\n value >> 14 & 0xff | 0x80,\n value >> 7 & 0xff | 0x80,\n value & 0x7F\n )\n\n if value < 1 << 28:\n return struct_BBBB.pack(\n value >> 21 & 0xff | 0x80,\n value >> 14 & 0xff | 0x80,\n value >> 7 & 0xff | 0x80,\n value & 0x7F\n )\n\n if value < 1 << 35:\n return struct_BBBBB.pack(\n value >> 28 & 0xff | 0x80,\n value >> 21 & 0xff | 0x80,\n value >> 14 & 0xff | 0x80,\n value >> 7 & 0xff | 0x80,\n value & 0x7F\n )\n\n raise OverflowError(\"Number is too large to be packed\")\n"
] |
class Request(object):
"""
Represents a single request to the server in compliance with the Tarantool protocol.
Responsible for data encapsulation and builds binary packet to be sent to the server.
This is the abstract base class. Specific request types are implemented by the inherited classes.
"""
TNT_OP_INSERT = 13
TNT_OP_SELECT = 17
TNT_OP_UPDATE = 19
TNT_OP_DELETE = 21
TNT_OP_CALL = 22
TNT_OP_PING = 65280
TNT_FLAG_RETURN = 0x01
TNT_FLAG_ADD = 0x02
TNT_FLAG_REPLACE = 0x04
# Pre-generated results of pack_int_base128() for small arguments (0..16383)
_int_base128 = tuple(
(
struct_B.pack(val) if val < 128 else struct_BB.pack(val >> 7 & 0xff | 0x80, val & 0x7F)
for val in xrange(0x4000)
)
)
def __init__(self, charset="utf-8", errors="strict"):
self.charset = charset
self.errors = errors
self._bytes = None
def __bytes__(self):
return self._bytes
__str__ = __bytes__
@staticmethod
def header(request_type, body_length, request_id):
return struct_LLL.pack(request_type, body_length, request_id)
@staticmethod
def pack_int(value):
"""
Pack integer field
<field> ::= <int32_varint><data>
:param value: integer value to be packed
:type value: int
:return: packed value
:rtype: bytes
"""
assert isinstance(value, (int, long))
return struct_BL.pack(4, value)
@staticmethod
def pack_long(value):
"""
Pack integer field
<field> ::= <int32_varint><data>
:param value: integer value to be packed
:type value: long
:return: packed value
:rtype: bytes
"""
assert isinstance(value, (int, long))
return struct_BQ.pack(8, value)
@classmethod
def pack_int_base128(cls, value):
"""
Pack integer value using LEB128 encoding
:param value: integer value to encode
:type value: int
:return: encoded value
:rtype: bytes
"""
assert isinstance(value, int)
if value < 1 << 14:
return cls._int_base128[value]
if value < 1 << 21:
return struct_BBB.pack(
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
if value < 1 << 28:
return struct_BBBB.pack(
value >> 21 & 0xff | 0x80,
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
if value < 1 << 35:
return struct_BBBBB.pack(
value >> 28 & 0xff | 0x80,
value >> 21 & 0xff | 0x80,
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
raise OverflowError("Number is too large to be packed")
@classmethod
@classmethod
def pack_unicode(cls, value, charset="utf-8", errors="strict"):
"""
Pack string field
<field> ::= <int32_varint><data>
:param value: string to be packed
:type value: unicode
:return: packed value
:rtype: bytes
"""
assert isinstance(value, unicode)
try:
value = value.encode(charset, errors)
except UnicodeEncodeError as e:
raise InvalidData("Error encoding unicode value '%s': %s" % (repr(value), e))
value_len_packed = cls.pack_int_base128(len(value))
return struct.pack("<%ds%ds" % (len(value_len_packed), len(value)), value_len_packed, value)
def pack_field(self, value):
"""
Pack single field (string or integer value)
<field> ::= <int32_varint><data>
:param value: value to be packed
:type value: bytes, str, int or long
:return: packed value
:rtype: bytes
"""
if isinstance(value, str):
return self.pack_str(value)
elif isinstance(value, unicode):
return self.pack_unicode(value, self.charset, self.errors)
elif isinstance(value, int):
return self.pack_int(value)
elif isinstance(value, long):
return self.pack_long(value)
else:
raise TypeError("Invalid argument type '%s'. Only 'str', 'int' or long expected" % (type(value).__name__))
def pack_tuple(self, values):
"""
Pack tuple of values
<tuple> ::= <cardinality><field>+
:param value: tuple to be packed
:type value: tuple of scalar values (bytes, str or int)
:return: packed tuple
:rtype: bytes
"""
assert isinstance(values, (tuple, list))
cardinality = [struct_L.pack(len(values))]
packed_items = [self.pack_field(v) for v in values]
return b''.join(itertools.chain(cardinality, packed_items))
|
zlobspb/txtarantool
|
txtarantool.py
|
Request.pack_unicode
|
python
|
def pack_unicode(cls, value, charset="utf-8", errors="strict"):
assert isinstance(value, unicode)
try:
value = value.encode(charset, errors)
except UnicodeEncodeError as e:
raise InvalidData("Error encoding unicode value '%s': %s" % (repr(value), e))
value_len_packed = cls.pack_int_base128(len(value))
return struct.pack("<%ds%ds" % (len(value_len_packed), len(value)), value_len_packed, value)
|
Pack string field
<field> ::= <int32_varint><data>
:param value: string to be packed
:type value: unicode
:return: packed value
:rtype: bytes
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L212-L231
|
[
"def pack_int_base128(cls, value):\n \"\"\"\n Pack integer value using LEB128 encoding\n :param value: integer value to encode\n :type value: int\n\n :return: encoded value\n :rtype: bytes\n \"\"\"\n assert isinstance(value, int)\n if value < 1 << 14:\n return cls._int_base128[value]\n\n if value < 1 << 21:\n return struct_BBB.pack(\n value >> 14 & 0xff | 0x80,\n value >> 7 & 0xff | 0x80,\n value & 0x7F\n )\n\n if value < 1 << 28:\n return struct_BBBB.pack(\n value >> 21 & 0xff | 0x80,\n value >> 14 & 0xff | 0x80,\n value >> 7 & 0xff | 0x80,\n value & 0x7F\n )\n\n if value < 1 << 35:\n return struct_BBBBB.pack(\n value >> 28 & 0xff | 0x80,\n value >> 21 & 0xff | 0x80,\n value >> 14 & 0xff | 0x80,\n value >> 7 & 0xff | 0x80,\n value & 0x7F\n )\n\n raise OverflowError(\"Number is too large to be packed\")\n"
] |
class Request(object):
"""
Represents a single request to the server in compliance with the Tarantool protocol.
Responsible for data encapsulation and builds binary packet to be sent to the server.
This is the abstract base class. Specific request types are implemented by the inherited classes.
"""
TNT_OP_INSERT = 13
TNT_OP_SELECT = 17
TNT_OP_UPDATE = 19
TNT_OP_DELETE = 21
TNT_OP_CALL = 22
TNT_OP_PING = 65280
TNT_FLAG_RETURN = 0x01
TNT_FLAG_ADD = 0x02
TNT_FLAG_REPLACE = 0x04
# Pre-generated results of pack_int_base128() for small arguments (0..16383)
_int_base128 = tuple(
(
struct_B.pack(val) if val < 128 else struct_BB.pack(val >> 7 & 0xff | 0x80, val & 0x7F)
for val in xrange(0x4000)
)
)
def __init__(self, charset="utf-8", errors="strict"):
self.charset = charset
self.errors = errors
self._bytes = None
def __bytes__(self):
return self._bytes
__str__ = __bytes__
@staticmethod
def header(request_type, body_length, request_id):
return struct_LLL.pack(request_type, body_length, request_id)
@staticmethod
def pack_int(value):
"""
Pack integer field
<field> ::= <int32_varint><data>
:param value: integer value to be packed
:type value: int
:return: packed value
:rtype: bytes
"""
assert isinstance(value, (int, long))
return struct_BL.pack(4, value)
@staticmethod
def pack_long(value):
"""
Pack integer field
<field> ::= <int32_varint><data>
:param value: integer value to be packed
:type value: long
:return: packed value
:rtype: bytes
"""
assert isinstance(value, (int, long))
return struct_BQ.pack(8, value)
@classmethod
def pack_int_base128(cls, value):
"""
Pack integer value using LEB128 encoding
:param value: integer value to encode
:type value: int
:return: encoded value
:rtype: bytes
"""
assert isinstance(value, int)
if value < 1 << 14:
return cls._int_base128[value]
if value < 1 << 21:
return struct_BBB.pack(
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
if value < 1 << 28:
return struct_BBBB.pack(
value >> 21 & 0xff | 0x80,
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
if value < 1 << 35:
return struct_BBBBB.pack(
value >> 28 & 0xff | 0x80,
value >> 21 & 0xff | 0x80,
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
raise OverflowError("Number is too large to be packed")
@classmethod
def pack_str(cls, value):
"""
Pack string field
<field> ::= <int32_varint><data>
:param value: string to be packed
:type value: bytes or str
:return: packed value
:rtype: bytes
"""
assert isinstance(value, str)
value_len_packed = cls.pack_int_base128(len(value))
return struct.pack("<%ds%ds" % (len(value_len_packed), len(value)), value_len_packed, value)
@classmethod
def pack_field(self, value):
"""
Pack single field (string or integer value)
<field> ::= <int32_varint><data>
:param value: value to be packed
:type value: bytes, str, int or long
:return: packed value
:rtype: bytes
"""
if isinstance(value, str):
return self.pack_str(value)
elif isinstance(value, unicode):
return self.pack_unicode(value, self.charset, self.errors)
elif isinstance(value, int):
return self.pack_int(value)
elif isinstance(value, long):
return self.pack_long(value)
else:
raise TypeError("Invalid argument type '%s'. Only 'str', 'int' or long expected" % (type(value).__name__))
def pack_tuple(self, values):
"""
Pack tuple of values
<tuple> ::= <cardinality><field>+
:param value: tuple to be packed
:type value: tuple of scalar values (bytes, str or int)
:return: packed tuple
:rtype: bytes
"""
assert isinstance(values, (tuple, list))
cardinality = [struct_L.pack(len(values))]
packed_items = [self.pack_field(v) for v in values]
return b''.join(itertools.chain(cardinality, packed_items))
|
zlobspb/txtarantool
|
txtarantool.py
|
Request.pack_field
|
python
|
def pack_field(self, value):
if isinstance(value, str):
return self.pack_str(value)
elif isinstance(value, unicode):
return self.pack_unicode(value, self.charset, self.errors)
elif isinstance(value, int):
return self.pack_int(value)
elif isinstance(value, long):
return self.pack_long(value)
else:
raise TypeError("Invalid argument type '%s'. Only 'str', 'int' or long expected" % (type(value).__name__))
|
Pack single field (string or integer value)
<field> ::= <int32_varint><data>
:param value: value to be packed
:type value: bytes, str, int or long
:return: packed value
:rtype: bytes
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L233-L253
|
[
"def pack_int(value):\n \"\"\"\n Pack integer field\n <field> ::= <int32_varint><data>\n\n :param value: integer value to be packed\n :type value: int\n\n :return: packed value\n :rtype: bytes\n \"\"\"\n assert isinstance(value, (int, long))\n return struct_BL.pack(4, value)\n",
"def pack_long(value):\n \"\"\"\n Pack integer field\n <field> ::= <int32_varint><data>\n\n :param value: integer value to be packed\n :type value: long\n\n :return: packed value\n :rtype: bytes\n \"\"\"\n assert isinstance(value, (int, long))\n return struct_BQ.pack(8, value)\n",
"def pack_str(cls, value):\n \"\"\"\n Pack string field\n <field> ::= <int32_varint><data>\n\n :param value: string to be packed\n :type value: bytes or str\n\n :return: packed value\n :rtype: bytes\n \"\"\"\n assert isinstance(value, str)\n value_len_packed = cls.pack_int_base128(len(value))\n return struct.pack(\"<%ds%ds\" % (len(value_len_packed), len(value)), value_len_packed, value)\n",
"def pack_unicode(cls, value, charset=\"utf-8\", errors=\"strict\"):\n \"\"\"\n Pack string field\n <field> ::= <int32_varint><data>\n\n :param value: string to be packed\n :type value: unicode\n\n :return: packed value\n :rtype: bytes\n \"\"\"\n assert isinstance(value, unicode)\n\n try:\n value = value.encode(charset, errors)\n except UnicodeEncodeError as e:\n raise InvalidData(\"Error encoding unicode value '%s': %s\" % (repr(value), e))\n\n value_len_packed = cls.pack_int_base128(len(value))\n return struct.pack(\"<%ds%ds\" % (len(value_len_packed), len(value)), value_len_packed, value)\n"
] |
class Request(object):
"""
Represents a single request to the server in compliance with the Tarantool protocol.
Responsible for data encapsulation and builds binary packet to be sent to the server.
This is the abstract base class. Specific request types are implemented by the inherited classes.
"""
TNT_OP_INSERT = 13
TNT_OP_SELECT = 17
TNT_OP_UPDATE = 19
TNT_OP_DELETE = 21
TNT_OP_CALL = 22
TNT_OP_PING = 65280
TNT_FLAG_RETURN = 0x01
TNT_FLAG_ADD = 0x02
TNT_FLAG_REPLACE = 0x04
# Pre-generated results of pack_int_base128() for small arguments (0..16383)
_int_base128 = tuple(
(
struct_B.pack(val) if val < 128 else struct_BB.pack(val >> 7 & 0xff | 0x80, val & 0x7F)
for val in xrange(0x4000)
)
)
def __init__(self, charset="utf-8", errors="strict"):
self.charset = charset
self.errors = errors
self._bytes = None
def __bytes__(self):
return self._bytes
__str__ = __bytes__
@staticmethod
def header(request_type, body_length, request_id):
return struct_LLL.pack(request_type, body_length, request_id)
@staticmethod
def pack_int(value):
"""
Pack integer field
<field> ::= <int32_varint><data>
:param value: integer value to be packed
:type value: int
:return: packed value
:rtype: bytes
"""
assert isinstance(value, (int, long))
return struct_BL.pack(4, value)
@staticmethod
def pack_long(value):
"""
Pack integer field
<field> ::= <int32_varint><data>
:param value: integer value to be packed
:type value: long
:return: packed value
:rtype: bytes
"""
assert isinstance(value, (int, long))
return struct_BQ.pack(8, value)
@classmethod
def pack_int_base128(cls, value):
"""
Pack integer value using LEB128 encoding
:param value: integer value to encode
:type value: int
:return: encoded value
:rtype: bytes
"""
assert isinstance(value, int)
if value < 1 << 14:
return cls._int_base128[value]
if value < 1 << 21:
return struct_BBB.pack(
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
if value < 1 << 28:
return struct_BBBB.pack(
value >> 21 & 0xff | 0x80,
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
if value < 1 << 35:
return struct_BBBBB.pack(
value >> 28 & 0xff | 0x80,
value >> 21 & 0xff | 0x80,
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
raise OverflowError("Number is too large to be packed")
@classmethod
def pack_str(cls, value):
"""
Pack string field
<field> ::= <int32_varint><data>
:param value: string to be packed
:type value: bytes or str
:return: packed value
:rtype: bytes
"""
assert isinstance(value, str)
value_len_packed = cls.pack_int_base128(len(value))
return struct.pack("<%ds%ds" % (len(value_len_packed), len(value)), value_len_packed, value)
@classmethod
def pack_unicode(cls, value, charset="utf-8", errors="strict"):
"""
Pack string field
<field> ::= <int32_varint><data>
:param value: string to be packed
:type value: unicode
:return: packed value
:rtype: bytes
"""
assert isinstance(value, unicode)
try:
value = value.encode(charset, errors)
except UnicodeEncodeError as e:
raise InvalidData("Error encoding unicode value '%s': %s" % (repr(value), e))
value_len_packed = cls.pack_int_base128(len(value))
return struct.pack("<%ds%ds" % (len(value_len_packed), len(value)), value_len_packed, value)
def pack_tuple(self, values):
"""
Pack tuple of values
<tuple> ::= <cardinality><field>+
:param value: tuple to be packed
:type value: tuple of scalar values (bytes, str or int)
:return: packed tuple
:rtype: bytes
"""
assert isinstance(values, (tuple, list))
cardinality = [struct_L.pack(len(values))]
packed_items = [self.pack_field(v) for v in values]
return b''.join(itertools.chain(cardinality, packed_items))
|
zlobspb/txtarantool
|
txtarantool.py
|
Request.pack_tuple
|
python
|
def pack_tuple(self, values):
assert isinstance(values, (tuple, list))
cardinality = [struct_L.pack(len(values))]
packed_items = [self.pack_field(v) for v in values]
return b''.join(itertools.chain(cardinality, packed_items))
|
Pack tuple of values
<tuple> ::= <cardinality><field>+
:param value: tuple to be packed
:type value: tuple of scalar values (bytes, str or int)
:return: packed tuple
:rtype: bytes
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L255-L269
| null |
class Request(object):
"""
Represents a single request to the server in compliance with the Tarantool protocol.
Responsible for data encapsulation and builds binary packet to be sent to the server.
This is the abstract base class. Specific request types are implemented by the inherited classes.
"""
TNT_OP_INSERT = 13
TNT_OP_SELECT = 17
TNT_OP_UPDATE = 19
TNT_OP_DELETE = 21
TNT_OP_CALL = 22
TNT_OP_PING = 65280
TNT_FLAG_RETURN = 0x01
TNT_FLAG_ADD = 0x02
TNT_FLAG_REPLACE = 0x04
# Pre-generated results of pack_int_base128() for small arguments (0..16383)
_int_base128 = tuple(
(
struct_B.pack(val) if val < 128 else struct_BB.pack(val >> 7 & 0xff | 0x80, val & 0x7F)
for val in xrange(0x4000)
)
)
def __init__(self, charset="utf-8", errors="strict"):
self.charset = charset
self.errors = errors
self._bytes = None
def __bytes__(self):
return self._bytes
__str__ = __bytes__
@staticmethod
def header(request_type, body_length, request_id):
return struct_LLL.pack(request_type, body_length, request_id)
@staticmethod
def pack_int(value):
"""
Pack integer field
<field> ::= <int32_varint><data>
:param value: integer value to be packed
:type value: int
:return: packed value
:rtype: bytes
"""
assert isinstance(value, (int, long))
return struct_BL.pack(4, value)
@staticmethod
def pack_long(value):
"""
Pack integer field
<field> ::= <int32_varint><data>
:param value: integer value to be packed
:type value: long
:return: packed value
:rtype: bytes
"""
assert isinstance(value, (int, long))
return struct_BQ.pack(8, value)
@classmethod
def pack_int_base128(cls, value):
"""
Pack integer value using LEB128 encoding
:param value: integer value to encode
:type value: int
:return: encoded value
:rtype: bytes
"""
assert isinstance(value, int)
if value < 1 << 14:
return cls._int_base128[value]
if value < 1 << 21:
return struct_BBB.pack(
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
if value < 1 << 28:
return struct_BBBB.pack(
value >> 21 & 0xff | 0x80,
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
if value < 1 << 35:
return struct_BBBBB.pack(
value >> 28 & 0xff | 0x80,
value >> 21 & 0xff | 0x80,
value >> 14 & 0xff | 0x80,
value >> 7 & 0xff | 0x80,
value & 0x7F
)
raise OverflowError("Number is too large to be packed")
@classmethod
def pack_str(cls, value):
"""
Pack string field
<field> ::= <int32_varint><data>
:param value: string to be packed
:type value: bytes or str
:return: packed value
:rtype: bytes
"""
assert isinstance(value, str)
value_len_packed = cls.pack_int_base128(len(value))
return struct.pack("<%ds%ds" % (len(value_len_packed), len(value)), value_len_packed, value)
@classmethod
def pack_unicode(cls, value, charset="utf-8", errors="strict"):
"""
Pack string field
<field> ::= <int32_varint><data>
:param value: string to be packed
:type value: unicode
:return: packed value
:rtype: bytes
"""
assert isinstance(value, unicode)
try:
value = value.encode(charset, errors)
except UnicodeEncodeError as e:
raise InvalidData("Error encoding unicode value '%s': %s" % (repr(value), e))
value_len_packed = cls.pack_int_base128(len(value))
return struct.pack("<%ds%ds" % (len(value_len_packed), len(value)), value_len_packed, value)
def pack_field(self, value):
"""
Pack single field (string or integer value)
<field> ::= <int32_varint><data>
:param value: value to be packed
:type value: bytes, str, int or long
:return: packed value
:rtype: bytes
"""
if isinstance(value, str):
return self.pack_str(value)
elif isinstance(value, unicode):
return self.pack_unicode(value, self.charset, self.errors)
elif isinstance(value, int):
return self.pack_int(value)
elif isinstance(value, long):
return self.pack_long(value)
else:
raise TypeError("Invalid argument type '%s'. Only 'str', 'int' or long expected" % (type(value).__name__))
|
zlobspb/txtarantool
|
txtarantool.py
|
Response._unpack_int_base128
|
python
|
def _unpack_int_base128(varint, offset):
res = ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
return res, offset + 1
|
Implement Perl unpack's 'w' option, aka base 128 decoding.
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L548-L563
| null |
class Response(list):
"""
Represents a single response from the server in compliance with the Tarantool protocol.
Responsible for data encapsulation (i.e. received list of tuples) and parses binary
packet received from the server.
"""
def __init__(self, header, body, charset="utf-8", errors="strict", field_types=None):
"""
Create an instance of `Response` using data received from the server.
__init__() itself reads data from the socket, parses response body and
sets appropriate instance attributes.
:param header: header of the response
:type header: array of bytes
:param body: body of the response
:type body: array of bytes
"""
# This is not necessary, because underlying list data structures are created in the __new__(). But let it be.
super(Response, self).__init__()
self.charset = charset
self.errors = errors
self._body_length = None
self._request_id = None
self._request_type = None
self._completion_status = None
self._return_code = None
self._return_message = None
self._rowcount = None
self.field_types = field_types
# Unpack header
if isinstance(header, (tuple, list)):
self._request_type, self._body_length, self._request_id = header
else:
self._request_type, self._body_length, self._request_id = struct_LLL.unpack(header)
if body:
self._unpack_body(body)
@staticmethod
def _unpack_tuple(self, buff):
"""
Unpacks the tuple from byte buffer
<tuple> ::= <cardinality><field>+
:param buff: byte array of the form <cardinality><field>+
:type buff: ctypes buffer or bytes
:return: tuple of unpacked values
:rtype: tuple
"""
cardinality = struct_L.unpack_from(buff)[0]
_tuple = ['']*cardinality
offset = 4 # The first 4 bytes in the response body is the <count> we have already read
for i in xrange(cardinality):
field_size, offset = self._unpack_int_base128(buff, offset)
field_data = struct.unpack_from("<%ds" % field_size, buff, offset)[0]
_tuple[i] = field(field_data)
offset += field_size
return tuple(_tuple)
def _unpack_body(self, buff):
"""
Parse the response body.
After body unpacking its data available as python list of tuples
For each request type the response body has the same format:
<insert_response_body> ::= <count> | <count><fq_tuple>
<update_response_body> ::= <count> | <count><fq_tuple>
<delete_response_body> ::= <count> | <count><fq_tuple>
<select_response_body> ::= <count><fq_tuple>*
<call_response_body> ::= <count><fq_tuple>
:param buff: buffer containing request body
:type byff: ctypes buffer
"""
# Unpack <return_code> and <count> (how many records affected or selected)
self._return_code = struct_L.unpack_from(buff, offset=0)[0]
# Separate return_code and completion_code
self._completion_status = self._return_code & 0x00ff
self._return_code >>= 8
# In case of an error unpack the body as an error message
if self._return_code != 0:
self._return_message = unicode(buff[4:-1], self.charset, self.errors)
if self._completion_status == 2:
raise TarantoolError(self._return_code, self._return_message)
# Unpack <count> (how many records affected or selected)
self._rowcount = struct_L.unpack_from(buff, offset=4)[0]
# If the response doesn't contain any tuple - there is nothing to unpack
if self._body_length == 8:
return
# Parse response tuples (<fq_tuple>)
if self._rowcount > 0:
offset = 8 # The first 4 bytes in the response body is the <count> we have already read
while offset < self._body_length:
# In resonse tuples have the form <size><tuple> (<fq_tuple> ::= <size><tuple>).
# Attribute <size> takes into account only size of tuple's <field> payload,
# but does not include 4-byte of <cardinality> field.
#Therefore the actual size of the <tuple> is greater to 4 bytes.
tuple_size = struct.unpack_from("<L", buff, offset)[0] + 4
tuple_data = struct.unpack_from("<%ds" % (tuple_size), buff, offset+4)[0]
tuple_value = self._unpack_tuple(tuple_data)
if self.field_types:
self.append(self._cast_tuple(tuple_value))
else:
self.append(tuple_value)
offset = offset + tuple_size + 4 # This '4' is a size of <size> attribute
@property
def completion_status(self):
"""
:type: int
Request completion status.
There are only three completion status codes in use:
* ``0`` -- "success"; the only possible :attr:`return_code` with this status is ``0``
* ``1`` -- "try again"; an indicator of an intermittent error.
This status is handled automatically by this module.
* ``2`` -- "error"; in this case :attr:`return_code` holds the actual error.
"""
return self._completion_status
@property
def rowcount(self):
"""
:type: int
Number of rows affected or returned by a query.
"""
return self._rowcount
@property
def return_code(self):
"""
:type: int
Required field in the server response.
Value of :attr:`return_code` can be ``0`` if request was sucessfull or contains an error code.
If :attr:`return_code` is non-zero than :attr:`return_message` contains an error message.
"""
return self._return_code
@property
def return_message(self):
"""
:type: str
The error message returned by the server in case of :attr:`return_code` is non-zero.
"""
return self._return_message
def _cast_field(self, cast_to, value):
"""
Convert field type from raw bytes to native python type
:param cast_to: native python type to cast to
:type cast_to: a type object (one of bytes, int, unicode (str for py3k))
:param value: raw value from the database
:type value: bytes
:return: converted value
:rtype: value of native python type (one of bytes, int, unicode (str for py3k))
"""
if cast_to in (int, long, str):
return cast_to(value)
elif cast_to == unicode:
try:
value = value.decode(self.charset, self.errors)
except UnicodeEncodeError, e:
raise InvalidData("Error encoding unicode value '%s': %s" % (repr(value), e))
return value
elif cast_to in (any, bytes):
return value
else:
raise TypeError("Invalid field type %s" % (cast_to))
def _cast_tuple(self, values):
"""
Convert values of the tuple from raw bytes to native python types
:param values: tuple of the raw database values
:type value: tuple of bytes
:return: converted tuple value
:rtype: value of native python types (bytes, int, unicode (or str for py3k))
"""
result = []
for i, value in enumerate(values):
if i < len(self.field_types):
result.append(self._cast_field(self.field_types[i], value))
else:
result.append(self._cast_field(self.field_types[-1], value))
return tuple(result)
def __repr__(self):
"""
Return user friendy string representation of the object.
Useful for the interactive sessions and debugging.
:rtype: str or None
"""
# If response is not empty then return default list representation
# If there was an SELECT request - return list representation even it is empty
if self._request_type == Request.TNT_OP_SELECT or len(self):
return super(Response, self).__repr__()
# Ping
if self._request_type == Request.TNT_OP_PING:
return "ping ok"
# Return string of form "N records affected"
affected = str(self.rowcount) + (" record" if self.rowcount == 1 else " records")
if self._request_type == Request.TNT_OP_DELETE:
return affected + " deleted"
if self._request_type == Request.TNT_OP_INSERT:
return affected + " inserted"
if self._request_type == Request.TNT_OP_UPDATE:
return affected + " updated"
return affected + " affected"
|
zlobspb/txtarantool
|
txtarantool.py
|
Response._unpack_tuple
|
python
|
def _unpack_tuple(self, buff):
cardinality = struct_L.unpack_from(buff)[0]
_tuple = ['']*cardinality
offset = 4 # The first 4 bytes in the response body is the <count> we have already read
for i in xrange(cardinality):
field_size, offset = self._unpack_int_base128(buff, offset)
field_data = struct.unpack_from("<%ds" % field_size, buff, offset)[0]
_tuple[i] = field(field_data)
offset += field_size
return tuple(_tuple)
|
Unpacks the tuple from byte buffer
<tuple> ::= <cardinality><field>+
:param buff: byte array of the form <cardinality><field>+
:type buff: ctypes buffer or bytes
:return: tuple of unpacked values
:rtype: tuple
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L565-L585
| null |
class Response(list):
"""
Represents a single response from the server in compliance with the Tarantool protocol.
Responsible for data encapsulation (i.e. received list of tuples) and parses binary
packet received from the server.
"""
def __init__(self, header, body, charset="utf-8", errors="strict", field_types=None):
"""
Create an instance of `Response` using data received from the server.
__init__() itself reads data from the socket, parses response body and
sets appropriate instance attributes.
:param header: header of the response
:type header: array of bytes
:param body: body of the response
:type body: array of bytes
"""
# This is not necessary, because underlying list data structures are created in the __new__(). But let it be.
super(Response, self).__init__()
self.charset = charset
self.errors = errors
self._body_length = None
self._request_id = None
self._request_type = None
self._completion_status = None
self._return_code = None
self._return_message = None
self._rowcount = None
self.field_types = field_types
# Unpack header
if isinstance(header, (tuple, list)):
self._request_type, self._body_length, self._request_id = header
else:
self._request_type, self._body_length, self._request_id = struct_LLL.unpack(header)
if body:
self._unpack_body(body)
@staticmethod
def _unpack_int_base128(varint, offset):
"""Implement Perl unpack's 'w' option, aka base 128 decoding."""
res = ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
return res, offset + 1
def _unpack_body(self, buff):
"""
Parse the response body.
After body unpacking its data available as python list of tuples
For each request type the response body has the same format:
<insert_response_body> ::= <count> | <count><fq_tuple>
<update_response_body> ::= <count> | <count><fq_tuple>
<delete_response_body> ::= <count> | <count><fq_tuple>
<select_response_body> ::= <count><fq_tuple>*
<call_response_body> ::= <count><fq_tuple>
:param buff: buffer containing request body
:type byff: ctypes buffer
"""
# Unpack <return_code> and <count> (how many records affected or selected)
self._return_code = struct_L.unpack_from(buff, offset=0)[0]
# Separate return_code and completion_code
self._completion_status = self._return_code & 0x00ff
self._return_code >>= 8
# In case of an error unpack the body as an error message
if self._return_code != 0:
self._return_message = unicode(buff[4:-1], self.charset, self.errors)
if self._completion_status == 2:
raise TarantoolError(self._return_code, self._return_message)
# Unpack <count> (how many records affected or selected)
self._rowcount = struct_L.unpack_from(buff, offset=4)[0]
# If the response doesn't contain any tuple - there is nothing to unpack
if self._body_length == 8:
return
# Parse response tuples (<fq_tuple>)
if self._rowcount > 0:
offset = 8 # The first 4 bytes in the response body is the <count> we have already read
while offset < self._body_length:
# In resonse tuples have the form <size><tuple> (<fq_tuple> ::= <size><tuple>).
# Attribute <size> takes into account only size of tuple's <field> payload,
# but does not include 4-byte of <cardinality> field.
#Therefore the actual size of the <tuple> is greater to 4 bytes.
tuple_size = struct.unpack_from("<L", buff, offset)[0] + 4
tuple_data = struct.unpack_from("<%ds" % (tuple_size), buff, offset+4)[0]
tuple_value = self._unpack_tuple(tuple_data)
if self.field_types:
self.append(self._cast_tuple(tuple_value))
else:
self.append(tuple_value)
offset = offset + tuple_size + 4 # This '4' is a size of <size> attribute
@property
def completion_status(self):
"""
:type: int
Request completion status.
There are only three completion status codes in use:
* ``0`` -- "success"; the only possible :attr:`return_code` with this status is ``0``
* ``1`` -- "try again"; an indicator of an intermittent error.
This status is handled automatically by this module.
* ``2`` -- "error"; in this case :attr:`return_code` holds the actual error.
"""
return self._completion_status
@property
def rowcount(self):
"""
:type: int
Number of rows affected or returned by a query.
"""
return self._rowcount
@property
def return_code(self):
"""
:type: int
Required field in the server response.
Value of :attr:`return_code` can be ``0`` if request was sucessfull or contains an error code.
If :attr:`return_code` is non-zero than :attr:`return_message` contains an error message.
"""
return self._return_code
@property
def return_message(self):
"""
:type: str
The error message returned by the server in case of :attr:`return_code` is non-zero.
"""
return self._return_message
def _cast_field(self, cast_to, value):
"""
Convert field type from raw bytes to native python type
:param cast_to: native python type to cast to
:type cast_to: a type object (one of bytes, int, unicode (str for py3k))
:param value: raw value from the database
:type value: bytes
:return: converted value
:rtype: value of native python type (one of bytes, int, unicode (str for py3k))
"""
if cast_to in (int, long, str):
return cast_to(value)
elif cast_to == unicode:
try:
value = value.decode(self.charset, self.errors)
except UnicodeEncodeError, e:
raise InvalidData("Error encoding unicode value '%s': %s" % (repr(value), e))
return value
elif cast_to in (any, bytes):
return value
else:
raise TypeError("Invalid field type %s" % (cast_to))
def _cast_tuple(self, values):
"""
Convert values of the tuple from raw bytes to native python types
:param values: tuple of the raw database values
:type value: tuple of bytes
:return: converted tuple value
:rtype: value of native python types (bytes, int, unicode (or str for py3k))
"""
result = []
for i, value in enumerate(values):
if i < len(self.field_types):
result.append(self._cast_field(self.field_types[i], value))
else:
result.append(self._cast_field(self.field_types[-1], value))
return tuple(result)
def __repr__(self):
"""
Return user friendy string representation of the object.
Useful for the interactive sessions and debugging.
:rtype: str or None
"""
# If response is not empty then return default list representation
# If there was an SELECT request - return list representation even it is empty
if self._request_type == Request.TNT_OP_SELECT or len(self):
return super(Response, self).__repr__()
# Ping
if self._request_type == Request.TNT_OP_PING:
return "ping ok"
# Return string of form "N records affected"
affected = str(self.rowcount) + (" record" if self.rowcount == 1 else " records")
if self._request_type == Request.TNT_OP_DELETE:
return affected + " deleted"
if self._request_type == Request.TNT_OP_INSERT:
return affected + " inserted"
if self._request_type == Request.TNT_OP_UPDATE:
return affected + " updated"
return affected + " affected"
|
zlobspb/txtarantool
|
txtarantool.py
|
Response._unpack_body
|
python
|
def _unpack_body(self, buff):
# Unpack <return_code> and <count> (how many records affected or selected)
self._return_code = struct_L.unpack_from(buff, offset=0)[0]
# Separate return_code and completion_code
self._completion_status = self._return_code & 0x00ff
self._return_code >>= 8
# In case of an error unpack the body as an error message
if self._return_code != 0:
self._return_message = unicode(buff[4:-1], self.charset, self.errors)
if self._completion_status == 2:
raise TarantoolError(self._return_code, self._return_message)
# Unpack <count> (how many records affected or selected)
self._rowcount = struct_L.unpack_from(buff, offset=4)[0]
# If the response doesn't contain any tuple - there is nothing to unpack
if self._body_length == 8:
return
# Parse response tuples (<fq_tuple>)
if self._rowcount > 0:
offset = 8 # The first 4 bytes in the response body is the <count> we have already read
while offset < self._body_length:
# In resonse tuples have the form <size><tuple> (<fq_tuple> ::= <size><tuple>).
# Attribute <size> takes into account only size of tuple's <field> payload,
# but does not include 4-byte of <cardinality> field.
#Therefore the actual size of the <tuple> is greater to 4 bytes.
tuple_size = struct.unpack_from("<L", buff, offset)[0] + 4
tuple_data = struct.unpack_from("<%ds" % (tuple_size), buff, offset+4)[0]
tuple_value = self._unpack_tuple(tuple_data)
if self.field_types:
self.append(self._cast_tuple(tuple_value))
else:
self.append(tuple_value)
offset = offset + tuple_size + 4
|
Parse the response body.
After body unpacking its data available as python list of tuples
For each request type the response body has the same format:
<insert_response_body> ::= <count> | <count><fq_tuple>
<update_response_body> ::= <count> | <count><fq_tuple>
<delete_response_body> ::= <count> | <count><fq_tuple>
<select_response_body> ::= <count><fq_tuple>*
<call_response_body> ::= <count><fq_tuple>
:param buff: buffer containing request body
:type byff: ctypes buffer
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L587-L639
|
[
"def _unpack_tuple(self, buff):\n \"\"\"\n Unpacks the tuple from byte buffer\n <tuple> ::= <cardinality><field>+\n\n :param buff: byte array of the form <cardinality><field>+\n :type buff: ctypes buffer or bytes\n\n :return: tuple of unpacked values\n :rtype: tuple\n \"\"\"\n cardinality = struct_L.unpack_from(buff)[0]\n _tuple = ['']*cardinality\n offset = 4 # The first 4 bytes in the response body is the <count> we have already read\n for i in xrange(cardinality):\n field_size, offset = self._unpack_int_base128(buff, offset)\n field_data = struct.unpack_from(\"<%ds\" % field_size, buff, offset)[0]\n _tuple[i] = field(field_data)\n offset += field_size\n\n return tuple(_tuple)\n",
"def _cast_tuple(self, values):\n \"\"\"\n Convert values of the tuple from raw bytes to native python types\n\n :param values: tuple of the raw database values\n :type value: tuple of bytes\n\n :return: converted tuple value\n :rtype: value of native python types (bytes, int, unicode (or str for py3k))\n \"\"\"\n result = []\n for i, value in enumerate(values):\n if i < len(self.field_types):\n result.append(self._cast_field(self.field_types[i], value))\n else:\n result.append(self._cast_field(self.field_types[-1], value))\n\n return tuple(result)\n"
] |
class Response(list):
"""
Represents a single response from the server in compliance with the Tarantool protocol.
Responsible for data encapsulation (i.e. received list of tuples) and parses binary
packet received from the server.
"""
def __init__(self, header, body, charset="utf-8", errors="strict", field_types=None):
"""
Create an instance of `Response` using data received from the server.
__init__() itself reads data from the socket, parses response body and
sets appropriate instance attributes.
:param header: header of the response
:type header: array of bytes
:param body: body of the response
:type body: array of bytes
"""
# This is not necessary, because underlying list data structures are created in the __new__(). But let it be.
super(Response, self).__init__()
self.charset = charset
self.errors = errors
self._body_length = None
self._request_id = None
self._request_type = None
self._completion_status = None
self._return_code = None
self._return_message = None
self._rowcount = None
self.field_types = field_types
# Unpack header
if isinstance(header, (tuple, list)):
self._request_type, self._body_length, self._request_id = header
else:
self._request_type, self._body_length, self._request_id = struct_LLL.unpack(header)
if body:
self._unpack_body(body)
@staticmethod
def _unpack_int_base128(varint, offset):
"""Implement Perl unpack's 'w' option, aka base 128 decoding."""
res = ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
return res, offset + 1
def _unpack_tuple(self, buff):
"""
Unpacks the tuple from byte buffer
<tuple> ::= <cardinality><field>+
:param buff: byte array of the form <cardinality><field>+
:type buff: ctypes buffer or bytes
:return: tuple of unpacked values
:rtype: tuple
"""
cardinality = struct_L.unpack_from(buff)[0]
_tuple = ['']*cardinality
offset = 4 # The first 4 bytes in the response body is the <count> we have already read
for i in xrange(cardinality):
field_size, offset = self._unpack_int_base128(buff, offset)
field_data = struct.unpack_from("<%ds" % field_size, buff, offset)[0]
_tuple[i] = field(field_data)
offset += field_size
return tuple(_tuple)
# This '4' is a size of <size> attribute
@property
def completion_status(self):
"""
:type: int
Request completion status.
There are only three completion status codes in use:
* ``0`` -- "success"; the only possible :attr:`return_code` with this status is ``0``
* ``1`` -- "try again"; an indicator of an intermittent error.
This status is handled automatically by this module.
* ``2`` -- "error"; in this case :attr:`return_code` holds the actual error.
"""
return self._completion_status
@property
def rowcount(self):
"""
:type: int
Number of rows affected or returned by a query.
"""
return self._rowcount
@property
def return_code(self):
"""
:type: int
Required field in the server response.
Value of :attr:`return_code` can be ``0`` if request was sucessfull or contains an error code.
If :attr:`return_code` is non-zero than :attr:`return_message` contains an error message.
"""
return self._return_code
@property
def return_message(self):
"""
:type: str
The error message returned by the server in case of :attr:`return_code` is non-zero.
"""
return self._return_message
def _cast_field(self, cast_to, value):
"""
Convert field type from raw bytes to native python type
:param cast_to: native python type to cast to
:type cast_to: a type object (one of bytes, int, unicode (str for py3k))
:param value: raw value from the database
:type value: bytes
:return: converted value
:rtype: value of native python type (one of bytes, int, unicode (str for py3k))
"""
if cast_to in (int, long, str):
return cast_to(value)
elif cast_to == unicode:
try:
value = value.decode(self.charset, self.errors)
except UnicodeEncodeError, e:
raise InvalidData("Error encoding unicode value '%s': %s" % (repr(value), e))
return value
elif cast_to in (any, bytes):
return value
else:
raise TypeError("Invalid field type %s" % (cast_to))
def _cast_tuple(self, values):
"""
Convert values of the tuple from raw bytes to native python types
:param values: tuple of the raw database values
:type value: tuple of bytes
:return: converted tuple value
:rtype: value of native python types (bytes, int, unicode (or str for py3k))
"""
result = []
for i, value in enumerate(values):
if i < len(self.field_types):
result.append(self._cast_field(self.field_types[i], value))
else:
result.append(self._cast_field(self.field_types[-1], value))
return tuple(result)
def __repr__(self):
"""
Return user friendy string representation of the object.
Useful for the interactive sessions and debugging.
:rtype: str or None
"""
# If response is not empty then return default list representation
# If there was an SELECT request - return list representation even it is empty
if self._request_type == Request.TNT_OP_SELECT or len(self):
return super(Response, self).__repr__()
# Ping
if self._request_type == Request.TNT_OP_PING:
return "ping ok"
# Return string of form "N records affected"
affected = str(self.rowcount) + (" record" if self.rowcount == 1 else " records")
if self._request_type == Request.TNT_OP_DELETE:
return affected + " deleted"
if self._request_type == Request.TNT_OP_INSERT:
return affected + " inserted"
if self._request_type == Request.TNT_OP_UPDATE:
return affected + " updated"
return affected + " affected"
|
zlobspb/txtarantool
|
txtarantool.py
|
Response._cast_field
|
python
|
def _cast_field(self, cast_to, value):
if cast_to in (int, long, str):
return cast_to(value)
elif cast_to == unicode:
try:
value = value.decode(self.charset, self.errors)
except UnicodeEncodeError, e:
raise InvalidData("Error encoding unicode value '%s': %s" % (repr(value), e))
return value
elif cast_to in (any, bytes):
return value
else:
raise TypeError("Invalid field type %s" % (cast_to))
|
Convert field type from raw bytes to native python type
:param cast_to: native python type to cast to
:type cast_to: a type object (one of bytes, int, unicode (str for py3k))
:param value: raw value from the database
:type value: bytes
:return: converted value
:rtype: value of native python type (one of bytes, int, unicode (str for py3k))
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L686-L710
| null |
class Response(list):
"""
Represents a single response from the server in compliance with the Tarantool protocol.
Responsible for data encapsulation (i.e. received list of tuples) and parses binary
packet received from the server.
"""
def __init__(self, header, body, charset="utf-8", errors="strict", field_types=None):
"""
Create an instance of `Response` using data received from the server.
__init__() itself reads data from the socket, parses response body and
sets appropriate instance attributes.
:param header: header of the response
:type header: array of bytes
:param body: body of the response
:type body: array of bytes
"""
# This is not necessary, because underlying list data structures are created in the __new__(). But let it be.
super(Response, self).__init__()
self.charset = charset
self.errors = errors
self._body_length = None
self._request_id = None
self._request_type = None
self._completion_status = None
self._return_code = None
self._return_message = None
self._rowcount = None
self.field_types = field_types
# Unpack header
if isinstance(header, (tuple, list)):
self._request_type, self._body_length, self._request_id = header
else:
self._request_type, self._body_length, self._request_id = struct_LLL.unpack(header)
if body:
self._unpack_body(body)
@staticmethod
def _unpack_int_base128(varint, offset):
"""Implement Perl unpack's 'w' option, aka base 128 decoding."""
res = ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
return res, offset + 1
def _unpack_tuple(self, buff):
"""
Unpacks the tuple from byte buffer
<tuple> ::= <cardinality><field>+
:param buff: byte array of the form <cardinality><field>+
:type buff: ctypes buffer or bytes
:return: tuple of unpacked values
:rtype: tuple
"""
cardinality = struct_L.unpack_from(buff)[0]
_tuple = ['']*cardinality
offset = 4 # The first 4 bytes in the response body is the <count> we have already read
for i in xrange(cardinality):
field_size, offset = self._unpack_int_base128(buff, offset)
field_data = struct.unpack_from("<%ds" % field_size, buff, offset)[0]
_tuple[i] = field(field_data)
offset += field_size
return tuple(_tuple)
def _unpack_body(self, buff):
"""
Parse the response body.
After body unpacking its data available as python list of tuples
For each request type the response body has the same format:
<insert_response_body> ::= <count> | <count><fq_tuple>
<update_response_body> ::= <count> | <count><fq_tuple>
<delete_response_body> ::= <count> | <count><fq_tuple>
<select_response_body> ::= <count><fq_tuple>*
<call_response_body> ::= <count><fq_tuple>
:param buff: buffer containing request body
:type byff: ctypes buffer
"""
# Unpack <return_code> and <count> (how many records affected or selected)
self._return_code = struct_L.unpack_from(buff, offset=0)[0]
# Separate return_code and completion_code
self._completion_status = self._return_code & 0x00ff
self._return_code >>= 8
# In case of an error unpack the body as an error message
if self._return_code != 0:
self._return_message = unicode(buff[4:-1], self.charset, self.errors)
if self._completion_status == 2:
raise TarantoolError(self._return_code, self._return_message)
# Unpack <count> (how many records affected or selected)
self._rowcount = struct_L.unpack_from(buff, offset=4)[0]
# If the response doesn't contain any tuple - there is nothing to unpack
if self._body_length == 8:
return
# Parse response tuples (<fq_tuple>)
if self._rowcount > 0:
offset = 8 # The first 4 bytes in the response body is the <count> we have already read
while offset < self._body_length:
# In resonse tuples have the form <size><tuple> (<fq_tuple> ::= <size><tuple>).
# Attribute <size> takes into account only size of tuple's <field> payload,
# but does not include 4-byte of <cardinality> field.
#Therefore the actual size of the <tuple> is greater to 4 bytes.
tuple_size = struct.unpack_from("<L", buff, offset)[0] + 4
tuple_data = struct.unpack_from("<%ds" % (tuple_size), buff, offset+4)[0]
tuple_value = self._unpack_tuple(tuple_data)
if self.field_types:
self.append(self._cast_tuple(tuple_value))
else:
self.append(tuple_value)
offset = offset + tuple_size + 4 # This '4' is a size of <size> attribute
@property
def completion_status(self):
"""
:type: int
Request completion status.
There are only three completion status codes in use:
* ``0`` -- "success"; the only possible :attr:`return_code` with this status is ``0``
* ``1`` -- "try again"; an indicator of an intermittent error.
This status is handled automatically by this module.
* ``2`` -- "error"; in this case :attr:`return_code` holds the actual error.
"""
return self._completion_status
@property
def rowcount(self):
"""
:type: int
Number of rows affected or returned by a query.
"""
return self._rowcount
@property
def return_code(self):
"""
:type: int
Required field in the server response.
Value of :attr:`return_code` can be ``0`` if request was sucessfull or contains an error code.
If :attr:`return_code` is non-zero than :attr:`return_message` contains an error message.
"""
return self._return_code
@property
def return_message(self):
"""
:type: str
The error message returned by the server in case of :attr:`return_code` is non-zero.
"""
return self._return_message
def _cast_tuple(self, values):
"""
Convert values of the tuple from raw bytes to native python types
:param values: tuple of the raw database values
:type value: tuple of bytes
:return: converted tuple value
:rtype: value of native python types (bytes, int, unicode (or str for py3k))
"""
result = []
for i, value in enumerate(values):
if i < len(self.field_types):
result.append(self._cast_field(self.field_types[i], value))
else:
result.append(self._cast_field(self.field_types[-1], value))
return tuple(result)
def __repr__(self):
"""
Return user friendy string representation of the object.
Useful for the interactive sessions and debugging.
:rtype: str or None
"""
# If response is not empty then return default list representation
# If there was an SELECT request - return list representation even it is empty
if self._request_type == Request.TNT_OP_SELECT or len(self):
return super(Response, self).__repr__()
# Ping
if self._request_type == Request.TNT_OP_PING:
return "ping ok"
# Return string of form "N records affected"
affected = str(self.rowcount) + (" record" if self.rowcount == 1 else " records")
if self._request_type == Request.TNT_OP_DELETE:
return affected + " deleted"
if self._request_type == Request.TNT_OP_INSERT:
return affected + " inserted"
if self._request_type == Request.TNT_OP_UPDATE:
return affected + " updated"
return affected + " affected"
|
zlobspb/txtarantool
|
txtarantool.py
|
Response._cast_tuple
|
python
|
def _cast_tuple(self, values):
result = []
for i, value in enumerate(values):
if i < len(self.field_types):
result.append(self._cast_field(self.field_types[i], value))
else:
result.append(self._cast_field(self.field_types[-1], value))
return tuple(result)
|
Convert values of the tuple from raw bytes to native python types
:param values: tuple of the raw database values
:type value: tuple of bytes
:return: converted tuple value
:rtype: value of native python types (bytes, int, unicode (or str for py3k))
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L712-L729
| null |
class Response(list):
"""
Represents a single response from the server in compliance with the Tarantool protocol.
Responsible for data encapsulation (i.e. received list of tuples) and parses binary
packet received from the server.
"""
def __init__(self, header, body, charset="utf-8", errors="strict", field_types=None):
"""
Create an instance of `Response` using data received from the server.
__init__() itself reads data from the socket, parses response body and
sets appropriate instance attributes.
:param header: header of the response
:type header: array of bytes
:param body: body of the response
:type body: array of bytes
"""
# This is not necessary, because underlying list data structures are created in the __new__(). But let it be.
super(Response, self).__init__()
self.charset = charset
self.errors = errors
self._body_length = None
self._request_id = None
self._request_type = None
self._completion_status = None
self._return_code = None
self._return_message = None
self._rowcount = None
self.field_types = field_types
# Unpack header
if isinstance(header, (tuple, list)):
self._request_type, self._body_length, self._request_id = header
else:
self._request_type, self._body_length, self._request_id = struct_LLL.unpack(header)
if body:
self._unpack_body(body)
@staticmethod
def _unpack_int_base128(varint, offset):
"""Implement Perl unpack's 'w' option, aka base 128 decoding."""
res = ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
if ord(varint[offset]) >= 0x80:
offset += 1
res = ((res - 0x80) << 7) + ord(varint[offset])
return res, offset + 1
def _unpack_tuple(self, buff):
"""
Unpacks the tuple from byte buffer
<tuple> ::= <cardinality><field>+
:param buff: byte array of the form <cardinality><field>+
:type buff: ctypes buffer or bytes
:return: tuple of unpacked values
:rtype: tuple
"""
cardinality = struct_L.unpack_from(buff)[0]
_tuple = ['']*cardinality
offset = 4 # The first 4 bytes in the response body is the <count> we have already read
for i in xrange(cardinality):
field_size, offset = self._unpack_int_base128(buff, offset)
field_data = struct.unpack_from("<%ds" % field_size, buff, offset)[0]
_tuple[i] = field(field_data)
offset += field_size
return tuple(_tuple)
def _unpack_body(self, buff):
    """
    Parse the response body.

    After body unpacking its data is available as a python list of tuples
    (this object is a list subclass and tuples are appended to ``self``).
    For each request type the response body has the same format:

        <insert_response_body> ::= <count> | <count><fq_tuple>
        <update_response_body> ::= <count> | <count><fq_tuple>
        <delete_response_body> ::= <count> | <count><fq_tuple>
        <select_response_body> ::= <count><fq_tuple>*
        <call_response_body>   ::= <count><fq_tuple>

    :param buff: buffer containing the request body
    :type buff: ctypes buffer

    :raises TarantoolError: if the server reports completion status 2 ("error")
    """
    # Unpack <return_code> and <count> (how many records affected or selected).
    # struct_L: presumably a precompiled "<L" Struct instance — defined elsewhere.
    self._return_code = struct_L.unpack_from(buff, offset=0)[0]
    # Separate return_code and completion_code: low byte is the completion
    # status, the remaining bits are the actual return code.
    self._completion_status = self._return_code & 0x00ff
    self._return_code >>= 8
    # In case of an error unpack the body as an error message
    # (everything after the 4-byte code, minus the trailing NUL byte).
    if self._return_code != 0:
        self._return_message = unicode(buff[4:-1], self.charset, self.errors)
        if self._completion_status == 2:
            # Status 2 means "error": fail hard. Status 1 ("try again")
            # is left to the caller to retry.
            raise TarantoolError(self._return_code, self._return_message)
    # Unpack <count> (how many records affected or selected)
    self._rowcount = struct_L.unpack_from(buff, offset=4)[0]
    # If the response doesn't contain any tuple - there is nothing to unpack
    if self._body_length == 8:
        return
    # Parse response tuples (<fq_tuple>)
    if self._rowcount > 0:
        offset = 8  # The first 8 bytes (return code + count) were already read
        while offset < self._body_length:
            # In the response tuples have the form <size><tuple>
            # (<fq_tuple> ::= <size><tuple>). Attribute <size> takes into
            # account only the size of the tuple's <field> payload, but does
            # not include the 4 bytes of the <cardinality> field. Therefore
            # the actual size of the <tuple> is greater by 4 bytes.
            tuple_size = struct.unpack_from("<L", buff, offset)[0] + 4
            tuple_data = struct.unpack_from("<%ds" % (tuple_size), buff, offset+4)[0]
            tuple_value = self._unpack_tuple(tuple_data)
            if self.field_types:
                # Caller supplied per-field types: cast raw bytes accordingly.
                self.append(self._cast_tuple(tuple_value))
            else:
                self.append(tuple_value)
            offset = offset + tuple_size + 4  # This '4' is the size of the <size> attribute
@property
def completion_status(self):
    """
    :type: int

    Request completion status.

    There are only three completion status codes in use:
        * ``0`` -- "success"; the only possible :attr:`return_code` with this status is ``0``
        * ``1`` -- "try again"; an indicator of an intermittent error.
          This status is handled automatically by this module.
        * ``2`` -- "error"; in this case :attr:`return_code` holds the actual error.
    """
    return self._completion_status
@property
def rowcount(self):
    """
    :type: int

    Number of rows affected or returned by a query.
    """
    return self._rowcount
@property
def return_code(self):
    """
    :type: int

    Required field in the server response.

    The value of :attr:`return_code` is ``0`` if the request was successful,
    or contains an error code. If :attr:`return_code` is non-zero then
    :attr:`return_message` contains an error message.
    """
    return self._return_code
@property
def return_message(self):
    """
    :type: str

    The error message returned by the server in case :attr:`return_code`
    is non-zero.
    """
    return self._return_message
def _cast_field(self, cast_to, value):
    """
    Convert a field from raw bytes to a native python type.

    NOTE: this is Python 2 code (``long``, ``unicode``, ``except E, e``).

    :param cast_to: native python type to cast to
    :type cast_to: a type object (one of bytes, int, unicode (str for py3k))
    :param value: raw value from the database
    :type value: bytes
    :return: converted value
    :rtype: value of native python type (one of bytes, int, unicode (str for py3k))

    :raises InvalidData: if the bytes cannot be decoded with ``self.charset``
    :raises TypeError: if ``cast_to`` is not one of the supported types
    """
    if cast_to in (int, long, str):
        # Direct constructor call handles numeric parsing / py2 str passthrough.
        return cast_to(value)
    elif cast_to == unicode:
        try:
            value = value.decode(self.charset, self.errors)
        except UnicodeEncodeError, e:
            raise InvalidData("Error encoding unicode value '%s': %s" % (repr(value), e))
        return value
    elif cast_to in (any, bytes):
        # 'any' is used as a marker meaning "leave the raw bytes untouched".
        return value
    else:
        raise TypeError("Invalid field type %s" % (cast_to))
def __repr__(self):
    """
    Return a user friendly string representation of the object.

    Useful for interactive sessions and debugging.

    :rtype: str or None
    """
    # If the response is not empty then return the default list representation.
    # If there was a SELECT request - return the list representation even if
    # it is empty.
    if self._request_type == Request.TNT_OP_SELECT or len(self):
        return super(Response, self).__repr__()
    # Ping
    if self._request_type == Request.TNT_OP_PING:
        return "ping ok"
    # Return a string of the form "N records affected"
    affected = str(self.rowcount) + (" record" if self.rowcount == 1 else " records")
    if self._request_type == Request.TNT_OP_DELETE:
        return affected + " deleted"
    if self._request_type == Request.TNT_OP_INSERT:
        return affected + " inserted"
    if self._request_type == Request.TNT_OP_UPDATE:
        return affected + " updated"
    return affected + " affected"
|
zlobspb/txtarantool
|
txtarantool.py
|
TarantoolProtocol.ping
|
python
|
def ping(self):
    """
    Send a ping packet to the tarantool server; the reply has an empty body.

    :return: the reply-queue Deferred with ``handle_reply`` attached, which
        fires with the parsed Response.
    """
    d = self.replyQueue.get_ping()
    packet = RequestPing(self.charset, self.errors)
    self.transport.write(bytes(packet))
    return d.addCallback(self.handle_reply, self.charset, self.errors, None)
|
send ping packet to tarantool server and receive response with empty body
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L865-L872
| null |
class TarantoolProtocol(IprotoPacketReceiver, policies.TimeoutMixin, object):
    """
    Tarantool client protocol.

    Encodes commands as iproto request packets, writes them to the
    transport, and matches server replies to pending Deferreds through an
    IproDeferredQueue keyed on the request id carried in the packet header.
    """
    # Default space (table) number for callers that do not pass one.
    space_no = 0

    def __init__(self, charset="utf-8", errors="strict"):
        # charset/errors control unicode encoding/decoding of tuple fields.
        self.charset = charset
        self.errors = errors
        # Pending requests: Deferreds waiting for replies, keyed by request id.
        self.replyQueue = IproDeferredQueue()

    def connectionMade(self):
        # Register this live connection with the owning factory.
        self.connected = 1
        self.factory.addConnection(self)

    def connectionLost(self, why):
        # Deregister from the factory and fail every outstanding request.
        self.connected = 0
        self.factory.delConnection(self)
        IprotoPacketReceiver.connectionLost(self, why)
        self.replyQueue.broadcast(ConnectionError("Lost connection"))

    def packetReceived(self, header, body):
        # A complete iproto packet arrived; header[2] is the request id.
        self.resetTimeout()
        if not self.replyQueue.check_id(header[2]):
            # Reply for an unknown request id: the stream is corrupt, drop it.
            return self.transport.loseConnection()
        self.replyQueue.put(header[2], (header, body))

    @staticmethod
    def handle_reply(r, charset, errors, field_types):
        # Failures are delivered as Exception instances; re-raise so the
        # error propagates down the Deferred chain.
        if isinstance(r, Exception):
            raise r
        return Response(r[0], r[1], charset, errors, field_types)

    def send_packet(self, packet, field_types=None):
        # Generic helper: write an already-built packet and return a
        # Deferred that fires with the parsed Response.
        self.transport.write(bytes(packet))
        d = self.replyQueue.get()
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    # Tarantool COMMANDS
    def insert(self, space_no, *args):
        """
        insert tuple, if primary key exists server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_ADD, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def insert_ret(self, space_no, field_types, *args):
        """
        insert tuple, inserted tuple is sent back, if primary key exists server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id,
                               space_no, Request.TNT_FLAG_ADD | Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def select(self, space_no, index_no, field_types, *args):
        """
        select tuple(s)
        """
        d = self.replyQueue.get()
        packet = RequestSelect(self.charset, self.errors, d._ipro_request_id, space_no, index_no, 0, 0xffffffff, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def select_ext(self, space_no, index_no, offset, limit, field_types, *args):
        """
        select tuple(s), additional parameters are submitted: offset and limit
        """
        d = self.replyQueue.get()
        packet = RequestSelect(self.charset, self.errors, d._ipro_request_id, space_no, index_no, offset, limit, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def update(self, space_no, key_tuple, op_list):
        """
        send update command(s)
        """
        d = self.replyQueue.get()
        packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id, space_no, 0, key_tuple, op_list)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def update_ret(self, space_no, field_types, key_tuple, op_list):
        """
        send update command(s), updated tuple(s) is(are) sent back
        """
        d = self.replyQueue.get()
        packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id,
                               space_no, Request.TNT_FLAG_RETURN, key_tuple, op_list)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def delete(self, space_no, *args):
        """
        delete tuple by primary key
        """
        d = self.replyQueue.get()
        packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def delete_ret(self, space_no, field_types, *args):
        """
        delete tuple by primary key, deleted tuple is sent back
        """
        d = self.replyQueue.get()
        packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def replace(self, space_no, *args):
        """
        insert tuple, if primary key exists it will be rewritten
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def replace_ret(self, space_no, field_types, *args):
        """
        insert tuple, inserted tuple is sent back, if primary key exists it will be rewritten
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def replace_req(self, space_no, *args):
        """
        insert tuple, if tuple with same primary key doesn't exist server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_REPLACE, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def replace_req_ret(self, space_no, field_types, *args):
        """
        insert tuple, inserted tuple is sent back, if tuple with same primary key doesn't exist server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id,
                               space_no, Request.TNT_FLAG_REPLACE | Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def call(self, proc_name, field_types, *args):
        """
        call server procedure
        """
        d = self.replyQueue.get()
        packet = RequestCall(self.charset, self.errors, d._ipro_request_id, proc_name, 0, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
|
zlobspb/txtarantool
|
txtarantool.py
|
TarantoolProtocol.insert
|
python
|
def insert(self, space_no, *args):
    """
    Insert a tuple; if the primary key already exists the server returns an error.

    :return: the reply-queue Deferred with ``handle_reply`` attached.
    """
    d = self.replyQueue.get()
    packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_ADD, *args)
    self.transport.write(bytes(packet))
    return d.addCallback(self.handle_reply, self.charset, self.errors, None)
|
insert tuple, if primary key exists server will return error
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L874-L881
| null |
class TarantoolProtocol(IprotoPacketReceiver, policies.TimeoutMixin, object):
    """
    Tarantool client protocol.

    Encodes commands as iproto request packets, writes them to the
    transport, and matches server replies to pending Deferreds through an
    IproDeferredQueue keyed on the request id carried in the packet header.
    """
    # Default space (table) number for callers that do not pass one.
    space_no = 0

    def __init__(self, charset="utf-8", errors="strict"):
        # charset/errors control unicode encoding/decoding of tuple fields.
        self.charset = charset
        self.errors = errors
        # Pending requests: Deferreds waiting for replies, keyed by request id.
        self.replyQueue = IproDeferredQueue()

    def connectionMade(self):
        # Register this live connection with the owning factory.
        self.connected = 1
        self.factory.addConnection(self)

    def connectionLost(self, why):
        # Deregister from the factory and fail every outstanding request.
        self.connected = 0
        self.factory.delConnection(self)
        IprotoPacketReceiver.connectionLost(self, why)
        self.replyQueue.broadcast(ConnectionError("Lost connection"))

    def packetReceived(self, header, body):
        # A complete iproto packet arrived; header[2] is the request id.
        self.resetTimeout()
        if not self.replyQueue.check_id(header[2]):
            # Reply for an unknown request id: the stream is corrupt, drop it.
            return self.transport.loseConnection()
        self.replyQueue.put(header[2], (header, body))

    @staticmethod
    def handle_reply(r, charset, errors, field_types):
        # Failures are delivered as Exception instances; re-raise so the
        # error propagates down the Deferred chain.
        if isinstance(r, Exception):
            raise r
        return Response(r[0], r[1], charset, errors, field_types)

    def send_packet(self, packet, field_types=None):
        # Generic helper: write an already-built packet and return a
        # Deferred that fires with the parsed Response.
        self.transport.write(bytes(packet))
        d = self.replyQueue.get()
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    # Tarantool COMMANDS
    def ping(self):
        """
        send ping packet to tarantool server and receive response with empty body
        """
        d = self.replyQueue.get_ping()
        packet = RequestPing(self.charset, self.errors)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def insert_ret(self, space_no, field_types, *args):
        """
        insert tuple, inserted tuple is sent back, if primary key exists server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id,
                               space_no, Request.TNT_FLAG_ADD | Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def select(self, space_no, index_no, field_types, *args):
        """
        select tuple(s)
        """
        d = self.replyQueue.get()
        packet = RequestSelect(self.charset, self.errors, d._ipro_request_id, space_no, index_no, 0, 0xffffffff, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def select_ext(self, space_no, index_no, offset, limit, field_types, *args):
        """
        select tuple(s), additional parameters are submitted: offset and limit
        """
        d = self.replyQueue.get()
        packet = RequestSelect(self.charset, self.errors, d._ipro_request_id, space_no, index_no, offset, limit, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def update(self, space_no, key_tuple, op_list):
        """
        send update command(s)
        """
        d = self.replyQueue.get()
        packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id, space_no, 0, key_tuple, op_list)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def update_ret(self, space_no, field_types, key_tuple, op_list):
        """
        send update command(s), updated tuple(s) is(are) sent back
        """
        d = self.replyQueue.get()
        packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id,
                               space_no, Request.TNT_FLAG_RETURN, key_tuple, op_list)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def delete(self, space_no, *args):
        """
        delete tuple by primary key
        """
        d = self.replyQueue.get()
        packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def delete_ret(self, space_no, field_types, *args):
        """
        delete tuple by primary key, deleted tuple is sent back
        """
        d = self.replyQueue.get()
        packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def replace(self, space_no, *args):
        """
        insert tuple, if primary key exists it will be rewritten
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def replace_ret(self, space_no, field_types, *args):
        """
        insert tuple, inserted tuple is sent back, if primary key exists it will be rewritten
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def replace_req(self, space_no, *args):
        """
        insert tuple, if tuple with same primary key doesn't exist server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_REPLACE, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def replace_req_ret(self, space_no, field_types, *args):
        """
        insert tuple, inserted tuple is sent back, if tuple with same primary key doesn't exist server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id,
                               space_no, Request.TNT_FLAG_REPLACE | Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def call(self, proc_name, field_types, *args):
        """
        call server procedure
        """
        d = self.replyQueue.get()
        packet = RequestCall(self.charset, self.errors, d._ipro_request_id, proc_name, 0, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
|
zlobspb/txtarantool
|
txtarantool.py
|
TarantoolProtocol.select
|
python
|
def select(self, space_no, index_no, field_types, *args):
    """
    Select tuple(s) by key from the given space and index.

    The full range (offset 0, limit 0xffffffff) is requested.

    :return: the reply-queue Deferred with ``handle_reply`` attached.
    """
    d = self.replyQueue.get()
    packet = RequestSelect(self.charset, self.errors, d._ipro_request_id, space_no, index_no, 0, 0xffffffff, *args)
    self.transport.write(bytes(packet))
    return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
|
select tuple(s)
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L893-L900
| null |
class TarantoolProtocol(IprotoPacketReceiver, policies.TimeoutMixin, object):
    """
    Tarantool client protocol.

    Encodes commands as iproto request packets, writes them to the
    transport, and matches server replies to pending Deferreds through an
    IproDeferredQueue keyed on the request id carried in the packet header.
    """
    # Default space (table) number for callers that do not pass one.
    space_no = 0

    def __init__(self, charset="utf-8", errors="strict"):
        # charset/errors control unicode encoding/decoding of tuple fields.
        self.charset = charset
        self.errors = errors
        # Pending requests: Deferreds waiting for replies, keyed by request id.
        self.replyQueue = IproDeferredQueue()

    def connectionMade(self):
        # Register this live connection with the owning factory.
        self.connected = 1
        self.factory.addConnection(self)

    def connectionLost(self, why):
        # Deregister from the factory and fail every outstanding request.
        self.connected = 0
        self.factory.delConnection(self)
        IprotoPacketReceiver.connectionLost(self, why)
        self.replyQueue.broadcast(ConnectionError("Lost connection"))

    def packetReceived(self, header, body):
        # A complete iproto packet arrived; header[2] is the request id.
        self.resetTimeout()
        if not self.replyQueue.check_id(header[2]):
            # Reply for an unknown request id: the stream is corrupt, drop it.
            return self.transport.loseConnection()
        self.replyQueue.put(header[2], (header, body))

    @staticmethod
    def handle_reply(r, charset, errors, field_types):
        # Failures are delivered as Exception instances; re-raise so the
        # error propagates down the Deferred chain.
        if isinstance(r, Exception):
            raise r
        return Response(r[0], r[1], charset, errors, field_types)

    def send_packet(self, packet, field_types=None):
        # Generic helper: write an already-built packet and return a
        # Deferred that fires with the parsed Response.
        self.transport.write(bytes(packet))
        d = self.replyQueue.get()
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    # Tarantool COMMANDS
    def ping(self):
        """
        send ping packet to tarantool server and receive response with empty body
        """
        d = self.replyQueue.get_ping()
        packet = RequestPing(self.charset, self.errors)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def insert(self, space_no, *args):
        """
        insert tuple, if primary key exists server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_ADD, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def insert_ret(self, space_no, field_types, *args):
        """
        insert tuple, inserted tuple is sent back, if primary key exists server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id,
                               space_no, Request.TNT_FLAG_ADD | Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def select_ext(self, space_no, index_no, offset, limit, field_types, *args):
        """
        select tuple(s), additional parameters are submitted: offset and limit
        """
        d = self.replyQueue.get()
        packet = RequestSelect(self.charset, self.errors, d._ipro_request_id, space_no, index_no, offset, limit, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def update(self, space_no, key_tuple, op_list):
        """
        send update command(s)
        """
        d = self.replyQueue.get()
        packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id, space_no, 0, key_tuple, op_list)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def update_ret(self, space_no, field_types, key_tuple, op_list):
        """
        send update command(s), updated tuple(s) is(are) sent back
        """
        d = self.replyQueue.get()
        packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id,
                               space_no, Request.TNT_FLAG_RETURN, key_tuple, op_list)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def delete(self, space_no, *args):
        """
        delete tuple by primary key
        """
        d = self.replyQueue.get()
        packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def delete_ret(self, space_no, field_types, *args):
        """
        delete tuple by primary key, deleted tuple is sent back
        """
        d = self.replyQueue.get()
        packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def replace(self, space_no, *args):
        """
        insert tuple, if primary key exists it will be rewritten
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def replace_ret(self, space_no, field_types, *args):
        """
        insert tuple, inserted tuple is sent back, if primary key exists it will be rewritten
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def replace_req(self, space_no, *args):
        """
        insert tuple, if tuple with same primary key doesn't exist server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_REPLACE, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def replace_req_ret(self, space_no, field_types, *args):
        """
        insert tuple, inserted tuple is sent back, if tuple with same primary key doesn't exist server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id,
                               space_no, Request.TNT_FLAG_REPLACE | Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def call(self, proc_name, field_types, *args):
        """
        call server procedure
        """
        d = self.replyQueue.get()
        packet = RequestCall(self.charset, self.errors, d._ipro_request_id, proc_name, 0, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
|
zlobspb/txtarantool
|
txtarantool.py
|
TarantoolProtocol.update
|
python
|
def update(self, space_no, key_tuple, op_list):
    """
    Send update command(s) for the tuple matching ``key_tuple``.

    :return: the reply-queue Deferred with ``handle_reply`` attached.
    """
    d = self.replyQueue.get()
    packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id, space_no, 0, key_tuple, op_list)
    self.transport.write(bytes(packet))
    return d.addCallback(self.handle_reply, self.charset, self.errors, None)
|
send update command(s)
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L911-L918
| null |
class TarantoolProtocol(IprotoPacketReceiver, policies.TimeoutMixin, object):
    """
    Tarantool client protocol.

    Encodes commands as iproto request packets, writes them to the
    transport, and matches server replies to pending Deferreds through an
    IproDeferredQueue keyed on the request id carried in the packet header.
    """
    # Default space (table) number for callers that do not pass one.
    space_no = 0

    def __init__(self, charset="utf-8", errors="strict"):
        # charset/errors control unicode encoding/decoding of tuple fields.
        self.charset = charset
        self.errors = errors
        # Pending requests: Deferreds waiting for replies, keyed by request id.
        self.replyQueue = IproDeferredQueue()

    def connectionMade(self):
        # Register this live connection with the owning factory.
        self.connected = 1
        self.factory.addConnection(self)

    def connectionLost(self, why):
        # Deregister from the factory and fail every outstanding request.
        self.connected = 0
        self.factory.delConnection(self)
        IprotoPacketReceiver.connectionLost(self, why)
        self.replyQueue.broadcast(ConnectionError("Lost connection"))

    def packetReceived(self, header, body):
        # A complete iproto packet arrived; header[2] is the request id.
        self.resetTimeout()
        if not self.replyQueue.check_id(header[2]):
            # Reply for an unknown request id: the stream is corrupt, drop it.
            return self.transport.loseConnection()
        self.replyQueue.put(header[2], (header, body))

    @staticmethod
    def handle_reply(r, charset, errors, field_types):
        # Failures are delivered as Exception instances; re-raise so the
        # error propagates down the Deferred chain.
        if isinstance(r, Exception):
            raise r
        return Response(r[0], r[1], charset, errors, field_types)

    def send_packet(self, packet, field_types=None):
        # Generic helper: write an already-built packet and return a
        # Deferred that fires with the parsed Response.
        self.transport.write(bytes(packet))
        d = self.replyQueue.get()
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    # Tarantool COMMANDS
    def ping(self):
        """
        send ping packet to tarantool server and receive response with empty body
        """
        d = self.replyQueue.get_ping()
        packet = RequestPing(self.charset, self.errors)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def insert(self, space_no, *args):
        """
        insert tuple, if primary key exists server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_ADD, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def insert_ret(self, space_no, field_types, *args):
        """
        insert tuple, inserted tuple is sent back, if primary key exists server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id,
                               space_no, Request.TNT_FLAG_ADD | Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def select(self, space_no, index_no, field_types, *args):
        """
        select tuple(s)
        """
        d = self.replyQueue.get()
        packet = RequestSelect(self.charset, self.errors, d._ipro_request_id, space_no, index_no, 0, 0xffffffff, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def select_ext(self, space_no, index_no, offset, limit, field_types, *args):
        """
        select tuple(s), additional parameters are submitted: offset and limit
        """
        d = self.replyQueue.get()
        packet = RequestSelect(self.charset, self.errors, d._ipro_request_id, space_no, index_no, offset, limit, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def update_ret(self, space_no, field_types, key_tuple, op_list):
        """
        send update command(s), updated tuple(s) is(are) sent back
        """
        d = self.replyQueue.get()
        packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id,
                               space_no, Request.TNT_FLAG_RETURN, key_tuple, op_list)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def delete(self, space_no, *args):
        """
        delete tuple by primary key
        """
        d = self.replyQueue.get()
        packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def delete_ret(self, space_no, field_types, *args):
        """
        delete tuple by primary key, deleted tuple is sent back
        """
        d = self.replyQueue.get()
        packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def replace(self, space_no, *args):
        """
        insert tuple, if primary key exists it will be rewritten
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def replace_ret(self, space_no, field_types, *args):
        """
        insert tuple, inserted tuple is sent back, if primary key exists it will be rewritten
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def replace_req(self, space_no, *args):
        """
        insert tuple, if tuple with same primary key doesn't exist server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_REPLACE, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, None)

    def replace_req_ret(self, space_no, field_types, *args):
        """
        insert tuple, inserted tuple is sent back, if tuple with same primary key doesn't exist server will return error
        """
        d = self.replyQueue.get()
        packet = RequestInsert(self.charset, self.errors, d._ipro_request_id,
                               space_no, Request.TNT_FLAG_REPLACE | Request.TNT_FLAG_RETURN, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)

    def call(self, proc_name, field_types, *args):
        """
        call server procedure
        """
        d = self.replyQueue.get()
        packet = RequestCall(self.charset, self.errors, d._ipro_request_id, proc_name, 0, *args)
        self.transport.write(bytes(packet))
        return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
|
zlobspb/txtarantool
|
txtarantool.py
|
TarantoolProtocol.update_ret
|
python
|
def update_ret(self, space_no, field_types, key_tuple, op_list):
    """
    Send update command(s); the updated tuple(s) is/are sent back
    (TNT_FLAG_RETURN is set on the request).

    :return: the reply-queue Deferred with ``handle_reply`` attached.
    """
    d = self.replyQueue.get()
    packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id,
                           space_no, Request.TNT_FLAG_RETURN, key_tuple, op_list)
    self.transport.write(bytes(packet))
    return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
|
send update command(s), updated tuple(s) is(are) sent back
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L920-L928
| null |
class TarantoolProtocol(IprotoPacketReceiver, policies.TimeoutMixin, object):
"""
Tarantool client protocol.
"""
space_no = 0
def __init__(self, charset="utf-8", errors="strict"):
self.charset = charset
self.errors = errors
self.replyQueue = IproDeferredQueue()
def connectionMade(self):
self.connected = 1
self.factory.addConnection(self)
def connectionLost(self, why):
self.connected = 0
self.factory.delConnection(self)
IprotoPacketReceiver.connectionLost(self, why)
self.replyQueue.broadcast(ConnectionError("Lost connection"))
def packetReceived(self, header, body):
self.resetTimeout()
if not self.replyQueue.check_id(header[2]):
return self.transport.loseConnection()
self.replyQueue.put(header[2], (header, body))
@staticmethod
def handle_reply(r, charset, errors, field_types):
if isinstance(r, Exception):
raise r
return Response(r[0], r[1], charset, errors, field_types)
def send_packet(self, packet, field_types=None):
self.transport.write(bytes(packet))
d = self.replyQueue.get()
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
# Tarantool COMMANDS
def ping(self):
"""
send ping packet to tarantool server and receive response with empty body
"""
d = self.replyQueue.get_ping()
packet = RequestPing(self.charset, self.errors)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def insert(self, space_no, *args):
"""
insert tuple, if primary key exists server will return error
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_ADD, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def insert_ret(self, space_no, field_types, *args):
"""
insert tuple, inserted tuple is sent back, if primary key exists server will return error
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id,
space_no, Request.TNT_FLAG_ADD | Request.TNT_FLAG_RETURN, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def select(self, space_no, index_no, field_types, *args):
"""
select tuple(s)
"""
d = self.replyQueue.get()
packet = RequestSelect(self.charset, self.errors, d._ipro_request_id, space_no, index_no, 0, 0xffffffff, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def select_ext(self, space_no, index_no, offset, limit, field_types, *args):
"""
select tuple(s), additional parameters are submitted: offset and limit
"""
d = self.replyQueue.get()
packet = RequestSelect(self.charset, self.errors, d._ipro_request_id, space_no, index_no, offset, limit, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def update(self, space_no, key_tuple, op_list):
"""
send update command(s)
"""
d = self.replyQueue.get()
packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id, space_no, 0, key_tuple, op_list)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def delete(self, space_no, *args):
"""
delete tuple by primary key
"""
d = self.replyQueue.get()
packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def delete_ret(self, space_no, field_types, *args):
"""
delete tuple by primary key, deleted tuple is sent back
"""
d = self.replyQueue.get()
packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_RETURN, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def replace(self, space_no, *args):
"""
insert tuple, if primary key exists it will be rewritten
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def replace_ret(self, space_no, field_types, *args):
"""
insert tuple, inserted tuple is sent back, if primary key exists it will be rewritten
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_RETURN, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def replace_req(self, space_no, *args):
"""
insert tuple, if tuple with same primary key doesn't exist server will return error
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_REPLACE, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def replace_req_ret(self, space_no, field_types, *args):
"""
insert tuple, inserted tuple is sent back, if tuple with same primary key doesn't exist server will return error
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id,
space_no, Request.TNT_FLAG_REPLACE | Request.TNT_FLAG_RETURN, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def call(self, proc_name, field_types, *args):
"""
call server procedure
"""
d = self.replyQueue.get()
packet = RequestCall(self.charset, self.errors, d._ipro_request_id, proc_name, 0, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
|
zlobspb/txtarantool
|
txtarantool.py
|
TarantoolProtocol.delete
|
python
|
def delete(self, space_no, *args):
d = self.replyQueue.get()
packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
|
delete tuple by primary key
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L930-L937
| null |
class TarantoolProtocol(IprotoPacketReceiver, policies.TimeoutMixin, object):
"""
Tarantool client protocol.
"""
space_no = 0
def __init__(self, charset="utf-8", errors="strict"):
self.charset = charset
self.errors = errors
self.replyQueue = IproDeferredQueue()
def connectionMade(self):
self.connected = 1
self.factory.addConnection(self)
def connectionLost(self, why):
self.connected = 0
self.factory.delConnection(self)
IprotoPacketReceiver.connectionLost(self, why)
self.replyQueue.broadcast(ConnectionError("Lost connection"))
def packetReceived(self, header, body):
self.resetTimeout()
if not self.replyQueue.check_id(header[2]):
return self.transport.loseConnection()
self.replyQueue.put(header[2], (header, body))
@staticmethod
def handle_reply(r, charset, errors, field_types):
if isinstance(r, Exception):
raise r
return Response(r[0], r[1], charset, errors, field_types)
def send_packet(self, packet, field_types=None):
self.transport.write(bytes(packet))
d = self.replyQueue.get()
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
# Tarantool COMMANDS
def ping(self):
"""
send ping packet to tarantool server and receive response with empty body
"""
d = self.replyQueue.get_ping()
packet = RequestPing(self.charset, self.errors)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def insert(self, space_no, *args):
"""
insert tuple, if primary key exists server will return error
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_ADD, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def insert_ret(self, space_no, field_types, *args):
"""
insert tuple, inserted tuple is sent back, if primary key exists server will return error
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id,
space_no, Request.TNT_FLAG_ADD | Request.TNT_FLAG_RETURN, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def select(self, space_no, index_no, field_types, *args):
"""
select tuple(s)
"""
d = self.replyQueue.get()
packet = RequestSelect(self.charset, self.errors, d._ipro_request_id, space_no, index_no, 0, 0xffffffff, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def select_ext(self, space_no, index_no, offset, limit, field_types, *args):
"""
select tuple(s), additional parameters are submitted: offset and limit
"""
d = self.replyQueue.get()
packet = RequestSelect(self.charset, self.errors, d._ipro_request_id, space_no, index_no, offset, limit, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def update(self, space_no, key_tuple, op_list):
"""
send update command(s)
"""
d = self.replyQueue.get()
packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id, space_no, 0, key_tuple, op_list)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def update_ret(self, space_no, field_types, key_tuple, op_list):
"""
send update command(s), updated tuple(s) is(are) sent back
"""
d = self.replyQueue.get()
packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id,
space_no, Request.TNT_FLAG_RETURN, key_tuple, op_list)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def delete_ret(self, space_no, field_types, *args):
"""
delete tuple by primary key, deleted tuple is sent back
"""
d = self.replyQueue.get()
packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_RETURN, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def replace(self, space_no, *args):
"""
insert tuple, if primary key exists it will be rewritten
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def replace_ret(self, space_no, field_types, *args):
"""
insert tuple, inserted tuple is sent back, if primary key exists it will be rewritten
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_RETURN, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def replace_req(self, space_no, *args):
"""
insert tuple, if tuple with same primary key doesn't exist server will return error
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_REPLACE, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def replace_req_ret(self, space_no, field_types, *args):
"""
insert tuple, inserted tuple is sent back, if tuple with same primary key doesn't exist server will return error
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id,
space_no, Request.TNT_FLAG_REPLACE | Request.TNT_FLAG_RETURN, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def call(self, proc_name, field_types, *args):
"""
call server procedure
"""
d = self.replyQueue.get()
packet = RequestCall(self.charset, self.errors, d._ipro_request_id, proc_name, 0, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
|
zlobspb/txtarantool
|
txtarantool.py
|
TarantoolProtocol.call
|
python
|
def call(self, proc_name, field_types, *args):
d = self.replyQueue.get()
packet = RequestCall(self.charset, self.errors, d._ipro_request_id, proc_name, 0, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
|
call server procedure
|
train
|
https://github.com/zlobspb/txtarantool/blob/e8d451d53e1c99ccf1f23ce36a9c589fa2ed0350/txtarantool.py#L985-L992
| null |
class TarantoolProtocol(IprotoPacketReceiver, policies.TimeoutMixin, object):
"""
Tarantool client protocol.
"""
space_no = 0
def __init__(self, charset="utf-8", errors="strict"):
self.charset = charset
self.errors = errors
self.replyQueue = IproDeferredQueue()
def connectionMade(self):
self.connected = 1
self.factory.addConnection(self)
def connectionLost(self, why):
self.connected = 0
self.factory.delConnection(self)
IprotoPacketReceiver.connectionLost(self, why)
self.replyQueue.broadcast(ConnectionError("Lost connection"))
def packetReceived(self, header, body):
self.resetTimeout()
if not self.replyQueue.check_id(header[2]):
return self.transport.loseConnection()
self.replyQueue.put(header[2], (header, body))
@staticmethod
def handle_reply(r, charset, errors, field_types):
if isinstance(r, Exception):
raise r
return Response(r[0], r[1], charset, errors, field_types)
def send_packet(self, packet, field_types=None):
self.transport.write(bytes(packet))
d = self.replyQueue.get()
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
# Tarantool COMMANDS
def ping(self):
"""
send ping packet to tarantool server and receive response with empty body
"""
d = self.replyQueue.get_ping()
packet = RequestPing(self.charset, self.errors)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def insert(self, space_no, *args):
"""
insert tuple, if primary key exists server will return error
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_ADD, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def insert_ret(self, space_no, field_types, *args):
"""
insert tuple, inserted tuple is sent back, if primary key exists server will return error
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id,
space_no, Request.TNT_FLAG_ADD | Request.TNT_FLAG_RETURN, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def select(self, space_no, index_no, field_types, *args):
"""
select tuple(s)
"""
d = self.replyQueue.get()
packet = RequestSelect(self.charset, self.errors, d._ipro_request_id, space_no, index_no, 0, 0xffffffff, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def select_ext(self, space_no, index_no, offset, limit, field_types, *args):
"""
select tuple(s), additional parameters are submitted: offset and limit
"""
d = self.replyQueue.get()
packet = RequestSelect(self.charset, self.errors, d._ipro_request_id, space_no, index_no, offset, limit, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def update(self, space_no, key_tuple, op_list):
"""
send update command(s)
"""
d = self.replyQueue.get()
packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id, space_no, 0, key_tuple, op_list)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def update_ret(self, space_no, field_types, key_tuple, op_list):
"""
send update command(s), updated tuple(s) is(are) sent back
"""
d = self.replyQueue.get()
packet = RequestUpdate(self.charset, self.errors, d._ipro_request_id,
space_no, Request.TNT_FLAG_RETURN, key_tuple, op_list)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def delete(self, space_no, *args):
"""
delete tuple by primary key
"""
d = self.replyQueue.get()
packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def delete_ret(self, space_no, field_types, *args):
"""
delete tuple by primary key, deleted tuple is sent back
"""
d = self.replyQueue.get()
packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_RETURN, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def replace(self, space_no, *args):
"""
insert tuple, if primary key exists it will be rewritten
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def replace_ret(self, space_no, field_types, *args):
"""
insert tuple, inserted tuple is sent back, if primary key exists it will be rewritten
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_RETURN, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
def replace_req(self, space_no, *args):
"""
insert tuple, if tuple with same primary key doesn't exist server will return error
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id, space_no, Request.TNT_FLAG_REPLACE, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def replace_req_ret(self, space_no, field_types, *args):
"""
insert tuple, inserted tuple is sent back, if tuple with same primary key doesn't exist server will return error
"""
d = self.replyQueue.get()
packet = RequestInsert(self.charset, self.errors, d._ipro_request_id,
space_no, Request.TNT_FLAG_REPLACE | Request.TNT_FLAG_RETURN, *args)
self.transport.write(bytes(packet))
return d.addCallback(self.handle_reply, self.charset, self.errors, field_types)
|
mayfield/cellulario
|
cellulario/tier.py
|
Tier.make_gatherer
|
python
|
def make_gatherer(cls, cell, source_tiers, gatherby):
pending = collections.defaultdict(dict)
tier_hashes = [hash(x) for x in source_tiers]
@asyncio.coroutine
def organize(route, *args):
srchash = hash(route.source)
key = gatherby(*args)
group = pending[key]
assert srchash not in group
group[srchash] = args
if len(group) == len(tier_hashes):
del pending[key]
yield from route.emit(*[group[x] for x in tier_hashes])
return cls(cell, organize)
|
Produce a single source tier that gathers from a set of tiers when
the key function returns a unique result for each tier.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/tier.py#L77-L93
| null |
class Tier(object):
""" A managed layer of IO operations. Generally speaking a tier should
consist of one or more IO operations that are of consistent complexity
with a single emit data type. A tier emits data instead of returning it.
Emissions flow from one tier to the next set of tiers until the final
tiers are reached.
A tier operates as a node in a directed acyclic graph (DAG). Tiers are
linked together by sourcing other tiers and are considered final when they
are not sourced by any other tier. A final tier or the HEAD tier in SCM
terms can emit values to the consumer of the IOCell itself. For example,
if T1 emits v1 and T2 sources T1 then the coroutine associated with T2 will
be run with an argument of v1. The emissions of T2 would be available to
the cell user. Example python 3.5 style code would be:
>>> @cell.tier()
... async def T1(route):
... await route.emit(1)
>>> @cell.tier(source=T1)
... async def T2(route, v1):
... await route.emit(v1 * 2)
>>> print(list(cell))
[2]
The same example in python 3.4:
>>> @cell.tier_coroutine()
... def T1(route):
... yield from route.emit(1)
>>> @cell.tier_coroutine(source=T1)
... def T2(route, v1):
... yield from route.emit(v1 * 2)
>>> print(list(cell))
[2]
There are different modes of operation with respect to sourcing and
emitting. A tier may source from more than one tier and the default mode
is to simply run a tier's coroutine for each emission of its source tiers.
An alternative mode is to gather the emissions from several source tiers
and group them by a unique key. When emissions from all the source tiers
have been gathered for a particular grouping key, the coroutine will be
run with an argument list featuring all the relevant emit values. It is a
sort of micro variant of map-reduce.
The work of a tier is done by a user defined `asyncio.coroutine` which is
automatically managed by the `IOCell` and `Coordinator` classes. The
`spec` attributes given by `IOCell.add_tier` are used to train the
coordinator. For example, the spec may indicate a concurrency factor or
buffering min/max values.
A tier contains routing information used to control how emit() calls flow.
The default mode for emit is broadcast style signal emission to any other
tiers that source from this tier. Alternatively the tier can be configured
to buffer emissions until a sufficient number of emissions are available.
The actual number of emit values buffered is controlled by the coordinator.
"""
coro_tier_map = weakref.WeakValueDictionary()
@classmethod
def __init__(self, cell, coro, source=None, buffer=0, gatherby=None,
**spec):
if not asyncio.iscoroutinefunction(coro):
raise ValueError("Function argument must be a coroutine")
self.coro = coro
self.coro_tier_map[coro] = self
self.closed = False
self.cell = cell
self.sources = []
self.dests = []
if source:
if not isinstance(source, collections.Sequence):
source = [source]
source_tiers = []
for x_source in source:
if not isinstance(x_source, Tier):
x_source = self.coro_tier_map[x_source]
source_tiers.append(x_source)
if gatherby is not None:
gatherer = self.make_gatherer(cell, source_tiers, gatherby)
for x in source_tiers:
gatherer.add_source(x)
self.add_source(gatherer)
else:
for x in source_tiers:
self.add_source(x)
self.spec = spec
self.buffer_max_size = buffer
self.buffer = [] if buffer != 0 else None
def __repr__(self):
coro_name = self.coro and self.coro.__name__
return '<TaskTier at 0x%x for %s, sources: %d, dests: %d, closed: ' \
'%s>' % (id(self), coro_name, len(self.sources), len(self.dests),
self.closed)
@asyncio.coroutine
def enqueue_task(self, source, *args):
""" Enqueue a task execution. It will run in the background as soon
as the coordinator clears it to do so. """
yield from self.cell.coord.enqueue(self)
route = Route(source, self.cell, self.spec, self.emit)
self.cell.loop.create_task(self.coord_wrap(route, *args))
# To guarantee that the event loop works fluidly, we manually yield
# once. The coordinator enqueue coroutine is not required to yield so
# this ensures we avoid various forms of event starvation regardless.
yield
@asyncio.coroutine
def coord_wrap(self, *args):
""" Wrap the coroutine with coordination throttles. """
yield from self.cell.coord.start(self)
yield from self.coro(*args)
yield from self.cell.coord.finish(self)
@asyncio.coroutine
def emit(self, *args):
""" Send data to the next tier(s). This call can be delayed if the
coordinator thinks the backlog is too high for any of the emit
destinations. Likewise when buffering emit values prior to enqueuing
them we ask the coordinator if we should flush the buffer each time in
case the coordinator is managing the buffering by other metrics such
as latency. """
if self.buffer is not None:
self.buffer.extend(args)
if self.buffer_max_size is not None:
flush = len(self.buffer) >= self.buffer_max_size
else:
flush = yield from self.cell.coord.flush(self)
if flush:
yield from self.flush()
else:
for t in self.dests:
yield from t.enqueue_task(self, *args)
@asyncio.coroutine
def flush(self):
""" Flush the buffer of buffered tiers to our destination tiers. """
if self.buffer is None:
return
data = self.buffer
self.buffer = []
for x in self.dests:
yield from x.enqueue_task(self, *data)
def add_source(self, tier):
""" Schedule this tier to be called when another tier emits. """
tier.add_dest(self)
self.sources.append(tier)
def add_dest(self, tier):
""" Send data to this tier when we emit. """
self.dests.append(tier)
def close(self):
""" Free any potential cycles. """
self.cell = None
self.coro = None
self.buffer = None
del self.dests[:]
del self.sources[:]
|
mayfield/cellulario
|
cellulario/tier.py
|
Tier.enqueue_task
|
python
|
def enqueue_task(self, source, *args):
yield from self.cell.coord.enqueue(self)
route = Route(source, self.cell, self.spec, self.emit)
self.cell.loop.create_task(self.coord_wrap(route, *args))
# To guarantee that the event loop works fluidly, we manually yield
# once. The coordinator enqueue coroutine is not required to yield so
# this ensures we avoid various forms of event starvation regardless.
yield
|
Enqueue a task execution. It will run in the background as soon
as the coordinator clears it to do so.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/tier.py#L132-L141
| null |
class Tier(object):
""" A managed layer of IO operations. Generally speaking a tier should
consist of one or more IO operations that are of consistent complexity
with a single emit data type. A tier emits data instead of returning it.
Emissions flow from one tier to the next set of tiers until the final
tiers are reached.
A tier operates as a node in a directed acyclic graph (DAG). Tiers are
linked together by sourcing other tiers and are considered final when they
are not sourced by any other tier. A final tier or the HEAD tier in SCM
terms can emit values to the consumer of the IOCell itself. For example,
if T1 emits v1 and T2 sources T1 then the coroutine associated with T2 will
be run with an argument of v1. The emissions of T2 would be available to
the cell user. Example python 3.5 style code would be:
>>> @cell.tier()
... async def T1(route):
... await route.emit(1)
>>> @cell.tier(source=T1)
... async def T2(route, v1):
... await route.emit(v1 * 2)
>>> print(list(cell))
[2]
The same example in python 3.4:
>>> @cell.tier_coroutine()
... def T1(route):
... yield from route.emit(1)
>>> @cell.tier_coroutine(source=T1)
... def T2(route, v1):
... yield from route.emit(v1 * 2)
>>> print(list(cell))
[2]
There are different modes of operation with respect to sourcing and
emitting. A tier may source from more than one tier and the default mode
is to simply run a tier's coroutine for each emission of its source tiers.
An alternative mode is to gather the emissions from several source tiers
and group them by a unique key. When emissions from all the source tiers
have been gathered for a particular grouping key, the coroutine will be
run with an argument list featuring all the relevant emit values. It is a
sort of micro variant of map-reduce.
The work of a tier is done by a user defined `asyncio.coroutine` which is
automatically managed by the `IOCell` and `Coordinator` classes. The
`spec` attributes given by `IOCell.add_tier` are used to train the
coordinator. For example, the spec may indicate a concurrency factor or
buffering min/max values.
A tier contains routing information used to control how emit() calls flow.
The default mode for emit is broadcast style signal emission to any other
tiers that source from this tier. Alternatively the tier can be configured
to buffer emissions until a sufficient number of emissions are available.
The actual number of emit values buffered is controlled by the coordinator.
"""
coro_tier_map = weakref.WeakValueDictionary()
@classmethod
def make_gatherer(cls, cell, source_tiers, gatherby):
""" Produce a single source tier that gathers from a set of tiers when
the key function returns a unique result for each tier. """
pending = collections.defaultdict(dict)
tier_hashes = [hash(x) for x in source_tiers]
@asyncio.coroutine
def organize(route, *args):
srchash = hash(route.source)
key = gatherby(*args)
group = pending[key]
assert srchash not in group
group[srchash] = args
if len(group) == len(tier_hashes):
del pending[key]
yield from route.emit(*[group[x] for x in tier_hashes])
return cls(cell, organize)
def __init__(self, cell, coro, source=None, buffer=0, gatherby=None,
**spec):
if not asyncio.iscoroutinefunction(coro):
raise ValueError("Function argument must be a coroutine")
self.coro = coro
self.coro_tier_map[coro] = self
self.closed = False
self.cell = cell
self.sources = []
self.dests = []
if source:
if not isinstance(source, collections.Sequence):
source = [source]
source_tiers = []
for x_source in source:
if not isinstance(x_source, Tier):
x_source = self.coro_tier_map[x_source]
source_tiers.append(x_source)
if gatherby is not None:
gatherer = self.make_gatherer(cell, source_tiers, gatherby)
for x in source_tiers:
gatherer.add_source(x)
self.add_source(gatherer)
else:
for x in source_tiers:
self.add_source(x)
self.spec = spec
self.buffer_max_size = buffer
self.buffer = [] if buffer != 0 else None
def __repr__(self):
coro_name = self.coro and self.coro.__name__
return '<TaskTier at 0x%x for %s, sources: %d, dests: %d, closed: ' \
'%s>' % (id(self), coro_name, len(self.sources), len(self.dests),
self.closed)
@asyncio.coroutine
@asyncio.coroutine
def coord_wrap(self, *args):
""" Wrap the coroutine with coordination throttles. """
yield from self.cell.coord.start(self)
yield from self.coro(*args)
yield from self.cell.coord.finish(self)
@asyncio.coroutine
def emit(self, *args):
""" Send data to the next tier(s). This call can be delayed if the
coordinator thinks the backlog is too high for any of the emit
destinations. Likewise when buffering emit values prior to enqueuing
them we ask the coordinator if we should flush the buffer each time in
case the coordinator is managing the buffering by other metrics such
as latency. """
if self.buffer is not None:
self.buffer.extend(args)
if self.buffer_max_size is not None:
flush = len(self.buffer) >= self.buffer_max_size
else:
flush = yield from self.cell.coord.flush(self)
if flush:
yield from self.flush()
else:
for t in self.dests:
yield from t.enqueue_task(self, *args)
@asyncio.coroutine
def flush(self):
""" Flush the buffer of buffered tiers to our destination tiers. """
if self.buffer is None:
return
data = self.buffer
self.buffer = []
for x in self.dests:
yield from x.enqueue_task(self, *data)
def add_source(self, tier):
""" Schedule this tier to be called when another tier emits. """
tier.add_dest(self)
self.sources.append(tier)
def add_dest(self, tier):
""" Send data to this tier when we emit. """
self.dests.append(tier)
def close(self):
""" Free any potential cycles. """
self.cell = None
self.coro = None
self.buffer = None
del self.dests[:]
del self.sources[:]
|
mayfield/cellulario
|
cellulario/tier.py
|
Tier.coord_wrap
|
python
|
def coord_wrap(self, *args):
yield from self.cell.coord.start(self)
yield from self.coro(*args)
yield from self.cell.coord.finish(self)
|
Wrap the coroutine with coordination throttles.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/tier.py#L144-L148
| null |
class Tier(object):
""" A managed layer of IO operations. Generally speaking a tier should
consist of one or more IO operations that are of consistent complexity
with a single emit data type. A tier emits data instead of returning it.
Emissions flow from one tier to the next set of tiers until the final
tiers are reached.
A tier operates as a node in a directed acyclic graph (DAG). Tiers are
linked together by sourcing other tiers and are considered final when they
are not sourced by any other tier. A final tier or the HEAD tier in SCM
terms can emit values to the consumer of the IOCell itself. For example,
if T1 emits v1 and T2 sources T1 then the coroutine associated with T2 will
be run with an argument of v1. The emissions of T2 would be available to
the cell user. Example python 3.5 style code would be:
>>> @cell.tier()
... async def T1(route):
... await route.emit(1)
>>> @cell.tier(source=T1)
... async def T2(route, v1):
... await route.emit(v1 * 2)
>>> print(list(cell))
[2]
The same example in python 3.4:
>>> @cell.tier_coroutine()
... def T1(route):
... yield from route.emit(1)
>>> @cell.tier_coroutine(source=T1)
... def T2(route, v1):
... yield from route.emit(v1 * 2)
>>> print(list(cell))
[2]
There are different modes of operation with respect to sourcing and
emitting. A tier may source from more than one tier and the default mode
is to simply run a tier's coroutine for each emission of its source tiers.
An alternative mode is to gather the emissions from several source tiers
and group them by a unique key. When emissions from all the source tiers
have been gathered for a particular grouping key, the coroutine will be
run with an argument list featuring all the relevant emit values. It is a
sort of micro variant of map-reduce.
The work of a tier is done by a user defined `asyncio.coroutine` which is
automatically managed by the `IOCell` and `Coordinator` classes. The
`spec` attributes given by `IOCell.add_tier` are used to train the
coordinator. For example, the spec may indicate a concurrency factor or
buffering min/max values.
A tier contains routing information used to control how emit() calls flow.
The default mode for emit is broadcast style signal emission to any other
tiers that source from this tier. Alternatively the tier can be configured
to buffer emissions until a sufficient number of emissions are available.
The actual number of emit values buffered is controlled by the coordinator.
"""
coro_tier_map = weakref.WeakValueDictionary()
@classmethod
def make_gatherer(cls, cell, source_tiers, gatherby):
""" Produce a single source tier that gathers from a set of tiers when
the key function returns a unique result for each tier. """
pending = collections.defaultdict(dict)
tier_hashes = [hash(x) for x in source_tiers]
@asyncio.coroutine
def organize(route, *args):
srchash = hash(route.source)
key = gatherby(*args)
group = pending[key]
assert srchash not in group
group[srchash] = args
if len(group) == len(tier_hashes):
del pending[key]
yield from route.emit(*[group[x] for x in tier_hashes])
return cls(cell, organize)
def __init__(self, cell, coro, source=None, buffer=0, gatherby=None,
**spec):
if not asyncio.iscoroutinefunction(coro):
raise ValueError("Function argument must be a coroutine")
self.coro = coro
self.coro_tier_map[coro] = self
self.closed = False
self.cell = cell
self.sources = []
self.dests = []
if source:
if not isinstance(source, collections.Sequence):
source = [source]
source_tiers = []
for x_source in source:
if not isinstance(x_source, Tier):
x_source = self.coro_tier_map[x_source]
source_tiers.append(x_source)
if gatherby is not None:
gatherer = self.make_gatherer(cell, source_tiers, gatherby)
for x in source_tiers:
gatherer.add_source(x)
self.add_source(gatherer)
else:
for x in source_tiers:
self.add_source(x)
self.spec = spec
self.buffer_max_size = buffer
self.buffer = [] if buffer != 0 else None
def __repr__(self):
coro_name = self.coro and self.coro.__name__
return '<TaskTier at 0x%x for %s, sources: %d, dests: %d, closed: ' \
'%s>' % (id(self), coro_name, len(self.sources), len(self.dests),
self.closed)
    @asyncio.coroutine
    def enqueue_task(self, source, *args):
        """ Enqueue a task execution.  It will run in the background as soon
        as the coordinator clears it to do so.

        :param source: The tier (or gatherer) whose emission triggered us.
        :param args: The emitted values forwarded to our coroutine. """
        # Ask the coordinator for clearance before scheduling; this is where
        # backpressure is applied.
        yield from self.cell.coord.enqueue(self)
        # NOTE(review): Route and self.coord_wrap are defined elsewhere in
        # this module/class and are not visible in this excerpt.
        route = Route(source, self.cell, self.spec, self.emit)
        self.cell.loop.create_task(self.coord_wrap(route, *args))
        # To guarantee that the event loop works fluidly, we manually yield
        # once. The coordinator enqueue coroutine is not required to yield so
        # this ensures we avoid various forms of event starvation regardless.
        yield
@asyncio.coroutine
@asyncio.coroutine
def emit(self, *args):
""" Send data to the next tier(s). This call can be delayed if the
coordinator thinks the backlog is too high for any of the emit
destinations. Likewise when buffering emit values prior to enqueuing
them we ask the coordinator if we should flush the buffer each time in
case the coordinator is managing the buffering by other metrics such
as latency. """
if self.buffer is not None:
self.buffer.extend(args)
if self.buffer_max_size is not None:
flush = len(self.buffer) >= self.buffer_max_size
else:
flush = yield from self.cell.coord.flush(self)
if flush:
yield from self.flush()
else:
for t in self.dests:
yield from t.enqueue_task(self, *args)
@asyncio.coroutine
def flush(self):
""" Flush the buffer of buffered tiers to our destination tiers. """
if self.buffer is None:
return
data = self.buffer
self.buffer = []
for x in self.dests:
yield from x.enqueue_task(self, *data)
def add_source(self, tier):
""" Schedule this tier to be called when another tier emits. """
tier.add_dest(self)
self.sources.append(tier)
def add_dest(self, tier):
""" Send data to this tier when we emit. """
self.dests.append(tier)
def close(self):
""" Free any potential cycles. """
self.cell = None
self.coro = None
self.buffer = None
del self.dests[:]
del self.sources[:]
|
mayfield/cellulario
|
cellulario/tier.py
|
Tier.emit
|
python
|
def emit(self, *args):
if self.buffer is not None:
self.buffer.extend(args)
if self.buffer_max_size is not None:
flush = len(self.buffer) >= self.buffer_max_size
else:
flush = yield from self.cell.coord.flush(self)
if flush:
yield from self.flush()
else:
for t in self.dests:
yield from t.enqueue_task(self, *args)
|
Send data to the next tier(s). This call can be delayed if the
coordinator thinks the backlog is too high for any of the emit
destinations. Likewise when buffering emit values prior to enqueuing
them we ask the coordinator if we should flush the buffer each time in
case the coordinator is managing the buffering by other metrics such
as latency.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/tier.py#L151-L168
| null |
class Tier(object):
    """ A managed layer of IO operations.  Generally speaking a tier should
    consist of one or more IO operations that are of consistent complexity
    with a single emit data type.  A tier emits data instead of returning it.
    Emissions flow from one tier to the next set of tiers until the final
    tiers are reached.
    A tier operates as a node in a directed acyclic graph (DAG).  Tiers are
    linked together by sourcing other tiers and are considered final when they
    are not sourced by any other tier.  A final tier or the HEAD tier in SCM
    terms can emit values to the consumer of the IOCell itself.  For example,
    if T1 emits v1 and T2 sources T1 then the coroutine associated with T2
    will be run with an argument of v1.  The emissions of T2 would be
    available to the cell user.  Example python 3.5 style code would be:
    >>> @cell.tier()
    ... async def T1(route):
    ...     await route.emit(1)
    >>> @cell.tier(source=T1)
    ... async def T2(route, v1):
    ...     await route.emit(v1 * 2)
    >>> print(list(cell))
    [2]
    The same example in python 3.4:
    >>> @cell.tier_coroutine()
    ... def T1(route):
    ...     yield from route.emit(1)
    >>> @cell.tier_coroutine(source=T1)
    ... def T2(route, v1):
    ...     yield from route.emit(v1 * 2)
    >>> print(list(cell))
    [2]
    There are different modes of operation with respect to sourcing and
    emitting.  A tier may source from more than one tier and the default mode
    is to simply run a tier's coroutine for each emission of its source tiers.
    An alternative mode is to gather the emissions from several source tiers
    and group them by a unique key.  When emissions from all the source tiers
    have been gathered for a particular grouping key, the coroutine will be
    run with an argument list featuring all the relevant emit values.  It is a
    sort of micro variant of map-reduce.
    The work of a tier is done by a user defined `asyncio.coroutine` which is
    automatically managed by the `IOCell` and `Coordinator` classes.  The
    `spec` attributes given by `IOCell.add_tier` are used to train the
    coordinator.  For example, the spec may indicate a concurrency factor or
    buffering min/max values.
    A tier contains routing information used to control how emit() calls flow.
    The default mode for emit is broadcast style signal emission to any other
    tiers that source from this tier.  Alternatively the tier can be
    configured to buffer emissions until a sufficient number of emissions are
    available.  The actual number of emit values buffered is controlled by the
    coordinator.
    """

    # Maps a user coroutine function back to the Tier wrapping it; weak
    # values so a dropped Tier does not linger in the registry.
    coro_tier_map = weakref.WeakValueDictionary()

    @classmethod
    def make_gatherer(cls, cell, source_tiers, gatherby):
        """ Produce a single source tier that gathers from a set of tiers when
        the key function returns a unique result for each tier. """
        pending = collections.defaultdict(dict)
        # Preserve source order for the grouped re-emit below.
        tier_hashes = [hash(x) for x in source_tiers]

        @asyncio.coroutine
        def organize(route, *args):
            # Accumulate one emission per source tier under the user's key;
            # once every source has contributed, emit them in source order.
            srchash = hash(route.source)
            key = gatherby(*args)
            group = pending[key]
            assert srchash not in group
            group[srchash] = args
            if len(group) == len(tier_hashes):
                del pending[key]
                yield from route.emit(*[group[x] for x in tier_hashes])
        return cls(cell, organize)

    def __init__(self, cell, coro, source=None, buffer=0, gatherby=None,
                 **spec):
        if not asyncio.iscoroutinefunction(coro):
            raise ValueError("Function argument must be a coroutine")
        self.coro = coro
        self.coro_tier_map[coro] = self
        self.closed = False
        self.cell = cell
        self.sources = []
        self.dests = []
        if source:
            # collections.abc.Sequence: the bare `collections.Sequence`
            # alias was removed in Python 3.10.
            if not isinstance(source, collections.abc.Sequence):
                source = [source]
            source_tiers = []
            for x_source in source:
                if not isinstance(x_source, Tier):
                    # Map a previously added coroutine back to its Tier.
                    x_source = self.coro_tier_map[x_source]
                source_tiers.append(x_source)
            if gatherby is not None:
                # Interpose a gatherer tier that groups the sources' output.
                gatherer = self.make_gatherer(cell, source_tiers, gatherby)
                for x in source_tiers:
                    gatherer.add_source(x)
                self.add_source(gatherer)
            else:
                for x in source_tiers:
                    self.add_source(x)
        self.spec = spec
        self.buffer_max_size = buffer
        self.buffer = [] if buffer != 0 else None

    def __repr__(self):
        coro_name = self.coro and self.coro.__name__
        return '<TaskTier at 0x%x for %s, sources: %d, dests: %d, closed: ' \
               '%s>' % (id(self), coro_name, len(self.sources),
                        len(self.dests), self.closed)

    @asyncio.coroutine
    def enqueue_task(self, source, *args):
        """ Enqueue a task execution.  It will run in the background as soon
        as the coordinator clears it to do so. """
        yield from self.cell.coord.enqueue(self)
        route = Route(source, self.cell, self.spec, self.emit)
        self.cell.loop.create_task(self.coord_wrap(route, *args))
        # To guarantee that the event loop works fluidly, we manually yield
        # once. The coordinator enqueue coroutine is not required to yield so
        # this ensures we avoid various forms of event starvation regardless.
        yield

    @asyncio.coroutine
    def coord_wrap(self, *args):
        """ Wrap the coroutine with coordination throttles. """
        yield from self.cell.coord.start(self)
        yield from self.coro(*args)
        yield from self.cell.coord.finish(self)

    # Restored: this copy lacked `emit` although `enqueue_task` hands
    # `self.emit` to each Route; restored verbatim from sibling copies.
    @asyncio.coroutine
    def emit(self, *args):
        """ Send data to the next tier(s).  This call can be delayed if the
        coordinator thinks the backlog is too high for any of the emit
        destinations.  Likewise when buffering emit values prior to enqueuing
        them we ask the coordinator if we should flush the buffer each time in
        case the coordinator is managing the buffering by other metrics such
        as latency. """
        if self.buffer is not None:
            self.buffer.extend(args)
            if self.buffer_max_size is not None:
                flush = len(self.buffer) >= self.buffer_max_size
            else:
                flush = yield from self.cell.coord.flush(self)
            if flush:
                yield from self.flush()
        else:
            for t in self.dests:
                yield from t.enqueue_task(self, *args)

    # Fix: the decorator was duplicated here (two `@asyncio.coroutine`);
    # a single application is sufficient.
    @asyncio.coroutine
    def flush(self):
        """ Flush the buffer of buffered tiers to our destination tiers. """
        if self.buffer is None:
            return
        data = self.buffer
        self.buffer = []
        for x in self.dests:
            yield from x.enqueue_task(self, *data)

    def add_source(self, tier):
        """ Schedule this tier to be called when another tier emits. """
        tier.add_dest(self)
        self.sources.append(tier)

    def add_dest(self, tier):
        """ Send data to this tier when we emit. """
        self.dests.append(tier)

    def close(self):
        """ Free any potential cycles. """
        self.cell = None
        self.coro = None
        self.buffer = None
        del self.dests[:]
        del self.sources[:]
|
mayfield/cellulario
|
cellulario/tier.py
|
Tier.flush
|
python
|
def flush(self):
if self.buffer is None:
return
data = self.buffer
self.buffer = []
for x in self.dests:
yield from x.enqueue_task(self, *data)
|
Flush the buffer of buffered tiers to our destination tiers.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/tier.py#L171-L178
| null |
class Tier(object):
    """ A managed layer of IO operations.  Generally speaking a tier should
    consist of one or more IO operations that are of consistent complexity
    with a single emit data type.  A tier emits data instead of returning it.
    Emissions flow from one tier to the next set of tiers until the final
    tiers are reached.
    A tier operates as a node in a directed acyclic graph (DAG).  Tiers are
    linked together by sourcing other tiers and are considered final when they
    are not sourced by any other tier.  A final tier or the HEAD tier in SCM
    terms can emit values to the consumer of the IOCell itself.  For example,
    if T1 emits v1 and T2 sources T1 then the coroutine associated with T2
    will be run with an argument of v1.  The emissions of T2 would be
    available to the cell user.  Example python 3.5 style code would be:
    >>> @cell.tier()
    ... async def T1(route):
    ...     await route.emit(1)
    >>> @cell.tier(source=T1)
    ... async def T2(route, v1):
    ...     await route.emit(v1 * 2)
    >>> print(list(cell))
    [2]
    The same example in python 3.4:
    >>> @cell.tier_coroutine()
    ... def T1(route):
    ...     yield from route.emit(1)
    >>> @cell.tier_coroutine(source=T1)
    ... def T2(route, v1):
    ...     yield from route.emit(v1 * 2)
    >>> print(list(cell))
    [2]
    There are different modes of operation with respect to sourcing and
    emitting.  A tier may source from more than one tier and the default mode
    is to simply run a tier's coroutine for each emission of its source tiers.
    An alternative mode is to gather the emissions from several source tiers
    and group them by a unique key.  When emissions from all the source tiers
    have been gathered for a particular grouping key, the coroutine will be
    run with an argument list featuring all the relevant emit values.  It is a
    sort of micro variant of map-reduce.
    The work of a tier is done by a user defined `asyncio.coroutine` which is
    automatically managed by the `IOCell` and `Coordinator` classes.  The
    `spec` attributes given by `IOCell.add_tier` are used to train the
    coordinator.  For example, the spec may indicate a concurrency factor or
    buffering min/max values.
    A tier contains routing information used to control how emit() calls flow.
    The default mode for emit is broadcast style signal emission to any other
    tiers that source from this tier.  Alternatively the tier can be
    configured to buffer emissions until a sufficient number of emissions are
    available.  The actual number of emit values buffered is controlled by the
    coordinator.
    """

    # Maps a user coroutine function back to the Tier wrapping it; weak
    # values so a dropped Tier does not linger in the registry.
    coro_tier_map = weakref.WeakValueDictionary()

    @classmethod
    def make_gatherer(cls, cell, source_tiers, gatherby):
        """ Produce a single source tier that gathers from a set of tiers when
        the key function returns a unique result for each tier. """
        pending = collections.defaultdict(dict)
        # Preserve source order for the grouped re-emit below.
        tier_hashes = [hash(x) for x in source_tiers]

        @asyncio.coroutine
        def organize(route, *args):
            # Accumulate one emission per source tier under the user's key;
            # once every source has contributed, emit them in source order.
            srchash = hash(route.source)
            key = gatherby(*args)
            group = pending[key]
            assert srchash not in group
            group[srchash] = args
            if len(group) == len(tier_hashes):
                del pending[key]
                yield from route.emit(*[group[x] for x in tier_hashes])
        return cls(cell, organize)

    def __init__(self, cell, coro, source=None, buffer=0, gatherby=None,
                 **spec):
        if not asyncio.iscoroutinefunction(coro):
            raise ValueError("Function argument must be a coroutine")
        self.coro = coro
        self.coro_tier_map[coro] = self
        self.closed = False
        self.cell = cell
        self.sources = []
        self.dests = []
        if source:
            # collections.abc.Sequence: the bare `collections.Sequence`
            # alias was removed in Python 3.10.
            if not isinstance(source, collections.abc.Sequence):
                source = [source]
            source_tiers = []
            for x_source in source:
                if not isinstance(x_source, Tier):
                    # Map a previously added coroutine back to its Tier.
                    x_source = self.coro_tier_map[x_source]
                source_tiers.append(x_source)
            if gatherby is not None:
                # Interpose a gatherer tier that groups the sources' output.
                gatherer = self.make_gatherer(cell, source_tiers, gatherby)
                for x in source_tiers:
                    gatherer.add_source(x)
                self.add_source(gatherer)
            else:
                for x in source_tiers:
                    self.add_source(x)
        self.spec = spec
        self.buffer_max_size = buffer
        self.buffer = [] if buffer != 0 else None

    def __repr__(self):
        coro_name = self.coro and self.coro.__name__
        return '<TaskTier at 0x%x for %s, sources: %d, dests: %d, closed: ' \
               '%s>' % (id(self), coro_name, len(self.sources),
                        len(self.dests), self.closed)

    @asyncio.coroutine
    def enqueue_task(self, source, *args):
        """ Enqueue a task execution.  It will run in the background as soon
        as the coordinator clears it to do so. """
        yield from self.cell.coord.enqueue(self)
        route = Route(source, self.cell, self.spec, self.emit)
        self.cell.loop.create_task(self.coord_wrap(route, *args))
        # To guarantee that the event loop works fluidly, we manually yield
        # once. The coordinator enqueue coroutine is not required to yield so
        # this ensures we avoid various forms of event starvation regardless.
        yield

    @asyncio.coroutine
    def coord_wrap(self, *args):
        """ Wrap the coroutine with coordination throttles. """
        yield from self.cell.coord.start(self)
        yield from self.coro(*args)
        yield from self.cell.coord.finish(self)

    @asyncio.coroutine
    def emit(self, *args):
        """ Send data to the next tier(s).  This call can be delayed if the
        coordinator thinks the backlog is too high for any of the emit
        destinations.  Likewise when buffering emit values prior to enqueuing
        them we ask the coordinator if we should flush the buffer each time in
        case the coordinator is managing the buffering by other metrics such
        as latency. """
        if self.buffer is not None:
            self.buffer.extend(args)
            if self.buffer_max_size is not None:
                flush = len(self.buffer) >= self.buffer_max_size
            else:
                flush = yield from self.cell.coord.flush(self)
            if flush:
                yield from self.flush()
        else:
            for t in self.dests:
                yield from t.enqueue_task(self, *args)

    # Restored: this copy lacked `flush` although `emit` calls `self.flush()`;
    # restored verbatim from sibling copies.
    @asyncio.coroutine
    def flush(self):
        """ Flush the buffer of buffered tiers to our destination tiers. """
        if self.buffer is None:
            return
        data = self.buffer
        self.buffer = []
        for x in self.dests:
            yield from x.enqueue_task(self, *data)

    # Fix: a stray `@asyncio.coroutine` was applied to this synchronous
    # method, which would have turned `self.add_source(x)` calls in
    # __init__ into no-op coroutine factories.
    def add_source(self, tier):
        """ Schedule this tier to be called when another tier emits. """
        tier.add_dest(self)
        self.sources.append(tier)

    def add_dest(self, tier):
        """ Send data to this tier when we emit. """
        self.dests.append(tier)

    def close(self):
        """ Free any potential cycles. """
        self.cell = None
        self.coro = None
        self.buffer = None
        del self.dests[:]
        del self.sources[:]
|
mayfield/cellulario
|
cellulario/tier.py
|
Tier.add_source
|
python
|
def add_source(self, tier):
tier.add_dest(self)
self.sources.append(tier)
|
Schedule this tier to be called when another tier emits.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/tier.py#L180-L183
| null |
class Tier(object):
    """ A managed layer of IO operations.  Generally speaking a tier should
    consist of one or more IO operations that are of consistent complexity
    with a single emit data type.  A tier emits data instead of returning it.
    Emissions flow from one tier to the next set of tiers until the final
    tiers are reached.
    A tier operates as a node in a directed acyclic graph (DAG).  Tiers are
    linked together by sourcing other tiers and are considered final when they
    are not sourced by any other tier.  A final tier or the HEAD tier in SCM
    terms can emit values to the consumer of the IOCell itself.  For example,
    if T1 emits v1 and T2 sources T1 then the coroutine associated with T2
    will be run with an argument of v1.  The emissions of T2 would be
    available to the cell user.  Example python 3.5 style code would be:
    >>> @cell.tier()
    ... async def T1(route):
    ...     await route.emit(1)
    >>> @cell.tier(source=T1)
    ... async def T2(route, v1):
    ...     await route.emit(v1 * 2)
    >>> print(list(cell))
    [2]
    The same example in python 3.4:
    >>> @cell.tier_coroutine()
    ... def T1(route):
    ...     yield from route.emit(1)
    >>> @cell.tier_coroutine(source=T1)
    ... def T2(route, v1):
    ...     yield from route.emit(v1 * 2)
    >>> print(list(cell))
    [2]
    There are different modes of operation with respect to sourcing and
    emitting.  A tier may source from more than one tier and the default mode
    is to simply run a tier's coroutine for each emission of its source tiers.
    An alternative mode is to gather the emissions from several source tiers
    and group them by a unique key.  When emissions from all the source tiers
    have been gathered for a particular grouping key, the coroutine will be
    run with an argument list featuring all the relevant emit values.  It is a
    sort of micro variant of map-reduce.
    The work of a tier is done by a user defined `asyncio.coroutine` which is
    automatically managed by the `IOCell` and `Coordinator` classes.  The
    `spec` attributes given by `IOCell.add_tier` are used to train the
    coordinator.  For example, the spec may indicate a concurrency factor or
    buffering min/max values.
    A tier contains routing information used to control how emit() calls flow.
    The default mode for emit is broadcast style signal emission to any other
    tiers that source from this tier.  Alternatively the tier can be
    configured to buffer emissions until a sufficient number of emissions are
    available.  The actual number of emit values buffered is controlled by the
    coordinator.
    """

    # Maps a user coroutine function back to the Tier wrapping it; weak
    # values so a dropped Tier does not linger in the registry.
    coro_tier_map = weakref.WeakValueDictionary()

    @classmethod
    def make_gatherer(cls, cell, source_tiers, gatherby):
        """ Produce a single source tier that gathers from a set of tiers when
        the key function returns a unique result for each tier. """
        pending = collections.defaultdict(dict)
        # Preserve source order for the grouped re-emit below.
        tier_hashes = [hash(x) for x in source_tiers]

        @asyncio.coroutine
        def organize(route, *args):
            # Accumulate one emission per source tier under the user's key;
            # once every source has contributed, emit them in source order.
            srchash = hash(route.source)
            key = gatherby(*args)
            group = pending[key]
            assert srchash not in group
            group[srchash] = args
            if len(group) == len(tier_hashes):
                del pending[key]
                yield from route.emit(*[group[x] for x in tier_hashes])
        return cls(cell, organize)

    def __init__(self, cell, coro, source=None, buffer=0, gatherby=None,
                 **spec):
        if not asyncio.iscoroutinefunction(coro):
            raise ValueError("Function argument must be a coroutine")
        self.coro = coro
        self.coro_tier_map[coro] = self
        self.closed = False
        self.cell = cell
        self.sources = []
        self.dests = []
        if source:
            # collections.abc.Sequence: the bare `collections.Sequence`
            # alias was removed in Python 3.10.
            if not isinstance(source, collections.abc.Sequence):
                source = [source]
            source_tiers = []
            for x_source in source:
                if not isinstance(x_source, Tier):
                    # Map a previously added coroutine back to its Tier.
                    x_source = self.coro_tier_map[x_source]
                source_tiers.append(x_source)
            if gatherby is not None:
                # Interpose a gatherer tier that groups the sources' output.
                gatherer = self.make_gatherer(cell, source_tiers, gatherby)
                for x in source_tiers:
                    gatherer.add_source(x)
                self.add_source(gatherer)
            else:
                for x in source_tiers:
                    self.add_source(x)
        self.spec = spec
        self.buffer_max_size = buffer
        self.buffer = [] if buffer != 0 else None

    def __repr__(self):
        coro_name = self.coro and self.coro.__name__
        return '<TaskTier at 0x%x for %s, sources: %d, dests: %d, closed: ' \
               '%s>' % (id(self), coro_name, len(self.sources),
                        len(self.dests), self.closed)

    @asyncio.coroutine
    def enqueue_task(self, source, *args):
        """ Enqueue a task execution.  It will run in the background as soon
        as the coordinator clears it to do so. """
        yield from self.cell.coord.enqueue(self)
        route = Route(source, self.cell, self.spec, self.emit)
        self.cell.loop.create_task(self.coord_wrap(route, *args))
        # To guarantee that the event loop works fluidly, we manually yield
        # once. The coordinator enqueue coroutine is not required to yield so
        # this ensures we avoid various forms of event starvation regardless.
        yield

    @asyncio.coroutine
    def coord_wrap(self, *args):
        """ Wrap the coroutine with coordination throttles. """
        yield from self.cell.coord.start(self)
        yield from self.coro(*args)
        yield from self.cell.coord.finish(self)

    @asyncio.coroutine
    def emit(self, *args):
        """ Send data to the next tier(s).  This call can be delayed if the
        coordinator thinks the backlog is too high for any of the emit
        destinations.  Likewise when buffering emit values prior to enqueuing
        them we ask the coordinator if we should flush the buffer each time in
        case the coordinator is managing the buffering by other metrics such
        as latency. """
        if self.buffer is not None:
            self.buffer.extend(args)
            if self.buffer_max_size is not None:
                flush = len(self.buffer) >= self.buffer_max_size
            else:
                flush = yield from self.cell.coord.flush(self)
            if flush:
                yield from self.flush()
        else:
            for t in self.dests:
                yield from t.enqueue_task(self, *args)

    @asyncio.coroutine
    def flush(self):
        """ Flush the buffer of buffered tiers to our destination tiers. """
        if self.buffer is None:
            return
        data = self.buffer
        self.buffer = []
        for x in self.dests:
            yield from x.enqueue_task(self, *data)

    # Restored: this copy lacked `add_source` although __init__ and
    # make_gatherer both call it; restored verbatim from sibling copies.
    def add_source(self, tier):
        """ Schedule this tier to be called when another tier emits. """
        tier.add_dest(self)
        self.sources.append(tier)

    def add_dest(self, tier):
        """ Send data to this tier when we emit. """
        self.dests.append(tier)

    def close(self):
        """ Free any potential cycles. """
        self.cell = None
        self.coro = None
        self.buffer = None
        del self.dests[:]
        del self.sources[:]
|
mayfield/cellulario
|
cellulario/tier.py
|
Tier.close
|
python
|
def close(self):
self.cell = None
self.coro = None
self.buffer = None
del self.dests[:]
del self.sources[:]
|
Free any potential cycles.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/tier.py#L189-L195
| null |
class Tier(object):
    """ A managed layer of IO operations.  Generally speaking a tier should
    consist of one or more IO operations that are of consistent complexity
    with a single emit data type.  A tier emits data instead of returning it.
    Emissions flow from one tier to the next set of tiers until the final
    tiers are reached.
    A tier operates as a node in a directed acyclic graph (DAG).  Tiers are
    linked together by sourcing other tiers and are considered final when they
    are not sourced by any other tier.  A final tier or the HEAD tier in SCM
    terms can emit values to the consumer of the IOCell itself.  For example,
    if T1 emits v1 and T2 sources T1 then the coroutine associated with T2
    will be run with an argument of v1.  The emissions of T2 would be
    available to the cell user.  Example python 3.5 style code would be:
    >>> @cell.tier()
    ... async def T1(route):
    ...     await route.emit(1)
    >>> @cell.tier(source=T1)
    ... async def T2(route, v1):
    ...     await route.emit(v1 * 2)
    >>> print(list(cell))
    [2]
    The same example in python 3.4:
    >>> @cell.tier_coroutine()
    ... def T1(route):
    ...     yield from route.emit(1)
    >>> @cell.tier_coroutine(source=T1)
    ... def T2(route, v1):
    ...     yield from route.emit(v1 * 2)
    >>> print(list(cell))
    [2]
    There are different modes of operation with respect to sourcing and
    emitting.  A tier may source from more than one tier and the default mode
    is to simply run a tier's coroutine for each emission of its source tiers.
    An alternative mode is to gather the emissions from several source tiers
    and group them by a unique key.  When emissions from all the source tiers
    have been gathered for a particular grouping key, the coroutine will be
    run with an argument list featuring all the relevant emit values.  It is a
    sort of micro variant of map-reduce.
    The work of a tier is done by a user defined `asyncio.coroutine` which is
    automatically managed by the `IOCell` and `Coordinator` classes.  The
    `spec` attributes given by `IOCell.add_tier` are used to train the
    coordinator.  For example, the spec may indicate a concurrency factor or
    buffering min/max values.
    A tier contains routing information used to control how emit() calls flow.
    The default mode for emit is broadcast style signal emission to any other
    tiers that source from this tier.  Alternatively the tier can be
    configured to buffer emissions until a sufficient number of emissions are
    available.  The actual number of emit values buffered is controlled by the
    coordinator.
    """

    # Maps a user coroutine function back to the Tier wrapping it; weak
    # values so a dropped Tier does not linger in the registry.
    coro_tier_map = weakref.WeakValueDictionary()

    @classmethod
    def make_gatherer(cls, cell, source_tiers, gatherby):
        """ Produce a single source tier that gathers from a set of tiers when
        the key function returns a unique result for each tier. """
        pending = collections.defaultdict(dict)
        # Preserve source order for the grouped re-emit below.
        tier_hashes = [hash(x) for x in source_tiers]

        @asyncio.coroutine
        def organize(route, *args):
            # Accumulate one emission per source tier under the user's key;
            # once every source has contributed, emit them in source order.
            srchash = hash(route.source)
            key = gatherby(*args)
            group = pending[key]
            assert srchash not in group
            group[srchash] = args
            if len(group) == len(tier_hashes):
                del pending[key]
                yield from route.emit(*[group[x] for x in tier_hashes])
        return cls(cell, organize)

    def __init__(self, cell, coro, source=None, buffer=0, gatherby=None,
                 **spec):
        if not asyncio.iscoroutinefunction(coro):
            raise ValueError("Function argument must be a coroutine")
        self.coro = coro
        self.coro_tier_map[coro] = self
        self.closed = False
        self.cell = cell
        self.sources = []
        self.dests = []
        if source:
            # collections.abc.Sequence: the bare `collections.Sequence`
            # alias was removed in Python 3.10.
            if not isinstance(source, collections.abc.Sequence):
                source = [source]
            source_tiers = []
            for x_source in source:
                if not isinstance(x_source, Tier):
                    # Map a previously added coroutine back to its Tier.
                    x_source = self.coro_tier_map[x_source]
                source_tiers.append(x_source)
            if gatherby is not None:
                # Interpose a gatherer tier that groups the sources' output.
                gatherer = self.make_gatherer(cell, source_tiers, gatherby)
                for x in source_tiers:
                    gatherer.add_source(x)
                self.add_source(gatherer)
            else:
                for x in source_tiers:
                    self.add_source(x)
        self.spec = spec
        self.buffer_max_size = buffer
        self.buffer = [] if buffer != 0 else None

    def __repr__(self):
        coro_name = self.coro and self.coro.__name__
        return '<TaskTier at 0x%x for %s, sources: %d, dests: %d, closed: ' \
               '%s>' % (id(self), coro_name, len(self.sources),
                        len(self.dests), self.closed)

    @asyncio.coroutine
    def enqueue_task(self, source, *args):
        """ Enqueue a task execution.  It will run in the background as soon
        as the coordinator clears it to do so. """
        yield from self.cell.coord.enqueue(self)
        route = Route(source, self.cell, self.spec, self.emit)
        self.cell.loop.create_task(self.coord_wrap(route, *args))
        # To guarantee that the event loop works fluidly, we manually yield
        # once. The coordinator enqueue coroutine is not required to yield so
        # this ensures we avoid various forms of event starvation regardless.
        yield

    @asyncio.coroutine
    def coord_wrap(self, *args):
        """ Wrap the coroutine with coordination throttles. """
        yield from self.cell.coord.start(self)
        yield from self.coro(*args)
        yield from self.cell.coord.finish(self)

    @asyncio.coroutine
    def emit(self, *args):
        """ Send data to the next tier(s).  This call can be delayed if the
        coordinator thinks the backlog is too high for any of the emit
        destinations.  Likewise when buffering emit values prior to enqueuing
        them we ask the coordinator if we should flush the buffer each time in
        case the coordinator is managing the buffering by other metrics such
        as latency. """
        if self.buffer is not None:
            self.buffer.extend(args)
            if self.buffer_max_size is not None:
                flush = len(self.buffer) >= self.buffer_max_size
            else:
                flush = yield from self.cell.coord.flush(self)
            if flush:
                yield from self.flush()
        else:
            for t in self.dests:
                yield from t.enqueue_task(self, *args)

    @asyncio.coroutine
    def flush(self):
        """ Flush the buffer of buffered tiers to our destination tiers. """
        if self.buffer is None:
            return
        data = self.buffer
        self.buffer = []
        for x in self.dests:
            yield from x.enqueue_task(self, *data)

    def add_source(self, tier):
        """ Schedule this tier to be called when another tier emits. """
        tier.add_dest(self)
        self.sources.append(tier)

    def add_dest(self, tier):
        """ Send data to this tier when we emit. """
        self.dests.append(tier)

    # Restored: this copy lacked `close`, which the owning cell invokes
    # during teardown per sibling copies; restored verbatim.
    def close(self):
        """ Free any potential cycles. """
        self.cell = None
        self.coro = None
        self.buffer = None
        del self.dests[:]
        del self.sources[:]
|
mayfield/cellulario
|
cellulario/iocell.py
|
IOCell.init_event_loop
|
python
|
def init_event_loop(self):
self.loop = asyncio.new_event_loop()
self.loop.set_debug(self.debug)
if hasattr(self.loop, '_set_coroutine_wrapper'):
self.loop._set_coroutine_wrapper(self.debug)
elif self.debug:
warnings.warn("Cannot set debug on loop: %s" % self.loop)
self.loop_policy = IOCellEventLoopPolicy(self.loop)
if not hasattr(self.loop, '_exception_handler'):
warnings.warn("Cannot save exception handler for: %s" % self.loop)
self.loop_exception_handler_save = None
else:
self.loop_exception_handler_save = self.loop._exception_handler
self.loop.set_exception_handler(self.loop_exception_handler)
|
Every cell should have its own event loop for proper containment.
The type of event loop is not so important however.
|
train
|
https://github.com/mayfield/cellulario/blob/e9dc10532a0357bc90ebaa2655b36822f9249673/cellulario/iocell.py#L82-L97
| null |
class IOCell(object):
""" A consolidated multi-level bundle of IO operations. This is a useful
facility when doing tiered IO calls such as http requests to get a list
of things and then a fanout of http requests on each of those things and
so forth. The aim of this code is to provide simplified inputs and
outputs to what is otherwise a complex arrangement of IO interactions
and dependencies. Namely the users of this code will use the generator
output to they can iterate over the stream of finalized results as they
are made available for export.
Mechanically this produces a classic generator to the outside world that
internally uses an async event loop to coordinate concurrent tasks.
The tasks may be used to to cause further activity on the output stream.
That is, the initial work orders may be tasks used to seed more work.
Think of this as a portable IO loop that's wrapped up and managed for the
context of a single generator to the outside world. The calling code
will work in normal blocking style. """
Tier = tier.Tier
    def __init__(self, coord='noop', debug=DEBUG):
        """ Build a cell with its own private event loop.

        :param coord: Either an AbstractCellCoordinator instance or the name
            of a registered coordinator strategy.
        :param debug: Enables loop debug mode and extra warnings. """
        if isinstance(coord, coordination.AbstractCellCoordinator):
            self.coord = coord
        else:
            self.coord = self.make_coord(coord)
        self.debug = debug
        self.output_buffer = collections.deque()  # finished results awaiting the consumer
        self.pending_exception = None  # first task exception, surfaced to the consumer
        self.closed = False
        self.tiers = []
        self.tiers_coro_map = {}  # user coroutine -> Tier
        self.cleaners = []  # user cleanup coroutines run when the cell is done
        self.finalized = False
        self.init_event_loop()
    def cleanup_event_loop(self):
        """ Cleanup an event loop and close it down forever. """
        # NOTE(review): asyncio.Task.all_tasks was removed in Python 3.9;
        # modern code would use asyncio.all_tasks (different semantics).
        for task in asyncio.Task.all_tasks(loop=self.loop):
            if self.debug:
                warnings.warn('Cancelling task: %s' % task)
            # _log_destroy_pending is a private asyncio Task attribute;
            # setting it silences the "task was destroyed pending" warning.
            task._log_destroy_pending = False
            task.cancel()
        self.loop.close()
        # Restore whatever exception handler the loop had before we took over.
        self.loop.set_exception_handler(self.loop_exception_handler_save)
        self.loop_exception_handler_save = None
        self.loop_policy = None
        self.loop = None
    def make_coord(self, name):
        """ Instantiate the coordinator strategy registered under *name*. """
        return coordination.coordinators[name]()
    def done(self):
        """ Return True when every task on this cell's loop has completed. """
        # NOTE(review): asyncio.Task.all_tasks was removed in Python 3.9.
        return all(x.done() for x in asyncio.Task.all_tasks(loop=self.loop))
    def assertNotFinalized(self):
        """ Ensure the cell is not used more than once. """
        if self.finalized:
            # finalize() flips this flag; any mutation afterwards is an error.
            raise RuntimeError('Already finalized: %s' % self)
    def add_tier(self, coro, **kwargs):
        """ Add a coroutine to the cell as a task tier. The source can be a
        single value or a list of either `Tier` types or coroutine functions
        already added to a `Tier` via `add_tier`. """
        self.assertNotFinalized()
        assert asyncio.iscoroutinefunction(coro)
        tier = self.Tier(self, coro, **kwargs)
        self.tiers.append(tier)
        # Lets later tiers name their source by the bare coroutine function.
        self.tiers_coro_map[coro] = tier
        return tier
    def append_tier(self, coro, **kwargs):
        """ Implicitly source from the tail tier like a pipe. """
        # The most recently added tier (if any) becomes this tier's source.
        source = self.tiers[-1] if self.tiers else None
        return self.add_tier(coro, source=source, **kwargs)
def add_cleaner(self, coro):
    """ Add a coroutine to run after the cell is done. This is for the
    user to perform any cleanup such as closing sockets. """
    # Cleaners may only be registered before finalize() runs.
    self.assertNotFinalized()
    self.cleaners.append(coro)
def tier(self, *args, append=True, source=None, **kwargs):
    """ Function decorator for a tier coroutine. If the function being
    decorated is not already a coroutine function it will be wrapped. """
    # A single bare callable means the decorator was used without being
    # called, i.e. ``@cell.tier`` instead of ``@cell.tier()``; reject it.
    if len(args) == 1 and not kwargs and callable(args[0]):
        raise TypeError('Uncalled decorator syntax is invalid')
    def decorator(coro):
        # NOTE(review): asyncio.coroutine is a legacy wrapper removed in
        # Python 3.11; this code targets the old asyncio interface.
        if not asyncio.iscoroutinefunction(coro):
            coro = asyncio.coroutine(coro)
        # With no explicit source, chain off the tail tier; otherwise
        # register with the given source(s).
        if append and source is None:
            self.append_tier(coro, *args, **kwargs)
        else:
            self.add_tier(coro, *args, source=source, **kwargs)
        return coro
    return decorator
def cleaner(self, coro):
    """Function decorator registering *coro* as a cleanup coroutine."""
    # Legacy-asyncio wrapping for plain functions, as in tier().
    wrapped = coro if asyncio.iscoroutinefunction(coro) \
        else asyncio.coroutine(coro)
    self.add_cleaner(wrapped)
    return wrapped
def finalize(self):
    """Wire up the final data flow and freeze the cell.

    Tiers with no sources become "starters" (seeded with an initial
    work order) and tiers with no destinations feed the output buffer.
    After this runs the cell can no longer be modified.  Returns the
    list of starter tiers.
    """
    self.assertNotFinalized()
    starters = [t for t in self.tiers if not t.sources]
    finishers = [t for t in self.tiers if not t.dests]
    # Terminal tiers all flow into the user-facing output buffer.
    self.add_tier(self.output_feed, source=finishers)
    self.coord.setup_wrap(self)
    self.finalized = True
    return starters
@asyncio.coroutine
def output_feed(self, route, *args):
    """ Simplify arguments and store them in the `output` buffer for
    yielding to the user. """
    # `route` is supplied by the tier machinery and intentionally unused.
    self.output_buffer.extend(args)
def loop_exception_handler(self, loop, context):
    # Installed on the event loop by init_event_loop().  Captures the
    # first unhandled exception so _output() can re-raise it in the
    # caller's context; non-exception error reports are delegated to the
    # previously-installed (or default) handler.
    # NOTE(review): the source's indentation was ambiguous here; this
    # reading pairs the elif with `if exc:` -- confirm against history.
    exc = context.get('exception')
    if exc:
        # Only the first pending exception is kept; later ones are
        # dropped until _output() clears it.
        if not self.pending_exception:
            self.pending_exception = exc
    elif self.loop_exception_handler_save:
        return self.loop_exception_handler_save(loop, context)
    else:
        return self.loop.default_exception_handler(context)
def output(self):
    """Produce a classic blocking generator over the cell's results.

    Finalizes the cell, then streams results as the internal event loop
    produces them; the cell is closed when the generator is exhausted
    or abandoned.
    """
    starters = self.finalize()
    try:
        for item in self._output(starters):
            yield item
    finally:
        self.close()
def event_loop(self):
    """ Run the event loop once.

    When the loop exposes CPython's private ``_run_once`` helper, call
    it directly for a single selector/callback cycle; otherwise fall
    back to stopping the loop immediately after it starts.
    """
    # BUG FIX: the attribute name previously had a leading dot
    # ('._run_once'), so hasattr() could never match and the direct
    # path was dead code -- the fallback branch always ran.
    if hasattr(self.loop, '_run_once'):
        # _run_once is driven outside run_forever(), so claim thread
        # ownership of the loop for the duration of the call.
        self.loop._thread_id = threading.get_ident()
        try:
            self.loop._run_once()
        finally:
            self.loop._thread_id = None
    else:
        self.loop.call_soon(self.loop.stop)
        self.loop.run_forever()
def _output(self, starters):
    # Core pump: seed the starter tiers, then alternate between
    # draining the output buffer to the caller and turning the event
    # loop until everything is done and flushed.
    for x in starters:
        # Starters take a single None work order to kick things off.
        self.loop.create_task(x.enqueue_task(None))
    while True:
        # Yield anything already produced before doing more work.
        while self.output_buffer:
            yield self.output_buffer.popleft()
        if not self.done():
            with self.loop_policy:
                self.event_loop()
            # Surface the first exception captured by the loop's
            # exception handler in the caller's context.
            if self.pending_exception:
                exc = self.pending_exception
                self.pending_exception = None
                try:
                    raise exc
                finally:
                    # Break the traceback reference cycle.
                    del exc
        else:
            # All tasks done: flush any tier buffers; if nothing was
            # flushed and no output remains, we are finished.
            flushed = False
            for t in self.tiers:
                if t.buffer:
                    self.loop.create_task(t.flush())
                    flushed = True
            if not flushed and not self.output_buffer:
                break
    # Run user cleanup coroutines to completion before returning.
    with self.loop_policy:
        self.loop.run_until_complete(self.clean())
@asyncio.coroutine
def clean(self):
    """ Run all of the cleaners added by the user. """
    # NOTE(review): @asyncio.coroutine and asyncio.wait(loop=...) are
    # legacy APIs removed in modern Python; this targets old asyncio.
    if self.cleaners:
        yield from asyncio.wait([x() for x in self.cleaners],
                                loop=self.loop)
def close(self):
    # Idempotent teardown: shut down the coordinator and event loop
    # (only if they were wired up), then drop every reference so the
    # cell cannot be reused.  Ordering matters: coordinator first, then
    # the loop, then the tiers.
    if self.closed:
        return
    self.closed = True
    if self.finalized:
        self.coord.close_wrap()
        self.cleanup_event_loop()
        for x in self.tiers:
            x.close()
    self.tiers = None
    self.tiers_coro_map = None
    self.cleaners = None
    self.coord = None
def __iter__(self):
    # Iterating a cell is the same as consuming its output() generator.
    return self.output()
def __del__(self):
    # Best-effort teardown if the user never closed the cell explicitly.
    self.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.