repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
williamjameshandley/fgivenx | fgivenx/plot.py | plot | python | def plot(x, y, z, ax=None, **kwargs):
r"""
Plot iso-probability mass function, converted to sigmas.
Parameters
----------
x, y, z : numpy arrays
Same as arguments to :func:`matplotlib.pyplot.contour`
ax: axes object, optional
:class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to
get the last axis used, or create a new one.
colors: color scheme, optional
:class:`matplotlib.colors.LinearSegmentedColormap`
Color scheme to plot with. Recommend plotting in reverse
(Default: :class:`matplotlib.pyplot.cm.Reds_r`)
smooth: float, optional
Percentage by which to smooth the contours.
(Default: no smoothing)
contour_line_levels: List[float], optional
Contour lines to be plotted. (Default: [1,2])
linewidths: float, optional
Thickness of contour lines. (Default: 0.3)
contour_color_levels: List[float], optional
Contour color levels.
(Default: `numpy.arange(0, contour_line_levels[-1] + 1, fineness)`)
fineness: float, optional
Spacing of contour color levels. (Default: 0.1)
lines: bool, optional
(Default: True)
rasterize_contours: bool, optional
Rasterize the contours while keeping the lines, text etc in vector
format. Useful for reducing file size bloat and making printing
easier when you have dense contours.
(Default: False)
Returns
-------
cbar: color bar
:class:`matplotlib.contour.QuadContourSet`
Colors to create a global colour bar
"""
if ax is None:
ax = matplotlib.pyplot.gca()
# Get inputs
colors = kwargs.pop('colors', matplotlib.pyplot.cm.Reds_r)
smooth = kwargs.pop('smooth', False)
linewidths = kwargs.pop('linewidths', 0.3)
contour_line_levels = kwargs.pop('contour_line_levels', [1, 2, 3])
fineness = kwargs.pop('fineness', 0.5)
default_color_levels = numpy.arange(0, contour_line_levels[-1] + 1,
fineness)
contour_color_levels = kwargs.pop('contour_color_levels',
default_color_levels)
rasterize_contours = kwargs.pop('rasterize_contours', False)
lines = kwargs.pop('lines', True)
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
# Convert to sigmas
z = numpy.sqrt(2) * scipy.special.erfinv(1 - z)
# Gaussian filter if desired the sigmas by a factor of smooth%
if smooth:
sigma = smooth*numpy.array(z.shape)/100.0
z = scipy.ndimage.gaussian_filter(z, sigma=sigma, order=0)
# Plot the filled contours onto the axis ax
cbar = ax.contourf(x, y, z, cmap=colors, levels=contour_color_levels)
# Rasterize contours (the rest of the figure stays in vector format)
if rasterize_contours:
for c in cbar.collections:
c.set_rasterized(True)
# Remove those annoying white lines
for c in cbar.collections:
c.set_edgecolor("face")
# Plot some sigma-based contour lines
if lines:
ax.contour(x, y, z, colors='k', linewidths=linewidths,
levels=contour_line_levels)
# Return the contours for use as a colourbar later
return cbar | r"""
Plot iso-probability mass function, converted to sigmas.
Parameters
----------
x, y, z : numpy arrays
Same as arguments to :func:`matplotlib.pyplot.contour`
ax: axes object, optional
:class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to
get the last axis used, or create a new one.
colors: color scheme, optional
:class:`matplotlib.colors.LinearSegmentedColormap`
Color scheme to plot with. Recommend plotting in reverse
(Default: :class:`matplotlib.pyplot.cm.Reds_r`)
smooth: float, optional
Percentage by which to smooth the contours.
(Default: no smoothing)
contour_line_levels: List[float], optional
Contour lines to be plotted. (Default: [1,2])
linewidths: float, optional
Thickness of contour lines. (Default: 0.3)
contour_color_levels: List[float], optional
Contour color levels.
(Default: `numpy.arange(0, contour_line_levels[-1] + 1, fineness)`)
fineness: float, optional
Spacing of contour color levels. (Default: 0.1)
lines: bool, optional
(Default: True)
rasterize_contours: bool, optional
Rasterize the contours while keeping the lines, text etc in vector
format. Useful for reducing file size bloat and making printing
easier when you have dense contours.
(Default: False)
Returns
-------
cbar: color bar
:class:`matplotlib.contour.QuadContourSet`
Colors to create a global colour bar | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/plot.py#L7-L106 | null | import scipy
import scipy.ndimage
import numpy
import matplotlib.pyplot
def plot_lines(x, fsamps, ax=None, downsample=100, **kwargs):
"""
Plot function samples as a set of line plots.
Parameters
----------
x: 1D array-like
x values to plot
fsamps: 2D array-like
set of functions to plot at each x. As returned by
:func:`fgivenx.compute_samples`
ax: axes object
:class:`matplotlib.pyplot.ax` to plot on.
downsample: int, optional
Reduce the number of samples to a viewable quantity. (Default 100)
any other keywords are passed to :meth:`matplotlib.pyplot.ax.plot`
"""
if ax is None:
ax = matplotlib.pyplot.gca()
if downsample < len(fsamps.T):
indices = numpy.random.choice(len(fsamps.T), downsample, replace=False)
else:
indices = numpy.arange(len(fsamps.T))
color = kwargs.pop('color', 'k')
alpha = kwargs.pop('alpha', 0.1)
for y in fsamps.T[indices]:
ax.plot(x, y, color=color, alpha=alpha, **kwargs)
|
williamjameshandley/fgivenx | fgivenx/plot.py | plot_lines | python | def plot_lines(x, fsamps, ax=None, downsample=100, **kwargs):
if ax is None:
ax = matplotlib.pyplot.gca()
if downsample < len(fsamps.T):
indices = numpy.random.choice(len(fsamps.T), downsample, replace=False)
else:
indices = numpy.arange(len(fsamps.T))
color = kwargs.pop('color', 'k')
alpha = kwargs.pop('alpha', 0.1)
for y in fsamps.T[indices]:
ax.plot(x, y, color=color, alpha=alpha, **kwargs) | Plot function samples as a set of line plots.
Parameters
----------
x: 1D array-like
x values to plot
fsamps: 2D array-like
set of functions to plot at each x. As returned by
:func:`fgivenx.compute_samples`
ax: axes object
:class:`matplotlib.pyplot.ax` to plot on.
downsample: int, optional
Reduce the number of samples to a viewable quantity. (Default 100)
any other keywords are passed to :meth:`matplotlib.pyplot.ax.plot` | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/plot.py#L109-L139 | null | import scipy
import scipy.ndimage
import numpy
import matplotlib.pyplot
def plot(x, y, z, ax=None, **kwargs):
r"""
Plot iso-probability mass function, converted to sigmas.
Parameters
----------
x, y, z : numpy arrays
Same as arguments to :func:`matplotlib.pyplot.contour`
ax: axes object, optional
:class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to
get the last axis used, or create a new one.
colors: color scheme, optional
:class:`matplotlib.colors.LinearSegmentedColormap`
Color scheme to plot with. Recommend plotting in reverse
(Default: :class:`matplotlib.pyplot.cm.Reds_r`)
smooth: float, optional
Percentage by which to smooth the contours.
(Default: no smoothing)
contour_line_levels: List[float], optional
Contour lines to be plotted. (Default: [1,2])
linewidths: float, optional
Thickness of contour lines. (Default: 0.3)
contour_color_levels: List[float], optional
Contour color levels.
(Default: `numpy.arange(0, contour_line_levels[-1] + 1, fineness)`)
fineness: float, optional
Spacing of contour color levels. (Default: 0.1)
lines: bool, optional
(Default: True)
rasterize_contours: bool, optional
Rasterize the contours while keeping the lines, text etc in vector
format. Useful for reducing file size bloat and making printing
easier when you have dense contours.
(Default: False)
Returns
-------
cbar: color bar
:class:`matplotlib.contour.QuadContourSet`
Colors to create a global colour bar
"""
if ax is None:
ax = matplotlib.pyplot.gca()
# Get inputs
colors = kwargs.pop('colors', matplotlib.pyplot.cm.Reds_r)
smooth = kwargs.pop('smooth', False)
linewidths = kwargs.pop('linewidths', 0.3)
contour_line_levels = kwargs.pop('contour_line_levels', [1, 2, 3])
fineness = kwargs.pop('fineness', 0.5)
default_color_levels = numpy.arange(0, contour_line_levels[-1] + 1,
fineness)
contour_color_levels = kwargs.pop('contour_color_levels',
default_color_levels)
rasterize_contours = kwargs.pop('rasterize_contours', False)
lines = kwargs.pop('lines', True)
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
# Convert to sigmas
z = numpy.sqrt(2) * scipy.special.erfinv(1 - z)
# Gaussian filter if desired the sigmas by a factor of smooth%
if smooth:
sigma = smooth*numpy.array(z.shape)/100.0
z = scipy.ndimage.gaussian_filter(z, sigma=sigma, order=0)
# Plot the filled contours onto the axis ax
cbar = ax.contourf(x, y, z, cmap=colors, levels=contour_color_levels)
# Rasterize contours (the rest of the figure stays in vector format)
if rasterize_contours:
for c in cbar.collections:
c.set_rasterized(True)
# Remove those annoying white lines
for c in cbar.collections:
c.set_edgecolor("face")
# Plot some sigma-based contour lines
if lines:
ax.contour(x, y, z, colors='k', linewidths=linewidths,
levels=contour_line_levels)
# Return the contours for use as a colourbar later
return cbar
|
williamjameshandley/fgivenx | fgivenx/mass.py | PMF | python | def PMF(samples, y):
# Remove any nans from the samples
samples = numpy.array(samples)
samples = samples[~numpy.isnan(samples)]
try:
# Compute the kernel density estimate
kernel = scipy.stats.gaussian_kde(samples)
# Add two more samples definitely outside the range and sort them
mn = min(samples) - 10*numpy.sqrt(kernel.covariance[0, 0])
mx = max(samples) + 10*numpy.sqrt(kernel.covariance[0, 0])
y_ = numpy.linspace(mn, mx, len(y)*10)
# Compute the probabilities at each of the extended samples
ps_ = kernel(y_)
# Compute the masses
ms = []
for yi in y:
# compute the probability at this y value
p = kernel(yi)
if p <= max(ps_)*1e-5:
m = 0.
else:
# Find out which samples have greater probability than P(y)
bools = ps_ > p
# Compute indices where to start and stop the integration
stops = numpy.where(numpy.logical_and(~bools[:-1], bools[1:]))
starts = numpy.where(numpy.logical_and(bools[:-1], ~bools[1:]))
# Compute locations
starts = [scipy.optimize.brentq(lambda u: kernel(u)-p,
y_[i], y_[i+1])
for i in starts[0]]
starts = [-numpy.inf] + starts
stops = [scipy.optimize.brentq(lambda u: kernel(u)-p,
y_[i], y_[i+1])
for i in stops[0]]
stops = stops + [numpy.inf]
# Sum up the masses
m = sum(kernel.integrate_box_1d(a, b)
for a, b in zip(starts, stops))
ms.append(m)
return numpy.array(ms)
except numpy.linalg.LinAlgError:
return numpy.zeros_like(y) | Compute the probability mass function.
The set of samples defines a probability density P(y),
which is computed using a kernel density estimator.
From :math:`P(y)` we define:
:math:`\mathrm{pmf}(p) = \int_{P(y)<p} P(y) dy`
This is the cumulative distribution function expressed as a
function of the probability
We aim to compute :math:`M(y)`, which indicates the amount of
probability contained outside the iso-probability contour
passing through :math:`y`::
^ P(y) ...
| | . .
| | .
p|- - - - - - - - - - .+- - - - . - - - - - - - - - - -
| .#| #.
| .##| ##.
| .##| ##.
| .###| ###. M(p)
| .###| ###. is the
| .###| ###. shaded area
| .####| ####.
| .####| ####.
| ..#####| #####..
| ....#######| #######....
| .###########| ###########.
+---------------------+-------------------------------> y
t
^ M(p) ^ M(y)
| |
1| +++ 1| +
| + | + +
| ++++++++ | + +
| ++ | ++ ++
| ++ | ++ ++
|+++ |+++ +++
+---------------------> p +---------------------> y
0
Parameters
----------
samples: array-like
Array of samples from a probability density P(y).
y: array-like (optional)
Array to evaluate the PDF at
Returns
-------
1D numpy.array:
PMF evaluated at each y value | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/mass.py#L9-L117 | null | """ Utilities for computing the probability mass function. """
import scipy.stats
import scipy.interpolate
import numpy
from fgivenx.parallel import parallel_apply
from fgivenx.io import CacheException, Cache
def compute_pmf(fsamps, y, **kwargs):
""" Compute the pmf defined by fsamps at each x for each y.
Parameters
----------
fsamps: 2D array-like
array of function samples, as returned by
:func:`fgivenx.compute_samples`
y: 1D array-like
y values to evaluate the PMF at
parallel, tqdm_kwargs: optional
see docstring for :func:`fgivenx.parallel.parallel_apply`.
Returns
-------
2D numpy.array
probability mass function at each x for each y
`shape=(len(fsamps),len(y)`
"""
parallel = kwargs.pop('parallel', False)
cache = kwargs.pop('cache', '')
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
if cache:
cache = Cache(cache + '_masses')
try:
return cache.check(fsamps, y)
except CacheException as e:
print(e)
masses = parallel_apply(PMF, fsamps, postcurry=(y,), parallel=parallel,
tqdm_kwargs=tqdm_kwargs)
masses = numpy.array(masses).transpose().copy()
if cache:
cache.save(fsamps, y, masses)
return masses
|
williamjameshandley/fgivenx | fgivenx/mass.py | compute_pmf | python | def compute_pmf(fsamps, y, **kwargs):
parallel = kwargs.pop('parallel', False)
cache = kwargs.pop('cache', '')
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
if cache:
cache = Cache(cache + '_masses')
try:
return cache.check(fsamps, y)
except CacheException as e:
print(e)
masses = parallel_apply(PMF, fsamps, postcurry=(y,), parallel=parallel,
tqdm_kwargs=tqdm_kwargs)
masses = numpy.array(masses).transpose().copy()
if cache:
cache.save(fsamps, y, masses)
return masses | Compute the pmf defined by fsamps at each x for each y.
Parameters
----------
fsamps: 2D array-like
array of function samples, as returned by
:func:`fgivenx.compute_samples`
y: 1D array-like
y values to evaluate the PMF at
parallel, tqdm_kwargs: optional
see docstring for :func:`fgivenx.parallel.parallel_apply`.
Returns
-------
2D numpy.array
probability mass function at each x for each y
`shape=(len(fsamps),len(y)` | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/mass.py#L120-L161 | [
"def parallel_apply(f, array, **kwargs):\n \"\"\" Apply a function to an array with openmp parallelisation.\n\n Equivalent to `[f(x) for x in array]`, but parallelised if required.\n\n Parameters\n ----------\n f: function\n Univariate function to apply to each element of array\n\n array: array-like\n Array to apply f to\n\n parallel: int or bool, optional\n int > 0: number of processes to parallelise over\n\n int < 0 or bool=True: use OMP_NUM_THREADS to choose parallelisation\n\n bool=False or int=0: do not parallelise\n\n tqdm_kwargs: dict, optional\n additional kwargs for tqdm progress bars.\n\n precurry: tuple, optional\n immutable arguments to pass to f before x,\n i.e. `[f(precurry,x) for x in array]`\n\n postcurry: tuple, optional\n immutable arguments to pass to f after x\n i.e. `[f(x,postcurry) for x in array]`\n\n Returns\n -------\n list:\n `[f(precurry,x,postcurry) for x in array]`\n parallelised according to parallel\n \"\"\"\n\n precurry = tuple(kwargs.pop('precurry', ()))\n postcurry = tuple(kwargs.pop('postcurry', ()))\n parallel = kwargs.pop('parallel', False)\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})\n if kwargs:\n raise TypeError('Unexpected **kwargs: %r' % kwargs)\n try:\n # If running in a jupyter notebook then use tqdm_notebook.\n progress = tqdm_notebook if get_ipython().has_trait('kernel') else tqdm\n except (NameError, AssertionError):\n # Otherwise use regular tqdm progress bar\n progress = tqdm\n if not parallel:\n return [f(*(precurry + (x,) + postcurry)) for x in\n progress(array, **tqdm_kwargs)]\n elif parallel is True:\n parallel = cpu_count()\n elif isinstance(parallel, int):\n if parallel < 0:\n parallel = cpu_count()\n else:\n parallel = parallel\n else:\n raise ValueError(\"parallel keyword must be an integer or bool\")\n\n if parallel and not PARALLEL:\n warnings.warn(\"You need to install the package joblib\"\n \"if you want to use parallelisation\")\n\n return Parallel(n_jobs=parallel)(delayed(f)(*(precurry + (x,) + 
postcurry))\n for x in progress(array, **tqdm_kwargs))\n",
"def check(self, *args):\n \"\"\" Check that the arguments haven't changed since the last call.\n\n Parameters\n ----------\n *args:\n All but the last argument are inputs to the cached function. The\n last is the actual value of the function.\n\n Returns\n -------\n If arguments unchanged:\n return the cached answer\n else:\n indicate recomputation required by throwing a\n :class:`CacheException`.\n \"\"\"\n data = self.load()\n\n if len(data)-1 != len(args):\n raise ValueError(\"Wrong number of arguments passed to Cache.check\")\n\n try:\n for x, x_check in zip(data, args):\n if isinstance(x, list):\n if len(x) != len(x_check):\n raise CacheException\n for x_i, x_check_i in zip(x, x_check):\n if x_i.shape != x_check_i.shape:\n raise CacheException\n elif not numpy.allclose(x_i, x_check_i,\n equal_nan=True):\n raise CacheException\n elif x.shape != x_check.shape:\n raise CacheException\n elif not numpy.allclose(x, x_check, equal_nan=True):\n raise CacheException\n\n except CacheException:\n raise CacheChanged(self.file_root)\n\n print(CacheOK(self.file_root))\n return data[-1]\n",
"def save(self, *args):\n \"\"\" Save cache to file using pickle.\n\n Parameters\n ----------\n *args:\n All but the last argument are inputs to the cached function. The\n last is the actual value of the function.\n \"\"\"\n with open(self.file_root + '.pkl', \"wb\") as f:\n pickle.dump(args, f, protocol=pickle.HIGHEST_PROTOCOL)\n"
] | """ Utilities for computing the probability mass function. """
import scipy.stats
import scipy.interpolate
import numpy
from fgivenx.parallel import parallel_apply
from fgivenx.io import CacheException, Cache
def PMF(samples, y):
""" Compute the probability mass function.
The set of samples defines a probability density P(y),
which is computed using a kernel density estimator.
From :math:`P(y)` we define:
:math:`\mathrm{pmf}(p) = \int_{P(y)<p} P(y) dy`
This is the cumulative distribution function expressed as a
function of the probability
We aim to compute :math:`M(y)`, which indicates the amount of
probability contained outside the iso-probability contour
passing through :math:`y`::
^ P(y) ...
| | . .
| | .
p|- - - - - - - - - - .+- - - - . - - - - - - - - - - -
| .#| #.
| .##| ##.
| .##| ##.
| .###| ###. M(p)
| .###| ###. is the
| .###| ###. shaded area
| .####| ####.
| .####| ####.
| ..#####| #####..
| ....#######| #######....
| .###########| ###########.
+---------------------+-------------------------------> y
t
^ M(p) ^ M(y)
| |
1| +++ 1| +
| + | + +
| ++++++++ | + +
| ++ | ++ ++
| ++ | ++ ++
|+++ |+++ +++
+---------------------> p +---------------------> y
0
Parameters
----------
samples: array-like
Array of samples from a probability density P(y).
y: array-like (optional)
Array to evaluate the PDF at
Returns
-------
1D numpy.array:
PMF evaluated at each y value
"""
# Remove any nans from the samples
samples = numpy.array(samples)
samples = samples[~numpy.isnan(samples)]
try:
# Compute the kernel density estimate
kernel = scipy.stats.gaussian_kde(samples)
# Add two more samples definitely outside the range and sort them
mn = min(samples) - 10*numpy.sqrt(kernel.covariance[0, 0])
mx = max(samples) + 10*numpy.sqrt(kernel.covariance[0, 0])
y_ = numpy.linspace(mn, mx, len(y)*10)
# Compute the probabilities at each of the extended samples
ps_ = kernel(y_)
# Compute the masses
ms = []
for yi in y:
# compute the probability at this y value
p = kernel(yi)
if p <= max(ps_)*1e-5:
m = 0.
else:
# Find out which samples have greater probability than P(y)
bools = ps_ > p
# Compute indices where to start and stop the integration
stops = numpy.where(numpy.logical_and(~bools[:-1], bools[1:]))
starts = numpy.where(numpy.logical_and(bools[:-1], ~bools[1:]))
# Compute locations
starts = [scipy.optimize.brentq(lambda u: kernel(u)-p,
y_[i], y_[i+1])
for i in starts[0]]
starts = [-numpy.inf] + starts
stops = [scipy.optimize.brentq(lambda u: kernel(u)-p,
y_[i], y_[i+1])
for i in stops[0]]
stops = stops + [numpy.inf]
# Sum up the masses
m = sum(kernel.integrate_box_1d(a, b)
for a, b in zip(starts, stops))
ms.append(m)
return numpy.array(ms)
except numpy.linalg.LinAlgError:
return numpy.zeros_like(y)
|
williamjameshandley/fgivenx | fgivenx/io.py | Cache.check | python | def check(self, *args):
data = self.load()
if len(data)-1 != len(args):
raise ValueError("Wrong number of arguments passed to Cache.check")
try:
for x, x_check in zip(data, args):
if isinstance(x, list):
if len(x) != len(x_check):
raise CacheException
for x_i, x_check_i in zip(x, x_check):
if x_i.shape != x_check_i.shape:
raise CacheException
elif not numpy.allclose(x_i, x_check_i,
equal_nan=True):
raise CacheException
elif x.shape != x_check.shape:
raise CacheException
elif not numpy.allclose(x, x_check, equal_nan=True):
raise CacheException
except CacheException:
raise CacheChanged(self.file_root)
print(CacheOK(self.file_root))
return data[-1] | Check that the arguments haven't changed since the last call.
Parameters
----------
*args:
All but the last argument are inputs to the cached function. The
last is the actual value of the function.
Returns
-------
If arguments unchanged:
return the cached answer
else:
indicate recomputation required by throwing a
:class:`CacheException`. | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/io.py#L54-L96 | [
"def load(self):\n \"\"\" Load cache from file using pickle. \"\"\"\n try:\n with open(self.file_root + '.pkl', \"rb\") as f:\n return pickle.load(f)\n except IOError:\n raise CacheMissing(self.file_root)\n"
] | class Cache(object):
""" Cacheing tool for saving recomputation.
Parameters
----------
file_root: str
cached values are saved in file_root.pkl
"""
def __init__(self, file_root):
self.file_root = file_root
dirname = os.path.dirname(self.file_root)
if not os.path.exists(dirname):
os.makedirs(dirname)
def load(self):
""" Load cache from file using pickle. """
try:
with open(self.file_root + '.pkl', "rb") as f:
return pickle.load(f)
except IOError:
raise CacheMissing(self.file_root)
def save(self, *args):
""" Save cache to file using pickle.
Parameters
----------
*args:
All but the last argument are inputs to the cached function. The
last is the actual value of the function.
"""
with open(self.file_root + '.pkl', "wb") as f:
pickle.dump(args, f, protocol=pickle.HIGHEST_PROTOCOL)
|
williamjameshandley/fgivenx | fgivenx/io.py | Cache.load | python | def load(self):
try:
with open(self.file_root + '.pkl', "rb") as f:
return pickle.load(f)
except IOError:
raise CacheMissing(self.file_root) | Load cache from file using pickle. | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/io.py#L98-L104 | null | class Cache(object):
""" Cacheing tool for saving recomputation.
Parameters
----------
file_root: str
cached values are saved in file_root.pkl
"""
def __init__(self, file_root):
self.file_root = file_root
dirname = os.path.dirname(self.file_root)
if not os.path.exists(dirname):
os.makedirs(dirname)
def check(self, *args):
""" Check that the arguments haven't changed since the last call.
Parameters
----------
*args:
All but the last argument are inputs to the cached function. The
last is the actual value of the function.
Returns
-------
If arguments unchanged:
return the cached answer
else:
indicate recomputation required by throwing a
:class:`CacheException`.
"""
data = self.load()
if len(data)-1 != len(args):
raise ValueError("Wrong number of arguments passed to Cache.check")
try:
for x, x_check in zip(data, args):
if isinstance(x, list):
if len(x) != len(x_check):
raise CacheException
for x_i, x_check_i in zip(x, x_check):
if x_i.shape != x_check_i.shape:
raise CacheException
elif not numpy.allclose(x_i, x_check_i,
equal_nan=True):
raise CacheException
elif x.shape != x_check.shape:
raise CacheException
elif not numpy.allclose(x, x_check, equal_nan=True):
raise CacheException
except CacheException:
raise CacheChanged(self.file_root)
print(CacheOK(self.file_root))
return data[-1]
def save(self, *args):
""" Save cache to file using pickle.
Parameters
----------
*args:
All but the last argument are inputs to the cached function. The
last is the actual value of the function.
"""
with open(self.file_root + '.pkl', "wb") as f:
pickle.dump(args, f, protocol=pickle.HIGHEST_PROTOCOL)
|
williamjameshandley/fgivenx | fgivenx/io.py | Cache.save | python | def save(self, *args):
with open(self.file_root + '.pkl', "wb") as f:
pickle.dump(args, f, protocol=pickle.HIGHEST_PROTOCOL) | Save cache to file using pickle.
Parameters
----------
*args:
All but the last argument are inputs to the cached function. The
last is the actual value of the function. | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/io.py#L106-L116 | null | class Cache(object):
""" Cacheing tool for saving recomputation.
Parameters
----------
file_root: str
cached values are saved in file_root.pkl
"""
def __init__(self, file_root):
self.file_root = file_root
dirname = os.path.dirname(self.file_root)
if not os.path.exists(dirname):
os.makedirs(dirname)
def check(self, *args):
""" Check that the arguments haven't changed since the last call.
Parameters
----------
*args:
All but the last argument are inputs to the cached function. The
last is the actual value of the function.
Returns
-------
If arguments unchanged:
return the cached answer
else:
indicate recomputation required by throwing a
:class:`CacheException`.
"""
data = self.load()
if len(data)-1 != len(args):
raise ValueError("Wrong number of arguments passed to Cache.check")
try:
for x, x_check in zip(data, args):
if isinstance(x, list):
if len(x) != len(x_check):
raise CacheException
for x_i, x_check_i in zip(x, x_check):
if x_i.shape != x_check_i.shape:
raise CacheException
elif not numpy.allclose(x_i, x_check_i,
equal_nan=True):
raise CacheException
elif x.shape != x_check.shape:
raise CacheException
elif not numpy.allclose(x, x_check, equal_nan=True):
raise CacheException
except CacheException:
raise CacheChanged(self.file_root)
print(CacheOK(self.file_root))
return data[-1]
def load(self):
""" Load cache from file using pickle. """
try:
with open(self.file_root + '.pkl', "rb") as f:
return pickle.load(f)
except IOError:
raise CacheMissing(self.file_root)
|
williamjameshandley/fgivenx | fgivenx/parallel.py | parallel_apply | python | def parallel_apply(f, array, **kwargs):
precurry = tuple(kwargs.pop('precurry', ()))
postcurry = tuple(kwargs.pop('postcurry', ()))
parallel = kwargs.pop('parallel', False)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
try:
# If running in a jupyter notebook then use tqdm_notebook.
progress = tqdm_notebook if get_ipython().has_trait('kernel') else tqdm
except (NameError, AssertionError):
# Otherwise use regular tqdm progress bar
progress = tqdm
if not parallel:
return [f(*(precurry + (x,) + postcurry)) for x in
progress(array, **tqdm_kwargs)]
elif parallel is True:
parallel = cpu_count()
elif isinstance(parallel, int):
if parallel < 0:
parallel = cpu_count()
else:
parallel = parallel
else:
raise ValueError("parallel keyword must be an integer or bool")
if parallel and not PARALLEL:
warnings.warn("You need to install the package joblib"
"if you want to use parallelisation")
return Parallel(n_jobs=parallel)(delayed(f)(*(precurry + (x,) + postcurry))
for x in progress(array, **tqdm_kwargs)) | Apply a function to an array with openmp parallelisation.
Equivalent to `[f(x) for x in array]`, but parallelised if required.
Parameters
----------
f: function
Univariate function to apply to each element of array
array: array-like
Array to apply f to
parallel: int or bool, optional
int > 0: number of processes to parallelise over
int < 0 or bool=True: use OMP_NUM_THREADS to choose parallelisation
bool=False or int=0: do not parallelise
tqdm_kwargs: dict, optional
additional kwargs for tqdm progress bars.
precurry: tuple, optional
immutable arguments to pass to f before x,
i.e. `[f(precurry,x) for x in array]`
postcurry: tuple, optional
immutable arguments to pass to f after x
i.e. `[f(x,postcurry) for x in array]`
Returns
-------
list:
`[f(precurry,x,postcurry) for x in array]`
parallelised according to parallel | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/parallel.py#L26-L94 | [
"def cpu_count(): return 1\n",
"def tqdm(x, **kwargs): return x\n",
"def tqdm_notebook(x, **kwargs): return x\n"
] | import warnings
try:
from tqdm import tqdm, tqdm_notebook
except ImportError:
def tqdm(x, **kwargs): return x
def tqdm_notebook(x, **kwargs): return x
try:
PARALLEL = True
from joblib import Parallel, delayed, cpu_count
except ImportError:
PARALLEL = False
class Parallel(object):
def __init__(self, n_jobs=None): pass
def __call__(self, x): return list(x)
def delayed(x): return x
def cpu_count(): return 1
|
williamjameshandley/fgivenx | fgivenx/_utils.py | _check_args | python | def _check_args(logZ, f, x, samples, weights):
# convert to arrays
if logZ is None:
logZ = [0]
f = [f]
samples = [samples]
weights = [weights]
# logZ
logZ = numpy.array(logZ, dtype='double')
if len(logZ.shape) is not 1:
raise ValueError("logZ should be a 1D array")
# x
x = numpy.array(x, dtype='double')
if len(x.shape) is not 1:
raise ValueError("x should be a 1D array")
# f
if len(logZ) != len(f):
raise ValueError("len(logZ) = %i != len(f)= %i"
% (len(logZ), len(f)))
for func in f:
if not callable(func):
raise ValueError("first argument f must be function"
"(or list of functions) of two variables")
# samples
if len(logZ) != len(samples):
raise ValueError("len(logZ) = %i != len(samples)= %i"
% (len(logZ), len(samples)))
samples = [numpy.array(s, dtype='double') for s in samples]
for s in samples:
if len(s.shape) is not 2:
raise ValueError("each set of samples should be a 2D array")
# weights
if len(logZ) != len(weights):
raise ValueError("len(logZ) = %i != len(weights)= %i"
% (len(logZ), len(weights)))
weights = [numpy.array(w, dtype='double') if w is not None
else numpy.ones(len(s), dtype='double')
for w, s in zip(weights, samples)]
for w, s in zip(weights, samples):
if len(w.shape) is not 1:
raise ValueError("each set of weights should be a 1D array")
if len(w) != len(s):
raise ValueError("len(w) = %i != len(s) = %i" % (len(s), len(w)))
return logZ, f, x, samples, weights | Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`.
Parameters
----------
f, x, samples, weights:
see arguments for :func:`fgivenx.drivers.compute_samples` | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/_utils.py#L4-L61 | null | import numpy
def _normalise_weights(logZ, weights, ntrim=None):
""" Correctly normalise the weights for trimming
This takes a list of log-evidences, and re-normalises the weights so that
the largest weight across all samples is 1, and the total weight in each
set of samples is proportional to the evidence.
Parameters
----------
logZ: array-like
log-evidences to weight each set of weights by
weights: array-like of numpy.array
list of not necessarily equal length list of weights
Returns
-------
logZ: numpy.array
evidences, renormalised so that max(logZ) = 0
weights: list of 1D numpy.array
normalised weights
"""
logZ -= logZ.max()
Zs = numpy.exp(logZ)
weights = [w/w.sum()*Z for w, Z in zip(weights, Zs)]
wmax = max([w.max() for w in weights])
weights = [w/wmax for w in weights]
ntot = sum([w.sum() for w in weights])
if ntrim is not None and ntrim < ntot:
weights = [w*ntrim/ntot for w in weights]
return logZ, weights
def _equally_weight_samples(samples, weights):
""" Convert samples to be equally weighted.
Samples are trimmed by discarding samples in accordance with a probability
determined by the corresponding weight.
This function has assumed you have normalised the weights properly.
If in doubt, convert weights via: `weights /= weights.max()`
Parameters
----------
samples: array-like
Samples to trim.
weights: array-like
Weights to trim by.
Returns
-------
1D numpy.array:
Equally weighted sample array. `shape=(len(samples))`
"""
if len(weights) != len(samples):
raise ValueError("len(weights) = %i != len(samples) = %i" %
(len(weights), len(samples)))
if numpy.logical_or(weights < 0, weights > 1).any():
raise ValueError("weights must have probability between 0 and 1")
weights = numpy.array(weights)
samples = numpy.array(samples)
state = numpy.random.get_state()
numpy.random.seed(1)
n = len(weights)
choices = numpy.random.rand(n) < weights
new_samples = samples[choices]
numpy.random.set_state(state)
return new_samples.copy()
|
williamjameshandley/fgivenx | fgivenx/_utils.py | _normalise_weights | python | def _normalise_weights(logZ, weights, ntrim=None):
logZ -= logZ.max()
Zs = numpy.exp(logZ)
weights = [w/w.sum()*Z for w, Z in zip(weights, Zs)]
wmax = max([w.max() for w in weights])
weights = [w/wmax for w in weights]
ntot = sum([w.sum() for w in weights])
if ntrim is not None and ntrim < ntot:
weights = [w*ntrim/ntot for w in weights]
return logZ, weights | Correctly normalise the weights for trimming
This takes a list of log-evidences, and re-normalises the weights so that
the largest weight across all samples is 1, and the total weight in each
set of samples is proportional to the evidence.
Parameters
----------
logZ: array-like
log-evidences to weight each set of weights by
weights: array-like of numpy.array
list of not necessarily equal length list of weights
Returns
-------
logZ: numpy.array
evidences, renormalised so that max(logZ) = 0
weights: list of 1D numpy.array
normalised weights | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/_utils.py#L64-L102 | null | import numpy
def _check_args(logZ, f, x, samples, weights):
""" Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`.
Parameters
----------
f, x, samples, weights:
see arguments for :func:`fgivenx.drivers.compute_samples`
"""
# convert to arrays
if logZ is None:
logZ = [0]
f = [f]
samples = [samples]
weights = [weights]
# logZ
logZ = numpy.array(logZ, dtype='double')
if len(logZ.shape) is not 1:
raise ValueError("logZ should be a 1D array")
# x
x = numpy.array(x, dtype='double')
if len(x.shape) is not 1:
raise ValueError("x should be a 1D array")
# f
if len(logZ) != len(f):
raise ValueError("len(logZ) = %i != len(f)= %i"
% (len(logZ), len(f)))
for func in f:
if not callable(func):
raise ValueError("first argument f must be function"
"(or list of functions) of two variables")
# samples
if len(logZ) != len(samples):
raise ValueError("len(logZ) = %i != len(samples)= %i"
% (len(logZ), len(samples)))
samples = [numpy.array(s, dtype='double') for s in samples]
for s in samples:
if len(s.shape) is not 2:
raise ValueError("each set of samples should be a 2D array")
# weights
if len(logZ) != len(weights):
raise ValueError("len(logZ) = %i != len(weights)= %i"
% (len(logZ), len(weights)))
weights = [numpy.array(w, dtype='double') if w is not None
else numpy.ones(len(s), dtype='double')
for w, s in zip(weights, samples)]
for w, s in zip(weights, samples):
if len(w.shape) is not 1:
raise ValueError("each set of weights should be a 1D array")
if len(w) != len(s):
raise ValueError("len(w) = %i != len(s) = %i" % (len(s), len(w)))
return logZ, f, x, samples, weights
def _equally_weight_samples(samples, weights):
""" Convert samples to be equally weighted.
Samples are trimmed by discarding samples in accordance with a probability
determined by the corresponding weight.
This function has assumed you have normalised the weights properly.
If in doubt, convert weights via: `weights /= weights.max()`
Parameters
----------
samples: array-like
Samples to trim.
weights: array-like
Weights to trim by.
Returns
-------
1D numpy.array:
Equally weighted sample array. `shape=(len(samples))`
"""
if len(weights) != len(samples):
raise ValueError("len(weights) = %i != len(samples) = %i" %
(len(weights), len(samples)))
if numpy.logical_or(weights < 0, weights > 1).any():
raise ValueError("weights must have probability between 0 and 1")
weights = numpy.array(weights)
samples = numpy.array(samples)
state = numpy.random.get_state()
numpy.random.seed(1)
n = len(weights)
choices = numpy.random.rand(n) < weights
new_samples = samples[choices]
numpy.random.set_state(state)
return new_samples.copy()
|
williamjameshandley/fgivenx | fgivenx/_utils.py | _equally_weight_samples | python | def _equally_weight_samples(samples, weights):
if len(weights) != len(samples):
raise ValueError("len(weights) = %i != len(samples) = %i" %
(len(weights), len(samples)))
if numpy.logical_or(weights < 0, weights > 1).any():
raise ValueError("weights must have probability between 0 and 1")
weights = numpy.array(weights)
samples = numpy.array(samples)
state = numpy.random.get_state()
numpy.random.seed(1)
n = len(weights)
choices = numpy.random.rand(n) < weights
new_samples = samples[choices]
numpy.random.set_state(state)
return new_samples.copy() | Convert samples to be equally weighted.
Samples are trimmed by discarding samples in accordance with a probability
determined by the corresponding weight.
This function has assumed you have normalised the weights properly.
If in doubt, convert weights via: `weights /= weights.max()`
Parameters
----------
samples: array-like
Samples to trim.
weights: array-like
Weights to trim by.
Returns
-------
1D numpy.array:
Equally weighted sample array. `shape=(len(samples))` | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/_utils.py#L105-L147 | null | import numpy
def _check_args(logZ, f, x, samples, weights):
""" Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`.
Parameters
----------
f, x, samples, weights:
see arguments for :func:`fgivenx.drivers.compute_samples`
"""
# convert to arrays
if logZ is None:
logZ = [0]
f = [f]
samples = [samples]
weights = [weights]
# logZ
logZ = numpy.array(logZ, dtype='double')
if len(logZ.shape) is not 1:
raise ValueError("logZ should be a 1D array")
# x
x = numpy.array(x, dtype='double')
if len(x.shape) is not 1:
raise ValueError("x should be a 1D array")
# f
if len(logZ) != len(f):
raise ValueError("len(logZ) = %i != len(f)= %i"
% (len(logZ), len(f)))
for func in f:
if not callable(func):
raise ValueError("first argument f must be function"
"(or list of functions) of two variables")
# samples
if len(logZ) != len(samples):
raise ValueError("len(logZ) = %i != len(samples)= %i"
% (len(logZ), len(samples)))
samples = [numpy.array(s, dtype='double') for s in samples]
for s in samples:
if len(s.shape) is not 2:
raise ValueError("each set of samples should be a 2D array")
# weights
if len(logZ) != len(weights):
raise ValueError("len(logZ) = %i != len(weights)= %i"
% (len(logZ), len(weights)))
weights = [numpy.array(w, dtype='double') if w is not None
else numpy.ones(len(s), dtype='double')
for w, s in zip(weights, samples)]
for w, s in zip(weights, samples):
if len(w.shape) is not 1:
raise ValueError("each set of weights should be a 1D array")
if len(w) != len(s):
raise ValueError("len(w) = %i != len(s) = %i" % (len(s), len(w)))
return logZ, f, x, samples, weights
def _normalise_weights(logZ, weights, ntrim=None):
""" Correctly normalise the weights for trimming
This takes a list of log-evidences, and re-normalises the weights so that
the largest weight across all samples is 1, and the total weight in each
set of samples is proportional to the evidence.
Parameters
----------
logZ: array-like
log-evidences to weight each set of weights by
weights: array-like of numpy.array
list of not necessarily equal length list of weights
Returns
-------
logZ: numpy.array
evidences, renormalised so that max(logZ) = 0
weights: list of 1D numpy.array
normalised weights
"""
logZ -= logZ.max()
Zs = numpy.exp(logZ)
weights = [w/w.sum()*Z for w, Z in zip(weights, Zs)]
wmax = max([w.max() for w in weights])
weights = [w/wmax for w in weights]
ntot = sum([w.sum() for w in weights])
if ntrim is not None and ntrim < ntot:
weights = [w*ntrim/ntot for w in weights]
return logZ, weights
|
williamjameshandley/fgivenx | fgivenx/samples.py | compute_samples | python | def compute_samples(f, x, samples, **kwargs):
r""" Apply f(x,theta) to x array and theta in samples.
Parameters
----------
f: function
list of functions :math:`f(x;\theta)` with dependent variable
:math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
x values to evaluate :math:`f(x;\theta)` at.
samples: 2D array-like
list of theta samples to evaluate :math:`f(x;\theta)` at.
`shape = (nfunc, nsamples, npars)`
parallel, tqdm_kwargs: optional
see docstring for :func:`fgivenx.parallel.parallel_apply`
cache: str, optional
File root for saving previous calculations for re-use
default None
Returns
-------
2D numpy.array:
samples at each x. `shape=(len(x),len(samples),)`
"""
parallel = kwargs.pop('parallel', False)
cache = kwargs.pop('cache', '')
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
if cache:
cache = Cache(cache + '_fsamples')
try:
return cache.check(x, samples)
except CacheException as e:
print(e)
fsamples = []
for fi, s in zip(f, samples):
if len(s) > 0:
fsamps = parallel_apply(fi, s, precurry=(x,), parallel=parallel,
tqdm_kwargs=tqdm_kwargs)
fsamps = numpy.array(fsamps).transpose().copy()
fsamples.append(fsamps)
fsamples = numpy.concatenate(fsamples, axis=1)
if cache:
cache.save(x, samples, fsamples)
return fsamples | r""" Apply f(x,theta) to x array and theta in samples.
Parameters
----------
f: function
list of functions :math:`f(x;\theta)` with dependent variable
:math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
x values to evaluate :math:`f(x;\theta)` at.
samples: 2D array-like
list of theta samples to evaluate :math:`f(x;\theta)` at.
`shape = (nfunc, nsamples, npars)`
parallel, tqdm_kwargs: optional
see docstring for :func:`fgivenx.parallel.parallel_apply`
cache: str, optional
File root for saving previous calculations for re-use
default None
Returns
-------
2D numpy.array:
samples at each x. `shape=(len(x),len(samples),)` | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/samples.py#L6-L60 | [
"def parallel_apply(f, array, **kwargs):\n \"\"\" Apply a function to an array with openmp parallelisation.\n\n Equivalent to `[f(x) for x in array]`, but parallelised if required.\n\n Parameters\n ----------\n f: function\n Univariate function to apply to each element of array\n\n array: array-like\n Array to apply f to\n\n parallel: int or bool, optional\n int > 0: number of processes to parallelise over\n\n int < 0 or bool=True: use OMP_NUM_THREADS to choose parallelisation\n\n bool=False or int=0: do not parallelise\n\n tqdm_kwargs: dict, optional\n additional kwargs for tqdm progress bars.\n\n precurry: tuple, optional\n immutable arguments to pass to f before x,\n i.e. `[f(precurry,x) for x in array]`\n\n postcurry: tuple, optional\n immutable arguments to pass to f after x\n i.e. `[f(x,postcurry) for x in array]`\n\n Returns\n -------\n list:\n `[f(precurry,x,postcurry) for x in array]`\n parallelised according to parallel\n \"\"\"\n\n precurry = tuple(kwargs.pop('precurry', ()))\n postcurry = tuple(kwargs.pop('postcurry', ()))\n parallel = kwargs.pop('parallel', False)\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})\n if kwargs:\n raise TypeError('Unexpected **kwargs: %r' % kwargs)\n try:\n # If running in a jupyter notebook then use tqdm_notebook.\n progress = tqdm_notebook if get_ipython().has_trait('kernel') else tqdm\n except (NameError, AssertionError):\n # Otherwise use regular tqdm progress bar\n progress = tqdm\n if not parallel:\n return [f(*(precurry + (x,) + postcurry)) for x in\n progress(array, **tqdm_kwargs)]\n elif parallel is True:\n parallel = cpu_count()\n elif isinstance(parallel, int):\n if parallel < 0:\n parallel = cpu_count()\n else:\n parallel = parallel\n else:\n raise ValueError(\"parallel keyword must be an integer or bool\")\n\n if parallel and not PARALLEL:\n warnings.warn(\"You need to install the package joblib\"\n \"if you want to use parallelisation\")\n\n return Parallel(n_jobs=parallel)(delayed(f)(*(precurry + (x,) + 
postcurry))\n for x in progress(array, **tqdm_kwargs))\n",
"def check(self, *args):\n \"\"\" Check that the arguments haven't changed since the last call.\n\n Parameters\n ----------\n *args:\n All but the last argument are inputs to the cached function. The\n last is the actual value of the function.\n\n Returns\n -------\n If arguments unchanged:\n return the cached answer\n else:\n indicate recomputation required by throwing a\n :class:`CacheException`.\n \"\"\"\n data = self.load()\n\n if len(data)-1 != len(args):\n raise ValueError(\"Wrong number of arguments passed to Cache.check\")\n\n try:\n for x, x_check in zip(data, args):\n if isinstance(x, list):\n if len(x) != len(x_check):\n raise CacheException\n for x_i, x_check_i in zip(x, x_check):\n if x_i.shape != x_check_i.shape:\n raise CacheException\n elif not numpy.allclose(x_i, x_check_i,\n equal_nan=True):\n raise CacheException\n elif x.shape != x_check.shape:\n raise CacheException\n elif not numpy.allclose(x, x_check, equal_nan=True):\n raise CacheException\n\n except CacheException:\n raise CacheChanged(self.file_root)\n\n print(CacheOK(self.file_root))\n return data[-1]\n",
"def save(self, *args):\n \"\"\" Save cache to file using pickle.\n\n Parameters\n ----------\n *args:\n All but the last argument are inputs to the cached function. The\n last is the actual value of the function.\n \"\"\"\n with open(self.file_root + '.pkl', \"wb\") as f:\n pickle.dump(args, f, protocol=pickle.HIGHEST_PROTOCOL)\n"
] | import numpy
from fgivenx.parallel import parallel_apply
from fgivenx.io import CacheException, Cache
def samples_from_getdist_chains(params, file_root, latex=False):
""" Extract samples and weights from getdist chains.
Parameters
----------
params: list(str)
Names of parameters to be supplied to second argument of f(x|theta).
file_root: str, optional
Root name for getdist chains files. This variable automatically
defines:
- chains_file = file_root.txt
- paramnames_file = file_root.paramnames
but can be overidden by chains_file or paramnames_file.
latex: bool, optional
Also return an array of latex strings for those paramnames.
Returns
-------
samples: numpy.array
2D Array of samples. `shape=(len(samples), len(params))`
weights: numpy.array
Array of weights. `shape = (len(params),)`
latex: list(str), optional
list of latex strings for each parameter
(if latex is provided as an argument)
"""
import getdist
samples = getdist.loadMCSamples(file_root)
weights = samples.weights
indices = [samples.index[p] for p in params]
samps = samples.samples[:, indices]
if latex:
latex = [samples.parLabel(p) for p in params]
return samps, weights, latex
else:
return samps, weights
|
williamjameshandley/fgivenx | fgivenx/samples.py | samples_from_getdist_chains | python | def samples_from_getdist_chains(params, file_root, latex=False):
import getdist
samples = getdist.loadMCSamples(file_root)
weights = samples.weights
indices = [samples.index[p] for p in params]
samps = samples.samples[:, indices]
if latex:
latex = [samples.parLabel(p) for p in params]
return samps, weights, latex
else:
return samps, weights | Extract samples and weights from getdist chains.
Parameters
----------
params: list(str)
Names of parameters to be supplied to second argument of f(x|theta).
file_root: str, optional
Root name for getdist chains files. This variable automatically
defines:
- chains_file = file_root.txt
- paramnames_file = file_root.paramnames
but can be overidden by chains_file or paramnames_file.
latex: bool, optional
Also return an array of latex strings for those paramnames.
Returns
-------
samples: numpy.array
2D Array of samples. `shape=(len(samples), len(params))`
weights: numpy.array
Array of weights. `shape = (len(params),)`
latex: list(str), optional
list of latex strings for each parameter
(if latex is provided as an argument) | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/samples.py#L63-L104 | null | import numpy
from fgivenx.parallel import parallel_apply
from fgivenx.io import CacheException, Cache
def compute_samples(f, x, samples, **kwargs):
r""" Apply f(x,theta) to x array and theta in samples.
Parameters
----------
f: function
list of functions :math:`f(x;\theta)` with dependent variable
:math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
x values to evaluate :math:`f(x;\theta)` at.
samples: 2D array-like
list of theta samples to evaluate :math:`f(x;\theta)` at.
`shape = (nfunc, nsamples, npars)`
parallel, tqdm_kwargs: optional
see docstring for :func:`fgivenx.parallel.parallel_apply`
cache: str, optional
File root for saving previous calculations for re-use
default None
Returns
-------
2D numpy.array:
samples at each x. `shape=(len(x),len(samples),)`
"""
parallel = kwargs.pop('parallel', False)
cache = kwargs.pop('cache', '')
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
if cache:
cache = Cache(cache + '_fsamples')
try:
return cache.check(x, samples)
except CacheException as e:
print(e)
fsamples = []
for fi, s in zip(f, samples):
if len(s) > 0:
fsamps = parallel_apply(fi, s, precurry=(x,), parallel=parallel,
tqdm_kwargs=tqdm_kwargs)
fsamps = numpy.array(fsamps).transpose().copy()
fsamples.append(fsamps)
fsamples = numpy.concatenate(fsamples, axis=1)
if cache:
cache.save(x, samples, fsamples)
return fsamples
|
williamjameshandley/fgivenx | fgivenx/drivers.py | plot_contours | python | def plot_contours(f, x, samples, ax=None, **kwargs):
r"""
Plot the probability mass function given `x` at a range of :math:`y` values
for :math:`y = f(x|\theta)`
:math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta`
:math:`\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'`
Additionally, if a list of log-evidences are passed, along with list of
functions, and list of samples, this function plots the probability mass
function for all models marginalised according to the evidences.
Parameters
----------
f: function
function :math:`f(x;\theta)` (or list of functions for each model) with
dependent variable :math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
`x` values to evaluate :math:`f(x;\theta)` at.
samples: 2D array-like
:math:`\theta` samples (or list of :math:`\theta` samples) to evaluate
:math:`f(x;\theta)` at.
`shape = (nsamples, npars)`
ax: axes object, optional
:class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to
get the last axis used, or create a new one.
logZ: 1D array-like, optional
log-evidences of each model if multiple models are passed.
Should be same length as the list `f`, and need not be normalised.
Default: `numpy.ones_like(f)`
weights: 1D array-like, optional
sample weights (or list of weights), if desired. Should have length
same as `samples.shape[0]`.
Default: `numpy.ones_like(samples)`
ny: int, optional
Resolution of `y` axis.
Default: `100`
y: array-like, optional
Explicit descriptor of `y` values to evaluate.
Default: `numpy.linspace(min(f), max(f), ny)`
ntrim: int, optional
Approximate number of samples to trim down to, if desired. Useful if
the posterior is dramatically oversampled.
Default: None
cache: str, optional
File root for saving previous calculations for re-use
parallel, tqdm_args:
see docstring for :func:`fgivenx.parallel.parallel_apply`
kwargs: further keyword arguments
Any further keyword arguments are plotting keywords that are passed to
:func:`fgivenx.plot.plot`.
Returns
-------
cbar: color bar
:class:`matplotlib.contour.QuadContourSet`
Colors to create a global colour bar
"""
logZ = kwargs.pop('logZ', None)
weights = kwargs.pop('weights', None)
ntrim = kwargs.pop('ntrim', None)
ny = kwargs.pop('ny', 100)
y = kwargs.pop('y', None)
cache = kwargs.pop('cache', '')
parallel = kwargs.pop('parallel', False)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
y, pmf = compute_pmf(f, x, samples, weights=weights, logZ=logZ,
ntrim=ntrim, ny=ny, y=y,
parallel=parallel, cache=cache,
tqdm_kwargs=tqdm_kwargs)
cbar = fgivenx.plot.plot(x, y, pmf, ax, **kwargs)
return cbar | r"""
Plot the probability mass function given `x` at a range of :math:`y` values
for :math:`y = f(x|\theta)`
:math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta`
:math:`\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'`
Additionally, if a list of log-evidences are passed, along with list of
functions, and list of samples, this function plots the probability mass
function for all models marginalised according to the evidences.
Parameters
----------
f: function
function :math:`f(x;\theta)` (or list of functions for each model) with
dependent variable :math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
`x` values to evaluate :math:`f(x;\theta)` at.
samples: 2D array-like
:math:`\theta` samples (or list of :math:`\theta` samples) to evaluate
:math:`f(x;\theta)` at.
`shape = (nsamples, npars)`
ax: axes object, optional
:class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to
get the last axis used, or create a new one.
logZ: 1D array-like, optional
log-evidences of each model if multiple models are passed.
Should be same length as the list `f`, and need not be normalised.
Default: `numpy.ones_like(f)`
weights: 1D array-like, optional
sample weights (or list of weights), if desired. Should have length
same as `samples.shape[0]`.
Default: `numpy.ones_like(samples)`
ny: int, optional
Resolution of `y` axis.
Default: `100`
y: array-like, optional
Explicit descriptor of `y` values to evaluate.
Default: `numpy.linspace(min(f), max(f), ny)`
ntrim: int, optional
Approximate number of samples to trim down to, if desired. Useful if
the posterior is dramatically oversampled.
Default: None
cache: str, optional
File root for saving previous calculations for re-use
parallel, tqdm_args:
see docstring for :func:`fgivenx.parallel.parallel_apply`
kwargs: further keyword arguments
Any further keyword arguments are plotting keywords that are passed to
:func:`fgivenx.plot.plot`.
Returns
-------
cbar: color bar
:class:`matplotlib.contour.QuadContourSet`
Colors to create a global colour bar | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/drivers.py#L42-L127 | [
"def compute_pmf(f, x, samples, **kwargs):\n r\"\"\"\n Compute the probability mass function given `x` at a range of `x` values\n for :math:`y = f(x|\\theta)`\n\n :math:`P(y|x) = \\int P(y=f(x;\\theta)|x,\\theta) P(\\theta) d\\theta`\n\n :math:`\\mathrm{pmf}(y|x) = \\int_{P(y'|x) < P(y|x)} P(y'|x) dy'`\n\n Additionally, if a list of log-evidences are passed, along with list of\n functions, samples and optional weights it marginalises over the models\n according to the evidences.\n\n Parameters\n ----------\n f: function\n function :math:`f(x;\\theta)` (or list of functions for each model) with\n dependent variable :math:`x`, parameterised by :math:`\\theta`.\n\n x: 1D array-like\n `x` values to evaluate :math:`f(x;\\theta)` at.\n\n samples: 2D array-like\n :math:`\\theta` samples (or list of :math:`\\theta` samples) to evaluate\n :math:`f(x;\\theta)` at.\n `shape = (nsamples, npars)`\n\n logZ: 1D array-like, optional\n log-evidences of each model if multiple models are passed.\n Should be same length as the list `f`, and need not be normalised.\n Default: `numpy.ones_like(f)`\n\n weights: 1D array-like, optional\n sample weights (or list of weights), if desired. Should have length\n same as `samples.shape[0]`.\n Default: `numpy.ones_like(samples)`\n\n ny: int, optional\n Resolution of y axis.\n Default: `100`\n\n y: array-like, optional\n Explicit descriptor of `y` values to evaluate.\n Default: `numpy.linspace(min(f), max(f), ny)`\n\n ntrim: int, optional\n Approximate number of samples to trim down to, if desired. 
Useful if\n the posterior is dramatically oversampled.\n Default: None\n\n cache: str, optional\n File root for saving previous calculations for re-use\n\n parallel, tqdm_args:\n see docstring for :func:`fgivenx.parallel.parallel_apply`\n\n\n Returns\n -------\n 1D numpy.array:\n `y` values pmf is computed at `shape=(len(y))` or `ny`\n 2D numpy.array:\n pmf values at each `x` and `y` `shape=(len(x),len(y))`\n \"\"\"\n\n logZ = kwargs.pop('logZ', None)\n weights = kwargs.pop('weights', None)\n ny = kwargs.pop('ny', 100)\n y = kwargs.pop('y', None)\n ntrim = kwargs.pop('ntrim', 100000)\n parallel = kwargs.pop('parallel', False)\n cache = kwargs.pop('cache', '')\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})\n if kwargs:\n raise TypeError('Unexpected **kwargs: %r' % kwargs)\n\n # y\n if y is not None:\n y = numpy.array(y, dtype='double')\n if len(y.shape) is not 1:\n raise ValueError(\"y should be a 1D array\")\n\n fsamps = compute_samples(f, x, samples, logZ=logZ,\n weights=weights, ntrim=ntrim,\n parallel=parallel, cache=cache,\n tqdm_kwargs=tqdm_kwargs)\n\n if y is None:\n ymin = fsamps[~numpy.isnan(fsamps)].min(axis=None)\n ymax = fsamps[~numpy.isnan(fsamps)].max(axis=None)\n y = numpy.linspace(ymin, ymax, ny)\n\n return y, fgivenx.mass.compute_pmf(fsamps, y, parallel=parallel,\n cache=cache, tqdm_kwargs=tqdm_kwargs)\n",
"def plot(x, y, z, ax=None, **kwargs):\n r\"\"\"\n Plot iso-probability mass function, converted to sigmas.\n\n Parameters\n ----------\n x, y, z : numpy arrays\n Same as arguments to :func:`matplotlib.pyplot.contour`\n\n ax: axes object, optional\n :class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours\n onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to\n get the last axis used, or create a new one.\n\n colors: color scheme, optional\n :class:`matplotlib.colors.LinearSegmentedColormap`\n Color scheme to plot with. Recommend plotting in reverse\n (Default: :class:`matplotlib.pyplot.cm.Reds_r`)\n\n smooth: float, optional\n Percentage by which to smooth the contours.\n (Default: no smoothing)\n\n contour_line_levels: List[float], optional\n Contour lines to be plotted. (Default: [1,2])\n\n linewidths: float, optional\n Thickness of contour lines. (Default: 0.3)\n\n contour_color_levels: List[float], optional\n Contour color levels.\n (Default: `numpy.arange(0, contour_line_levels[-1] + 1, fineness)`)\n\n fineness: float, optional\n Spacing of contour color levels. (Default: 0.1)\n\n lines: bool, optional\n (Default: True)\n\n rasterize_contours: bool, optional\n Rasterize the contours while keeping the lines, text etc in vector\n format. 
Useful for reducing file size bloat and making printing\n easier when you have dense contours.\n (Default: False)\n\n Returns\n -------\n cbar: color bar\n :class:`matplotlib.contour.QuadContourSet`\n Colors to create a global colour bar\n \"\"\"\n if ax is None:\n ax = matplotlib.pyplot.gca()\n # Get inputs\n colors = kwargs.pop('colors', matplotlib.pyplot.cm.Reds_r)\n smooth = kwargs.pop('smooth', False)\n\n linewidths = kwargs.pop('linewidths', 0.3)\n contour_line_levels = kwargs.pop('contour_line_levels', [1, 2, 3])\n\n fineness = kwargs.pop('fineness', 0.5)\n default_color_levels = numpy.arange(0, contour_line_levels[-1] + 1,\n fineness)\n contour_color_levels = kwargs.pop('contour_color_levels',\n default_color_levels)\n\n rasterize_contours = kwargs.pop('rasterize_contours', False)\n\n lines = kwargs.pop('lines', True)\n\n if kwargs:\n raise TypeError('Unexpected **kwargs: %r' % kwargs)\n\n # Convert to sigmas\n z = numpy.sqrt(2) * scipy.special.erfinv(1 - z)\n\n # Gaussian filter if desired the sigmas by a factor of smooth%\n if smooth:\n sigma = smooth*numpy.array(z.shape)/100.0\n z = scipy.ndimage.gaussian_filter(z, sigma=sigma, order=0)\n\n # Plot the filled contours onto the axis ax\n cbar = ax.contourf(x, y, z, cmap=colors, levels=contour_color_levels)\n\n # Rasterize contours (the rest of the figure stays in vector format)\n if rasterize_contours:\n for c in cbar.collections:\n c.set_rasterized(True)\n\n # Remove those annoying white lines\n for c in cbar.collections:\n c.set_edgecolor(\"face\")\n\n # Plot some sigma-based contour lines\n if lines:\n ax.contour(x, y, z, colors='k', linewidths=linewidths,\n levels=contour_line_levels)\n\n # Return the contours for use as a colourbar later\n return cbar\n"
] | r"""This module provides utilities for computing the grid for contours of a
function reconstruction plot.
Required ingredients:
* sampled posterior probability distribution :math:`P(\theta)`
* independent variable :math:`x`
* dependent variable :math:`y`
* functional form :math:`y = f(x;\theta)` parameterised by :math:`\theta`
Assuming that you have obtained samples of :math:`\theta` from an MCMC
process, we aim to compute the density:
.. math::
P(y|x) &= \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta \\
&= \int \delta(y-f(x;\theta)) P(\theta) d\theta
which gives our degree of knowledge for each :math:`y=f(x;\theta)` value
given an :math:`x` value.
In fact, for a more representative plot, we are not actually
interested in the value of the probability density above, but in fact
require the "iso-probablity posterior mass"
.. math::
\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'
We thus need to compute this function on a rectangular grid of :math:`x`
and :math:`y`.
"""
import numpy
import fgivenx.samples
import fgivenx.mass
import fgivenx.dkl
import fgivenx.plot
import matplotlib.pyplot as plt
from fgivenx._utils import _check_args, _normalise_weights,\
_equally_weight_samples
def plot_lines(f, x, samples, ax=None, **kwargs):
    r"""
    Plot a representative set of sampled functions :math:`y=f(x;\theta)`.

    If lists of log-evidences, functions and samples are supplied, the
    plotted lines are drawn from all models, marginalised according to
    the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions, one per model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values at which to evaluate :math:`f(x;\theta)`.
    samples: 2D array-like
        :math:`\theta` samples (or list of sample sets) to evaluate
        :math:`f(x;\theta)` at. `shape = (nsamples, npars)`
    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot onto.
        Defaults to :func:`matplotlib.pyplot.gca()`.
    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`
    weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have
        length `samples.shape[0]`. Default: `numpy.ones_like(samples)`
    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful
        if the posterior is dramatically oversampled. Default: None
    cache: str, optional
        File root for saving previous calculations for re-use
    parallel, tqdm_kwargs:
        see docstring for :func:`fgivenx.parallel.parallel_apply`
    kwargs: further keyword arguments
        Remaining keywords are plotting keywords passed on to
        :func:`fgivenx.plot.plot_lines`.
    """
    # Separate the sample-computation options from the plotting options;
    # whatever remains in kwargs is forwarded to the line plotter.
    compute_defaults = [('logZ', None), ('weights', None), ('ntrim', None),
                        ('cache', ''), ('parallel', False),
                        ('tqdm_kwargs', {})]
    compute_opts = {key: kwargs.pop(key, default)
                    for key, default in compute_defaults}
    fsamps = compute_samples(f, x, samples, **compute_opts)
    fgivenx.plot.plot_lines(x, fsamps, ax, **kwargs)
def plot_dkl(f, x, samples, prior_samples, ax=None, **kwargs):
    r"""
    Plot the Kullback-Leibler divergence at each value of :math:`x` for
    the prior and posterior defined by `prior_samples` and `samples`.

    With the posterior
    :math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta)P(\theta) d\theta`
    and the prior
    :math:`Q(y|x) = \int P(y=f(x;\theta)|x,\theta)Q(\theta) d\theta`,
    the quantity plotted at each x is
    :math:`D_\mathrm{KL}(x)=\int P(y|x)\ln\left[\frac{Q(y|x)}{P(y|x)}\right]dy`

    If lists of log-evidences, functions and samples are supplied, the
    divergence is marginalised over the models according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions, one per model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values at which to evaluate :math:`f(x;\theta)`.
    samples, prior_samples: 2D array-like
        :math:`\theta` samples (or lists of samples) from posterior and
        prior to evaluate :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`
    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot onto.
        Defaults to :func:`matplotlib.pyplot.gca()`.
    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`
    weights, prior_weights: 1D array-like, optional
        sample weights (or lists of weights), if desired. Should have
        length `samples.shape[0]`. Default: `numpy.ones_like(samples)`
    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful
        if the posterior is dramatically oversampled. Default: None
    cache, prior_cache: str, optional
        File roots for saving previous calculations for re-use
    parallel, tqdm_kwargs:
        see docstring for :func:`fgivenx.parallel.parallel_apply`
    kwargs: further keyword arguments
        Remaining keywords are plotting keywords passed to
        :meth:`matplotlib.axes.Axes.plot`.
    """
    # Split off the keywords consumed by compute_dkl; the remainder are
    # matplotlib plotting keywords.
    dkl_defaults = [('logZ', None), ('weights', None),
                    ('prior_weights', None), ('ntrim', None),
                    ('cache', ''), ('prior_cache', ''),
                    ('parallel', False), ('tqdm_kwargs', {})]
    dkl_opts = {key: kwargs.pop(key, default)
                for key, default in dkl_defaults}
    dkls = compute_dkl(f, x, samples, prior_samples, **dkl_opts)
    axes = plt.gca() if ax is None else ax
    axes.plot(x, dkls, **kwargs)
def compute_samples(f, x, samples, **kwargs):
    r"""
    Apply the function(s) :math:`f(x;\theta)` to the arrays defined in
    `x` and `samples`, with options for weighting, trimming, cacheing and
    parallelising.

    If lists of log-evidences, functions, samples (and optionally weights)
    are supplied, the models are marginalised according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions, one per model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values at which to evaluate :math:`f(x;\theta)`.
    samples: 2D array-like
        :math:`\theta` samples (or list of sample sets) to evaluate
        :math:`f(x;\theta)` at. `shape = (nsamples, npars)`
    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`
    weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have
        length `samples.shape[0]`. Default: `numpy.ones_like(samples)`
    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful
        if the posterior is dramatically oversampled. Default: None
    cache: str, optional
        File root for saving previous calculations for re-use.
        Default: None
    parallel, tqdm_kwargs:
        see docstring for :func:`fgivenx.parallel.parallel_apply`

    Returns
    -------
    2D numpy.array
        The function `f` evaluated at each x value and each theta.
        Equivalent to `[[f(x_i,theta) for theta in samples] for x_i in x]`
    """
    logZ, weights, ntrim = (kwargs.pop(key, None)
                            for key in ('logZ', 'weights', 'ntrim'))
    cache = kwargs.pop('cache', '')
    parallel = kwargs.pop('parallel', False)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    # Normalise the (possibly multi-model) inputs to lists of equal length.
    logZ, f, x, samples, weights = _check_args(logZ, f, x, samples, weights)
    logZ, weights = _normalise_weights(logZ, weights, ntrim)

    # Convert each weighted sample set into an equally weighted one.
    samples = [_equally_weight_samples(s, w)
               for s, w in zip(samples, weights)]

    return fgivenx.samples.compute_samples(f, x, samples,
                                           parallel=parallel, cache=cache,
                                           tqdm_kwargs=tqdm_kwargs)
def compute_pmf(f, x, samples, **kwargs):
    r"""
    Compute the probability mass function given `x` at a range of `y`
    values for :math:`y = f(x|\theta)`

    :math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta`

    :math:`\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'`

    If lists of log-evidences, functions, samples (and optionally weights)
    are supplied, the models are marginalised according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions, one per model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values at which to evaluate :math:`f(x;\theta)`.
    samples: 2D array-like
        :math:`\theta` samples (or list of sample sets) to evaluate
        :math:`f(x;\theta)` at. `shape = (nsamples, npars)`
    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`
    weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have
        length `samples.shape[0]`. Default: `numpy.ones_like(samples)`
    ny: int, optional
        Resolution of y axis. Default: `100`
    y: array-like, optional
        Explicit descriptor of `y` values to evaluate.
        Default: `numpy.linspace(min(f), max(f), ny)`
    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful
        if the posterior is dramatically oversampled. Default: 100000
    cache: str, optional
        File root for saving previous calculations for re-use
    parallel, tqdm_kwargs:
        see docstring for :func:`fgivenx.parallel.parallel_apply`

    Returns
    -------
    1D numpy.array:
        `y` values pmf is computed at `shape=(len(y))` or `ny`
    2D numpy.array:
        pmf values at each `x` and `y` `shape=(len(x),len(y))`
    """
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    ny = kwargs.pop('ny', 100)
    y = kwargs.pop('y', None)
    ntrim = kwargs.pop('ntrim', 100000)
    parallel = kwargs.pop('parallel', False)
    cache = kwargs.pop('cache', '')
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    # Validate an explicitly supplied y grid.
    if y is not None:
        y = numpy.array(y, dtype='double')
        # BUG FIX: original used `len(y.shape) is not 1`, an identity
        # comparison against an int literal (SyntaxWarning on Python
        # >= 3.8, and only correct by virtue of small-int caching).
        if y.ndim != 1:
            raise ValueError("y should be a 1D array")

    fsamps = compute_samples(f, x, samples, logZ=logZ,
                             weights=weights, ntrim=ntrim,
                             parallel=parallel, cache=cache,
                             tqdm_kwargs=tqdm_kwargs)

    # Default y grid: span the full range of finite sampled function values.
    if y is None:
        finite = fsamps[~numpy.isnan(fsamps)]
        y = numpy.linspace(finite.min(axis=None), finite.max(axis=None), ny)

    return y, fgivenx.mass.compute_pmf(fsamps, y, parallel=parallel,
                                       cache=cache, tqdm_kwargs=tqdm_kwargs)
def compute_dkl(f, x, samples, prior_samples, **kwargs):
    r"""
    Compute the Kullback-Leibler divergence at each value of `x` for the
    prior and posterior defined by `prior_samples` and `samples`.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions, one per model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values at which to evaluate :math:`f(x;\theta)`.
    samples, prior_samples: 2D array-like
        :math:`\theta` samples (or lists of samples) from posterior and
        prior to evaluate :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`
    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`
    weights, prior_weights: 1D array-like, optional
        sample weights (or lists of weights), if desired. Should have
        length `samples.shape[0]`. Default: `numpy.ones_like(samples)`
    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful
        if the posterior is dramatically oversampled. Default: None
    cache, prior_cache: str, optional
        File roots for saving previous calculations for re-use
    parallel, tqdm_kwargs:
        see docstring for :func:`fgivenx.parallel.parallel_apply`

    Returns
    -------
    1D numpy array:
        dkl values at each value of `x`.
    """
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    prior_weights = kwargs.pop('prior_weights', None)
    ntrim = kwargs.pop('ntrim', None)
    cache = kwargs.pop('cache', '')
    prior_cache = kwargs.pop('prior_cache', '')
    parallel = kwargs.pop('parallel', False)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    # With a single model, promote everything to length-one lists so the
    # multi-model machinery below applies unconditionally.
    if logZ is None:
        logZ = [0]
        f, samples, prior_samples = [f], [samples], [prior_samples]
        weights, prior_weights = [weights], [prior_weights]
        cache, prior_cache = [cache], [prior_cache]

    DKLs = []
    per_model = zip(f, cache, prior_cache, samples, weights,
                    prior_samples, prior_weights)
    for f_i, c_i, pc_i, samp, wt, prior_samp, prior_wt in per_model:
        post_fsamps = compute_samples(f_i, x, samp, weights=wt,
                                      ntrim=ntrim, parallel=parallel,
                                      cache=c_i, tqdm_kwargs=tqdm_kwargs)
        prior_fsamps = compute_samples(f_i, x, prior_samp,
                                       weights=prior_wt, ntrim=ntrim,
                                       parallel=parallel, cache=pc_i,
                                       tqdm_kwargs=tqdm_kwargs)
        DKLs.append(fgivenx.dkl.compute_dkl(post_fsamps, prior_fsamps,
                                            parallel=parallel, cache=c_i,
                                            tqdm_kwargs=tqdm_kwargs))

    # Marginalise over models with evidence-weighted averaging
    # (evidences shifted by their maximum for numerical stability).
    logZ = numpy.array(logZ)
    Zs = numpy.exp(logZ - logZ.max())
    Zs /= Zs.sum()
    return numpy.sum(Zs * numpy.array(DKLs).transpose(), axis=1)
|
williamjameshandley/fgivenx | fgivenx/drivers.py | plot_lines | python | def plot_lines(f, x, samples, ax=None, **kwargs):
r"""
Plot a representative set of functions to sample
Additionally, if a list of log-evidences are passed, along with list of
functions, and list of samples, this function plots the probability mass
function for all models marginalised according to the evidences.
Parameters
----------
f: function
function :math:`f(x;\theta)` (or list of functions for each model) with
dependent variable :math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
`x` values to evaluate :math:`f(x;\theta)` at.
samples: 2D array-like
:math:`\theta` samples (or list of :math:`\theta` samples) to evaluate
:math:`f(x;\theta)` at.
`shape = (nsamples, npars)`
ax: axes object, optional
:class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to
get the last axis used, or create a new one.
logZ: 1D array-like, optional
log-evidences of each model if multiple models are passed.
Should be same length as the list `f`, and need not be normalised.
Default: `numpy.ones_like(f)`
weights: 1D array-like, optional
sample weights (or list of weights), if desired. Should have length
same as `samples.shape[0]`.
Default: `numpy.ones_like(samples)`
ntrim: int, optional
Approximate number of samples to trim down to, if desired. Useful if
the posterior is dramatically oversampled.
Default: None
cache: str, optional
File root for saving previous calculations for re-use
parallel, tqdm_args:
see docstring for :func:`fgivenx.parallel.parallel_apply`
kwargs: further keyword arguments
Any further keyword arguments are plotting keywords that are passed to
:func:`fgivenx.plot.plot_lines`.
"""
logZ = kwargs.pop('logZ', None)
weights = kwargs.pop('weights', None)
ntrim = kwargs.pop('ntrim', None)
cache = kwargs.pop('cache', '')
parallel = kwargs.pop('parallel', False)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
fsamps = compute_samples(f, x, samples, logZ=logZ,
weights=weights, ntrim=ntrim,
parallel=parallel, cache=cache,
tqdm_kwargs=tqdm_kwargs)
fgivenx.plot.plot_lines(x, fsamps, ax, **kwargs) | r"""
Plot a representative set of functions to sample
Additionally, if a list of log-evidences are passed, along with list of
functions, and list of samples, this function plots the probability mass
function for all models marginalised according to the evidences.
Parameters
----------
f: function
function :math:`f(x;\theta)` (or list of functions for each model) with
dependent variable :math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
`x` values to evaluate :math:`f(x;\theta)` at.
samples: 2D array-like
:math:`\theta` samples (or list of :math:`\theta` samples) to evaluate
:math:`f(x;\theta)` at.
`shape = (nsamples, npars)`
ax: axes object, optional
:class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to
get the last axis used, or create a new one.
logZ: 1D array-like, optional
log-evidences of each model if multiple models are passed.
Should be same length as the list `f`, and need not be normalised.
Default: `numpy.ones_like(f)`
weights: 1D array-like, optional
sample weights (or list of weights), if desired. Should have length
same as `samples.shape[0]`.
Default: `numpy.ones_like(samples)`
ntrim: int, optional
Approximate number of samples to trim down to, if desired. Useful if
the posterior is dramatically oversampled.
Default: None
cache: str, optional
File root for saving previous calculations for re-use
parallel, tqdm_args:
see docstring for :func:`fgivenx.parallel.parallel_apply`
kwargs: further keyword arguments
Any further keyword arguments are plotting keywords that are passed to
:func:`fgivenx.plot.plot_lines`. | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/drivers.py#L130-L193 | [
"def plot_lines(x, fsamps, ax=None, downsample=100, **kwargs):\n \"\"\"\n Plot function samples as a set of line plots.\n\n Parameters\n ----------\n x: 1D array-like\n x values to plot\n\n fsamps: 2D array-like\n set of functions to plot at each x. As returned by\n :func:`fgivenx.compute_samples`\n\n ax: axes object\n :class:`matplotlib.pyplot.ax` to plot on.\n\n downsample: int, optional\n Reduce the number of samples to a viewable quantity. (Default 100)\n\n any other keywords are passed to :meth:`matplotlib.pyplot.ax.plot`\n \"\"\"\n if ax is None:\n ax = matplotlib.pyplot.gca()\n if downsample < len(fsamps.T):\n indices = numpy.random.choice(len(fsamps.T), downsample, replace=False)\n else:\n indices = numpy.arange(len(fsamps.T))\n color = kwargs.pop('color', 'k')\n alpha = kwargs.pop('alpha', 0.1)\n for y in fsamps.T[indices]:\n ax.plot(x, y, color=color, alpha=alpha, **kwargs)\n",
"def compute_samples(f, x, samples, **kwargs):\n r\"\"\"\n Apply the function(s) :math:`f(x;\\theta)` to the arrays defined in `x` and\n `samples`. Has options for weighting, trimming, cacheing & parallelising.\n\n Additionally, if a list of log-evidences are passed, along with list of\n functions, samples and optional weights it marginalises over the models\n according to the evidences.\n\n Parameters\n ----------\n f: function\n function :math:`f(x;\\theta)` (or list of functions for each model) with\n dependent variable :math:`x`, parameterised by :math:`\\theta`.\n\n x: 1D array-like\n `x` values to evaluate :math:`f(x;\\theta)` at.\n\n samples: 2D array-like\n :math:`\\theta` samples (or list of :math:`\\theta` samples) to evaluate\n :math:`f(x;\\theta)` at.\n `shape = (nsamples, npars)`\n\n logZ: 1D array-like, optional\n log-evidences of each model if multiple models are passed.\n Should be same length as the list `f`, and need not be normalised.\n Default: `numpy.ones_like(f)`\n\n weights: 1D array-like, optional\n sample weights (or list of weights), if desired. Should have length\n same as `samples.shape[0]`.\n Default: `numpy.ones_like(samples)`\n\n ntrim: int, optional\n Approximate number of samples to trim down to, if desired. 
Useful if\n the posterior is dramatically oversampled.\n Default: None\n\n cache: str, optional\n File root for saving previous calculations for re-use.\n Default: None\n\n parallel, tqdm_args:\n see docstring for :func:`fgivenx.parallel.parallel_apply`\n\n Returns\n -------\n 2D numpy.array\n Evaluate the function `f` at each x value and each theta.\n Equivalent to `[[f(x_i,theta) for theta in samples] for x_i in x]`\n \"\"\"\n logZ = kwargs.pop('logZ', None)\n weights = kwargs.pop('weights', None)\n ntrim = kwargs.pop('ntrim', None)\n cache = kwargs.pop('cache', '')\n parallel = kwargs.pop('parallel', False)\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})\n if kwargs:\n raise TypeError('Unexpected **kwargs: %r' % kwargs)\n\n logZ, f, x, samples, weights = _check_args(logZ, f, x, samples, weights)\n\n logZ, weights = _normalise_weights(logZ, weights, ntrim)\n\n for i, (s, w) in enumerate(zip(samples, weights)):\n samples[i] = _equally_weight_samples(s, w)\n\n return fgivenx.samples.compute_samples(f, x, samples,\n parallel=parallel, cache=cache,\n tqdm_kwargs=tqdm_kwargs)\n"
] | r"""This module provides utilities for computing the grid for contours of a
function reconstruction plot.
Required ingredients:
* sampled posterior probability distribution :math:`P(\theta)`
* independent variable :math:`x`
* dependent variable :math:`y`
* functional form :math:`y = f(x;\theta)` parameterised by :math:`\theta`
Assuming that you have obtained samples of :math:`\theta` from an MCMC
process, we aim to compute the density:
.. math::
P(y|x) &= \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta \\
&= \int \delta(y-f(x;\theta)) P(\theta) d\theta
which gives our degree of knowledge for each :math:`y=f(x;\theta)` value
given an :math:`x` value.
In fact, for a more representative plot, we are not actually
interested in the value of the probability density above, but in fact
require the "iso-probablity posterior mass"
.. math::
\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'
We thus need to compute this function on a rectangular grid of :math:`x`
and :math:`y`.
"""
import numpy
import fgivenx.samples
import fgivenx.mass
import fgivenx.dkl
import fgivenx.plot
import matplotlib.pyplot as plt
from fgivenx._utils import _check_args, _normalise_weights,\
_equally_weight_samples
def plot_contours(f, x, samples, ax=None, **kwargs):
    r"""
    Plot the probability mass function given `x` at a range of :math:`y`
    values for :math:`y = f(x|\theta)`

    :math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta`

    :math:`\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'`

    If lists of log-evidences, functions and samples are supplied, the
    probability mass function is marginalised over the models according
    to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions, one per model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values at which to evaluate :math:`f(x;\theta)`.
    samples: 2D array-like
        :math:`\theta` samples (or list of sample sets) to evaluate
        :math:`f(x;\theta)` at. `shape = (nsamples, npars)`
    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot the
        contours onto. Defaults to :func:`matplotlib.pyplot.gca()`.
    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`
    weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have
        length `samples.shape[0]`. Default: `numpy.ones_like(samples)`
    ny: int, optional
        Resolution of `y` axis. Default: `100`
    y: array-like, optional
        Explicit descriptor of `y` values to evaluate.
        Default: `numpy.linspace(min(f), max(f), ny)`
    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful
        if the posterior is dramatically oversampled. Default: None
    cache: str, optional
        File root for saving previous calculations for re-use
    parallel, tqdm_kwargs:
        see docstring for :func:`fgivenx.parallel.parallel_apply`
    kwargs: further keyword arguments
        Remaining keywords are plotting keywords passed to
        :func:`fgivenx.plot.plot`.

    Returns
    -------
    cbar: color bar
        :class:`matplotlib.contour.QuadContourSet`
        Colors to create a global colour bar
    """
    # Split off the keywords consumed by compute_pmf; the remainder are
    # contour-plotting keywords.
    pmf_defaults = [('logZ', None), ('weights', None), ('ntrim', None),
                    ('ny', 100), ('y', None), ('cache', ''),
                    ('parallel', False), ('tqdm_kwargs', {})]
    pmf_opts = {key: kwargs.pop(key, default)
                for key, default in pmf_defaults}
    y, pmf = compute_pmf(f, x, samples, **pmf_opts)
    return fgivenx.plot.plot(x, y, pmf, ax, **kwargs)
def plot_dkl(f, x, samples, prior_samples, ax=None, **kwargs):
    r"""
    Plot the Kullback-Leibler divergence at each value of :math:`x` for
    the prior and posterior defined by `prior_samples` and `samples`.

    With the posterior
    :math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta)P(\theta) d\theta`
    and the prior
    :math:`Q(y|x) = \int P(y=f(x;\theta)|x,\theta)Q(\theta) d\theta`,
    the quantity plotted at each x is
    :math:`D_\mathrm{KL}(x)=\int P(y|x)\ln\left[\frac{Q(y|x)}{P(y|x)}\right]dy`

    If lists of log-evidences, functions and samples are supplied, the
    divergence is marginalised over the models according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions, one per model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values at which to evaluate :math:`f(x;\theta)`.
    samples, prior_samples: 2D array-like
        :math:`\theta` samples (or lists of samples) from posterior and
        prior to evaluate :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`
    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot onto.
        Defaults to :func:`matplotlib.pyplot.gca()`.
    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`
    weights, prior_weights: 1D array-like, optional
        sample weights (or lists of weights), if desired. Should have
        length `samples.shape[0]`. Default: `numpy.ones_like(samples)`
    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful
        if the posterior is dramatically oversampled. Default: None
    cache, prior_cache: str, optional
        File roots for saving previous calculations for re-use
    parallel, tqdm_kwargs:
        see docstring for :func:`fgivenx.parallel.parallel_apply`
    kwargs: further keyword arguments
        Remaining keywords are plotting keywords passed to
        :meth:`matplotlib.axes.Axes.plot`.
    """
    # Split off the keywords consumed by compute_dkl; the remainder are
    # matplotlib plotting keywords.
    dkl_defaults = [('logZ', None), ('weights', None),
                    ('prior_weights', None), ('ntrim', None),
                    ('cache', ''), ('prior_cache', ''),
                    ('parallel', False), ('tqdm_kwargs', {})]
    dkl_opts = {key: kwargs.pop(key, default)
                for key, default in dkl_defaults}
    dkls = compute_dkl(f, x, samples, prior_samples, **dkl_opts)
    axes = plt.gca() if ax is None else ax
    axes.plot(x, dkls, **kwargs)
def compute_samples(f, x, samples, **kwargs):
    r"""
    Apply the function(s) :math:`f(x;\theta)` to the arrays defined in
    `x` and `samples`, with options for weighting, trimming, cacheing and
    parallelising.

    If lists of log-evidences, functions, samples (and optionally weights)
    are supplied, the models are marginalised according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions, one per model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values at which to evaluate :math:`f(x;\theta)`.
    samples: 2D array-like
        :math:`\theta` samples (or list of sample sets) to evaluate
        :math:`f(x;\theta)` at. `shape = (nsamples, npars)`
    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`
    weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have
        length `samples.shape[0]`. Default: `numpy.ones_like(samples)`
    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful
        if the posterior is dramatically oversampled. Default: None
    cache: str, optional
        File root for saving previous calculations for re-use.
        Default: None
    parallel, tqdm_kwargs:
        see docstring for :func:`fgivenx.parallel.parallel_apply`

    Returns
    -------
    2D numpy.array
        The function `f` evaluated at each x value and each theta.
        Equivalent to `[[f(x_i,theta) for theta in samples] for x_i in x]`
    """
    logZ, weights, ntrim = (kwargs.pop(key, None)
                            for key in ('logZ', 'weights', 'ntrim'))
    cache = kwargs.pop('cache', '')
    parallel = kwargs.pop('parallel', False)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    # Normalise the (possibly multi-model) inputs to lists of equal length.
    logZ, f, x, samples, weights = _check_args(logZ, f, x, samples, weights)
    logZ, weights = _normalise_weights(logZ, weights, ntrim)

    # Convert each weighted sample set into an equally weighted one.
    samples = [_equally_weight_samples(s, w)
               for s, w in zip(samples, weights)]

    return fgivenx.samples.compute_samples(f, x, samples,
                                           parallel=parallel, cache=cache,
                                           tqdm_kwargs=tqdm_kwargs)
def compute_pmf(f, x, samples, **kwargs):
    r"""
    Compute the probability mass function given `x` at a range of `y`
    values for :math:`y = f(x|\theta)`

    :math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta`

    :math:`\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'`

    If lists of log-evidences, functions, samples (and optionally weights)
    are supplied, the models are marginalised according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions, one per model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values at which to evaluate :math:`f(x;\theta)`.
    samples: 2D array-like
        :math:`\theta` samples (or list of sample sets) to evaluate
        :math:`f(x;\theta)` at. `shape = (nsamples, npars)`
    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`
    weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have
        length `samples.shape[0]`. Default: `numpy.ones_like(samples)`
    ny: int, optional
        Resolution of y axis. Default: `100`
    y: array-like, optional
        Explicit descriptor of `y` values to evaluate.
        Default: `numpy.linspace(min(f), max(f), ny)`
    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful
        if the posterior is dramatically oversampled. Default: 100000
    cache: str, optional
        File root for saving previous calculations for re-use
    parallel, tqdm_kwargs:
        see docstring for :func:`fgivenx.parallel.parallel_apply`

    Returns
    -------
    1D numpy.array:
        `y` values pmf is computed at `shape=(len(y))` or `ny`
    2D numpy.array:
        pmf values at each `x` and `y` `shape=(len(x),len(y))`
    """
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    ny = kwargs.pop('ny', 100)
    y = kwargs.pop('y', None)
    ntrim = kwargs.pop('ntrim', 100000)
    parallel = kwargs.pop('parallel', False)
    cache = kwargs.pop('cache', '')
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    # Validate an explicitly supplied y grid.
    if y is not None:
        y = numpy.array(y, dtype='double')
        # BUG FIX: original used `len(y.shape) is not 1`, an identity
        # comparison against an int literal (SyntaxWarning on Python
        # >= 3.8, and only correct by virtue of small-int caching).
        if y.ndim != 1:
            raise ValueError("y should be a 1D array")

    fsamps = compute_samples(f, x, samples, logZ=logZ,
                             weights=weights, ntrim=ntrim,
                             parallel=parallel, cache=cache,
                             tqdm_kwargs=tqdm_kwargs)

    # Default y grid: span the full range of finite sampled function values.
    if y is None:
        finite = fsamps[~numpy.isnan(fsamps)]
        y = numpy.linspace(finite.min(axis=None), finite.max(axis=None), ny)

    return y, fgivenx.mass.compute_pmf(fsamps, y, parallel=parallel,
                                       cache=cache, tqdm_kwargs=tqdm_kwargs)
def compute_dkl(f, x, samples, prior_samples, **kwargs):
    r"""
    Compute the Kullback-Leibler divergence at each value of `x` for the prior
    and posterior defined by `prior_samples` and `samples`.
    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model) with
        dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.
    samples, prior_samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) from
        posterior and prior to evaluate :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`
    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`
    weights, prior_weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have length
        same as `samples.shape[0]`.
        Default: `numpy.ones_like(samples)`
    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful if
        the posterior is dramatically oversampled.
        Default: None
    cache, prior_cache: str, optional
        File roots for saving previous calculations for re-use
    parallel, tqdm_args:
        see docstring for :func:`fgivenx.parallel.parallel_apply`
    Returns
    -------
    1D numpy array:
        dkl values at each value of `x`.
    Raises
    ------
    TypeError:
        if any unrecognised keyword arguments are passed.
    """
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    prior_weights = kwargs.pop('prior_weights', None)
    ntrim = kwargs.pop('ntrim', None)
    cache = kwargs.pop('cache', '')
    prior_cache = kwargs.pop('prior_cache', '')
    parallel = kwargs.pop('parallel', False)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)
    # Single-model case: wrap every argument in a one-element list so that
    # the loop below treats one model and many models uniformly.
    if logZ is None:
        logZ = [0]
        f = [f]
        samples = [samples]
        prior_samples = [prior_samples]
        weights = [weights]
        prior_weights = [prior_weights]
        cache = [cache]
        prior_cache = [prior_cache]
    DKLs = []
    for fi, c, pc, s, w, ps, pw in zip(f, cache, prior_cache, samples, weights,
                                       prior_samples, prior_weights):
        # Function realisations f(x;theta) under the posterior samples.
        fsamps = compute_samples(fi, x, s, weights=w, ntrim=ntrim,
                                 parallel=parallel, cache=c,
                                 tqdm_kwargs=tqdm_kwargs)
        # Function realisations f(x;theta) under the prior samples.
        fsamps_prior = compute_samples(fi, x, ps, weights=pw, ntrim=ntrim,
                                       parallel=parallel, cache=pc,
                                       tqdm_kwargs=tqdm_kwargs)
        # D_KL(x) between posterior and prior for this model, one value per x.
        dkls = fgivenx.dkl.compute_dkl(fsamps, fsamps_prior,
                                       parallel=parallel, cache=c,
                                       tqdm_kwargs=tqdm_kwargs)
        DKLs.append(dkls)
    logZ = numpy.array(logZ)
    DKLs = numpy.array(DKLs)
    # Marginalise over models: weight each model's DKL curve by its
    # normalised evidence (logZ shifted by its max for numerical stability).
    Zs = numpy.exp(logZ-logZ.max())
    Zs /= Zs.sum()
    return numpy.sum(Zs * DKLs.transpose(), axis=1)
|
williamjameshandley/fgivenx | fgivenx/drivers.py | plot_dkl | python | def plot_dkl(f, x, samples, prior_samples, ax=None, **kwargs):
r"""
Plot the Kullback-Leibler divergence at each value of :math:`x` for the
prior and posterior defined by `prior_samples` and `samples`.
Let the posterior be:
:math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta)P(\theta) d\theta`
and the prior be:
:math:`Q(y|x) = \int P(y=f(x;\theta)|x,\theta)Q(\theta) d\theta`
then the Kullback-Leibler divergence at each x is defined by
:math:`D_\mathrm{KL}(x)=\int P(y|x)\ln\left[\frac{Q(y|x)}{P(y|x)}\right]dy`
Additionally, if a list of log-evidences are passed, along with list of
functions, and list of samples, this function plots the Kullback-Leibler
divergence for all models marginalised according to the evidences.
Parameters
----------
f: function
function :math:`f(x;\theta)` (or list of functions for each model) with
dependent variable :math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
`x` values to evaluate :math:`f(x;\theta)` at.
samples, prior_samples: 2D array-like
:math:`\theta` samples (or list of :math:`\theta` samples) from
posterior and prior to evaluate :math:`f(x;\theta)` at.
`shape = (nsamples, npars)`
ax: axes object, optional
:class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to
get the last axis used, or create a new one.
logZ: 1D array-like, optional
log-evidences of each model if multiple models are passed.
Should be same length as the list `f`, and need not be normalised.
Default: `numpy.ones_like(f)`
weights, prior_weights: 1D array-like, optional
sample weights (or list of weights), if desired. Should have length
same as `samples.shape[0]`.
Default: `numpy.ones_like(samples)`
ntrim: int, optional
Approximate number of samples to trim down to, if desired. Useful if
the posterior is dramatically oversampled.
Default: None
cache, prior_cache: str, optional
File roots for saving previous calculations for re-use
parallel, tqdm_args:
see docstring for :func:`fgivenx.parallel.parallel_apply`
kwargs: further keyword arguments
Any further keyword arguments are plotting keywords that are passed to
:func:`fgivenx.plot.plot`.
"""
logZ = kwargs.pop('logZ', None)
weights = kwargs.pop('weights', None)
prior_weights = kwargs.pop('prior_weights', None)
ntrim = kwargs.pop('ntrim', None)
cache = kwargs.pop('cache', '')
prior_cache = kwargs.pop('prior_cache', '')
parallel = kwargs.pop('parallel', False)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
dkls = compute_dkl(f, x, samples, prior_samples,
logZ=logZ, parallel=parallel,
cache=cache, prior_cache=prior_cache,
tqdm_kwargs=tqdm_kwargs,
ntrim=ntrim, weights=weights,
prior_weights=prior_weights)
if ax is None:
ax = plt.gca()
ax.plot(x, dkls, **kwargs) | r"""
Plot the Kullback-Leibler divergence at each value of :math:`x` for the
prior and posterior defined by `prior_samples` and `samples`.
Let the posterior be:
:math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta)P(\theta) d\theta`
and the prior be:
:math:`Q(y|x) = \int P(y=f(x;\theta)|x,\theta)Q(\theta) d\theta`
then the Kullback-Leibler divergence at each x is defined by
:math:`D_\mathrm{KL}(x)=\int P(y|x)\ln\left[\frac{Q(y|x)}{P(y|x)}\right]dy`
Additionally, if a list of log-evidences are passed, along with list of
functions, and list of samples, this function plots the Kullback-Leibler
divergence for all models marginalised according to the evidences.
Parameters
----------
f: function
function :math:`f(x;\theta)` (or list of functions for each model) with
dependent variable :math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
`x` values to evaluate :math:`f(x;\theta)` at.
samples, prior_samples: 2D array-like
:math:`\theta` samples (or list of :math:`\theta` samples) from
posterior and prior to evaluate :math:`f(x;\theta)` at.
`shape = (nsamples, npars)`
ax: axes object, optional
:class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to
get the last axis used, or create a new one.
logZ: 1D array-like, optional
log-evidences of each model if multiple models are passed.
Should be same length as the list `f`, and need not be normalised.
Default: `numpy.ones_like(f)`
weights, prior_weights: 1D array-like, optional
sample weights (or list of weights), if desired. Should have length
same as `samples.shape[0]`.
Default: `numpy.ones_like(samples)`
ntrim: int, optional
Approximate number of samples to trim down to, if desired. Useful if
the posterior is dramatically oversampled.
Default: None
cache, prior_cache: str, optional
File roots for saving previous calculations for re-use
parallel, tqdm_args:
see docstring for :func:`fgivenx.parallel.parallel_apply`
kwargs: further keyword arguments
Any further keyword arguments are plotting keywords that are passed to
:func:`fgivenx.plot.plot`. | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/drivers.py#L196-L280 | [
"def compute_dkl(f, x, samples, prior_samples, **kwargs):\n r\"\"\"\n Compute the Kullback-Leibler divergence at each value of `x` for the prior\n and posterior defined by `prior_samples` and `samples`.\n\n Parameters\n ----------\n f: function\n function :math:`f(x;\\theta)` (or list of functions for each model) with\n dependent variable :math:`x`, parameterised by :math:`\\theta`.\n\n x: 1D array-like\n `x` values to evaluate :math:`f(x;\\theta)` at.\n\n samples, prior_samples: 2D array-like\n :math:`\\theta` samples (or list of :math:`\\theta` samples) from\n posterior and prior to evaluate :math:`f(x;\\theta)` at.\n `shape = (nsamples, npars)`\n\n logZ: 1D array-like, optional\n log-evidences of each model if multiple models are passed.\n Should be same length as the list `f`, and need not be normalised.\n Default: `numpy.ones_like(f)`\n\n weights, prior_weights: 1D array-like, optional\n sample weights (or list of weights), if desired. Should have length\n same as `samples.shape[0]`.\n Default: `numpy.ones_like(samples)`\n\n ntrim: int, optional\n Approximate number of samples to trim down to, if desired. 
Useful if\n the posterior is dramatically oversampled.\n Default: None\n\n cache, prior_cache: str, optional\n File roots for saving previous calculations for re-use\n\n parallel, tqdm_args:\n see docstring for :func:`fgivenx.parallel.parallel_apply`\n\n kwargs: further keyword arguments\n Any further keyword arguments are plotting keywords that are passed to\n :func:`fgivenx.plot.plot`.\n\n Returns\n -------\n 1D numpy array:\n dkl values at each value of `x`.\n \"\"\"\n\n logZ = kwargs.pop('logZ', None)\n weights = kwargs.pop('weights', None)\n prior_weights = kwargs.pop('prior_weights', None)\n ntrim = kwargs.pop('ntrim', None)\n cache = kwargs.pop('cache', '')\n prior_cache = kwargs.pop('prior_cache', '')\n parallel = kwargs.pop('parallel', False)\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})\n\n if kwargs:\n raise TypeError('Unexpected **kwargs: %r' % kwargs)\n\n if logZ is None:\n logZ = [0]\n f = [f]\n samples = [samples]\n prior_samples = [prior_samples]\n weights = [weights]\n prior_weights = [prior_weights]\n cache = [cache]\n prior_cache = [prior_cache]\n\n DKLs = []\n\n for fi, c, pc, s, w, ps, pw in zip(f, cache, prior_cache, samples, weights,\n prior_samples, prior_weights):\n\n fsamps = compute_samples(fi, x, s, weights=w, ntrim=ntrim,\n parallel=parallel, cache=c,\n tqdm_kwargs=tqdm_kwargs)\n\n fsamps_prior = compute_samples(fi, x, ps, weights=pw, ntrim=ntrim,\n parallel=parallel, cache=pc,\n tqdm_kwargs=tqdm_kwargs)\n\n dkls = fgivenx.dkl.compute_dkl(fsamps, fsamps_prior,\n parallel=parallel, cache=c,\n tqdm_kwargs=tqdm_kwargs)\n DKLs.append(dkls)\n\n logZ = numpy.array(logZ)\n DKLs = numpy.array(DKLs)\n\n Zs = numpy.exp(logZ-logZ.max())\n Zs /= Zs.sum()\n return numpy.sum(Zs * DKLs.transpose(), axis=1)\n"
] | r"""This module provides utilities for computing the grid for contours of a
function reconstruction plot.
Required ingredients:
* sampled posterior probability distribution :math:`P(\theta)`
* independent variable :math:`x`
* dependent variable :math:`y`
* functional form :math:`y = f(x;\theta)` parameterised by :math:`\theta`
Assuming that you have obtained samples of :math:`\theta` from an MCMC
process, we aim to compute the density:
.. math::
P(y|x) &= \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta \\
&= \int \delta(y-f(x;\theta)) P(\theta) d\theta
which gives our degree of knowledge for each :math:`y=f(x;\theta)` value
given an :math:`x` value.
In fact, for a more representative plot, we are not actually
interested in the value of the probability density above, but in fact
require the "iso-probablity posterior mass"
.. math::
\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'
We thus need to compute this function on a rectangular grid of :math:`x`
and :math:`y`.
"""
import numpy
import fgivenx.samples
import fgivenx.mass
import fgivenx.dkl
import fgivenx.plot
import matplotlib.pyplot as plt
from fgivenx._utils import _check_args, _normalise_weights,\
_equally_weight_samples
def plot_contours(f, x, samples, ax=None, **kwargs):
    r"""
    Plot the probability mass function given `x` over a range of :math:`y`
    values for :math:`y = f(x|\theta)`
    :math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta`
    :math:`\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'`
    If lists of log-evidences, functions and samples are supplied, the
    probability mass function is marginalised over all models according to
    the evidences.
    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model) with
        dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.
    samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) to evaluate
        :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`
    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
        onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to
        get the last axis used, or create a new one.
    logZ, weights, ny, y, ntrim, cache, parallel, tqdm_kwargs: optional
        computation keywords, forwarded unchanged to
        :func:`fgivenx.drivers.compute_pmf` (see its docstring for details).
    kwargs: further keyword arguments
        Any remaining keyword arguments are plotting keywords that are passed
        to :func:`fgivenx.plot.plot`.
    Returns
    -------
    cbar: color bar
        :class:`matplotlib.contour.QuadContourSet`
        Colors to create a global colour bar
    """
    # Split off the computation keywords; whatever is left in `kwargs` is
    # forwarded to the plotting routine.
    defaults = (('logZ', None), ('weights', None), ('ntrim', None),
                ('ny', 100), ('y', None), ('cache', ''),
                ('parallel', False), ('tqdm_kwargs', {}))
    compute_kwargs = {key: kwargs.pop(key, default)
                      for key, default in defaults}
    y, pmf = compute_pmf(f, x, samples, **compute_kwargs)
    return fgivenx.plot.plot(x, y, pmf, ax, **kwargs)
def plot_lines(f, x, samples, ax=None, **kwargs):
    r"""
    Plot a representative set of function realisations drawn from the samples.
    If lists of log-evidences, functions and samples are supplied, the lines
    are drawn from all models marginalised according to the evidences.
    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model) with
        dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.
    samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) to evaluate
        :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`
    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot the lines
        onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to
        get the last axis used, or create a new one.
    logZ, weights, ntrim, cache, parallel, tqdm_kwargs: optional
        computation keywords, forwarded unchanged to
        :func:`fgivenx.drivers.compute_samples` (see its docstring).
    kwargs: further keyword arguments
        Any remaining keyword arguments are plotting keywords that are passed
        to :func:`fgivenx.plot.plot_lines`.
    """
    # Separate the computation keywords from the plotting keywords.
    compute_keys = (('logZ', None), ('weights', None), ('ntrim', None),
                    ('cache', ''), ('parallel', False), ('tqdm_kwargs', {}))
    compute_kwargs = {key: kwargs.pop(key, default)
                      for key, default in compute_keys}
    fsamps = compute_samples(f, x, samples, **compute_kwargs)
    fgivenx.plot.plot_lines(x, fsamps, ax, **kwargs)
def compute_samples(f, x, samples, **kwargs):
    r"""
    Apply the function(s) :math:`f(x;\theta)` to the arrays defined in `x`
    and `samples`, with optional weighting, trimming, cacheing and
    parallelism.
    If lists of log-evidences, functions, samples (and optional weights) are
    supplied, the models are marginalised according to the evidences.
    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model) with
        dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.
    samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) to evaluate
        :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`
    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`
    weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have length
        same as `samples.shape[0]`.
        Default: `numpy.ones_like(samples)`
    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful if
        the posterior is dramatically oversampled.
        Default: None
    cache: str, optional
        File root for saving previous calculations for re-use.
        Default: None
    parallel, tqdm_args:
        see docstring for :func:`fgivenx.parallel.parallel_apply`
    Returns
    -------
    2D numpy.array
        Evaluate the function `f` at each x value and each theta.
        Equivalent to `[[f(x_i,theta) for theta in samples] for x_i in x]`
    """
    opts = {name: kwargs.pop(name, default)
            for name, default in (('logZ', None), ('weights', None),
                                  ('ntrim', None), ('cache', ''),
                                  ('parallel', False),
                                  ('tqdm_kwargs', {}))}
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)
    # Sanity-check the inputs and promote the single-model case to lists.
    logZ, f, x, samples, weights = _check_args(opts['logZ'], f, x, samples,
                                               opts['weights'])
    # Renormalise weights across models by evidence (and optional trimming).
    logZ, weights = _normalise_weights(logZ, weights, opts['ntrim'])
    # Convert each weighted sample set into an equally weighted one.
    samples = [_equally_weight_samples(s, w)
               for s, w in zip(samples, weights)]
    return fgivenx.samples.compute_samples(f, x, samples,
                                           parallel=opts['parallel'],
                                           cache=opts['cache'],
                                           tqdm_kwargs=opts['tqdm_kwargs'])
def compute_pmf(f, x, samples, **kwargs):
    r"""
    Compute the probability mass function given `x` at a range of `y` values
    for :math:`y = f(x|\theta)`
    :math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta`
    :math:`\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'`
    Additionally, if a list of log-evidences are passed, along with list of
    functions, samples and optional weights it marginalises over the models
    according to the evidences.
    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model) with
        dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.
    samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) to evaluate
        :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`
    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`
    weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have length
        same as `samples.shape[0]`.
        Default: `numpy.ones_like(samples)`
    ny: int, optional
        Resolution of y axis.
        Default: `100`
    y: array-like, optional
        Explicit descriptor of `y` values to evaluate.
        Default: `numpy.linspace(min(f), max(f), ny)`
    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful if
        the posterior is dramatically oversampled.
        Default: None
    cache: str, optional
        File root for saving previous calculations for re-use
    parallel, tqdm_args:
        see docstring for :func:`fgivenx.parallel.parallel_apply`
    Returns
    -------
    1D numpy.array:
        `y` values pmf is computed at `shape=(len(y))` or `ny`
    2D numpy.array:
        pmf values at each `x` and `y` `shape=(len(x),len(y))`
    Raises
    ------
    TypeError:
        if any unrecognised keyword arguments are passed.
    ValueError:
        if an explicitly supplied `y` is not one-dimensional.
    """
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    ny = kwargs.pop('ny', 100)
    y = kwargs.pop('y', None)
    ntrim = kwargs.pop('ntrim', 100000)
    parallel = kwargs.pop('parallel', False)
    cache = kwargs.pop('cache', '')
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)
    # Validate an explicitly supplied y grid.
    if y is not None:
        y = numpy.array(y, dtype='double')
        # Bug fix: the original `len(y.shape) is not 1` compared identity
        # against an int literal, which is implementation-defined and raises
        # SyntaxWarning on CPython >= 3.8; use a value comparison instead.
        if y.ndim != 1:
            raise ValueError("y should be a 1D array")
    fsamps = compute_samples(f, x, samples, logZ=logZ,
                             weights=weights, ntrim=ntrim,
                             parallel=parallel, cache=cache,
                             tqdm_kwargs=tqdm_kwargs)
    if y is None:
        # Default y grid: span the finite range of the function samples,
        # ignoring NaNs produced by failed function evaluations.
        finite = fsamps[~numpy.isnan(fsamps)]
        y = numpy.linspace(finite.min(), finite.max(), ny)
    return y, fgivenx.mass.compute_pmf(fsamps, y, parallel=parallel,
                                       cache=cache, tqdm_kwargs=tqdm_kwargs)
def compute_dkl(f, x, samples, prior_samples, **kwargs):
    r"""
    Compute the Kullback-Leibler divergence at each value of `x` between the
    prior and posterior defined by `prior_samples` and `samples`.
    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model) with
        dependent variable :math:`x`, parameterised by :math:`\theta`.
    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.
    samples, prior_samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) from
        posterior and prior to evaluate :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`
    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`
    weights, prior_weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have length
        same as `samples.shape[0]`.
        Default: `numpy.ones_like(samples)`
    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful if
        the posterior is dramatically oversampled.
        Default: None
    cache, prior_cache: str, optional
        File roots for saving previous calculations for re-use
    parallel, tqdm_args:
        see docstring for :func:`fgivenx.parallel.parallel_apply`
    Returns
    -------
    1D numpy array:
        dkl values at each value of `x`.
    """
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    prior_weights = kwargs.pop('prior_weights', None)
    ntrim = kwargs.pop('ntrim', None)
    cache = kwargs.pop('cache', '')
    prior_cache = kwargs.pop('prior_cache', '')
    parallel = kwargs.pop('parallel', False)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)
    # Promote the single-model call signature to the multi-model one so the
    # loop below handles both uniformly.
    if logZ is None:
        logZ, f = [0], [f]
        samples, prior_samples = [samples], [prior_samples]
        weights, prior_weights = [weights], [prior_weights]
        cache, prior_cache = [cache], [prior_cache]
    per_model_dkls = []
    models = zip(f, cache, prior_cache, samples, weights,
                 prior_samples, prior_weights)
    for fi, c, pc, s, w, ps, pw in models:
        # Function realisations under posterior and prior samples.
        posterior_fsamps = compute_samples(fi, x, s, weights=w, ntrim=ntrim,
                                           parallel=parallel, cache=c,
                                           tqdm_kwargs=tqdm_kwargs)
        prior_fsamps = compute_samples(fi, x, ps, weights=pw, ntrim=ntrim,
                                       parallel=parallel, cache=pc,
                                       tqdm_kwargs=tqdm_kwargs)
        per_model_dkls.append(
            fgivenx.dkl.compute_dkl(posterior_fsamps, prior_fsamps,
                                    parallel=parallel, cache=c,
                                    tqdm_kwargs=tqdm_kwargs))
    # Marginalise over models, weighting each DKL curve by its normalised
    # evidence (log-evidences shifted by their max for numerical stability).
    evidences = numpy.exp(numpy.array(logZ) - numpy.max(logZ))
    evidences /= evidences.sum()
    return numpy.sum(evidences * numpy.array(per_model_dkls).transpose(),
                     axis=1)
|
williamjameshandley/fgivenx | fgivenx/drivers.py | compute_samples | python | def compute_samples(f, x, samples, **kwargs):
r"""
Apply the function(s) :math:`f(x;\theta)` to the arrays defined in `x` and
`samples`. Has options for weighting, trimming, cacheing & parallelising.
Additionally, if a list of log-evidences are passed, along with list of
functions, samples and optional weights it marginalises over the models
according to the evidences.
Parameters
----------
f: function
function :math:`f(x;\theta)` (or list of functions for each model) with
dependent variable :math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
`x` values to evaluate :math:`f(x;\theta)` at.
samples: 2D array-like
:math:`\theta` samples (or list of :math:`\theta` samples) to evaluate
:math:`f(x;\theta)` at.
`shape = (nsamples, npars)`
logZ: 1D array-like, optional
log-evidences of each model if multiple models are passed.
Should be same length as the list `f`, and need not be normalised.
Default: `numpy.ones_like(f)`
weights: 1D array-like, optional
sample weights (or list of weights), if desired. Should have length
same as `samples.shape[0]`.
Default: `numpy.ones_like(samples)`
ntrim: int, optional
Approximate number of samples to trim down to, if desired. Useful if
the posterior is dramatically oversampled.
Default: None
cache: str, optional
File root for saving previous calculations for re-use.
Default: None
parallel, tqdm_args:
see docstring for :func:`fgivenx.parallel.parallel_apply`
Returns
-------
2D numpy.array
Evaluate the function `f` at each x value and each theta.
Equivalent to `[[f(x_i,theta) for theta in samples] for x_i in x]`
"""
logZ = kwargs.pop('logZ', None)
weights = kwargs.pop('weights', None)
ntrim = kwargs.pop('ntrim', None)
cache = kwargs.pop('cache', '')
parallel = kwargs.pop('parallel', False)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
logZ, f, x, samples, weights = _check_args(logZ, f, x, samples, weights)
logZ, weights = _normalise_weights(logZ, weights, ntrim)
for i, (s, w) in enumerate(zip(samples, weights)):
samples[i] = _equally_weight_samples(s, w)
return fgivenx.samples.compute_samples(f, x, samples,
parallel=parallel, cache=cache,
tqdm_kwargs=tqdm_kwargs) | r"""
Apply the function(s) :math:`f(x;\theta)` to the arrays defined in `x` and
`samples`. Has options for weighting, trimming, cacheing & parallelising.
Additionally, if a list of log-evidences are passed, along with list of
functions, samples and optional weights it marginalises over the models
according to the evidences.
Parameters
----------
f: function
function :math:`f(x;\theta)` (or list of functions for each model) with
dependent variable :math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
`x` values to evaluate :math:`f(x;\theta)` at.
samples: 2D array-like
:math:`\theta` samples (or list of :math:`\theta` samples) to evaluate
:math:`f(x;\theta)` at.
`shape = (nsamples, npars)`
logZ: 1D array-like, optional
log-evidences of each model if multiple models are passed.
Should be same length as the list `f`, and need not be normalised.
Default: `numpy.ones_like(f)`
weights: 1D array-like, optional
sample weights (or list of weights), if desired. Should have length
same as `samples.shape[0]`.
Default: `numpy.ones_like(samples)`
ntrim: int, optional
Approximate number of samples to trim down to, if desired. Useful if
the posterior is dramatically oversampled.
Default: None
cache: str, optional
File root for saving previous calculations for re-use.
Default: None
parallel, tqdm_args:
see docstring for :func:`fgivenx.parallel.parallel_apply`
Returns
-------
2D numpy.array
Evaluate the function `f` at each x value and each theta.
Equivalent to `[[f(x_i,theta) for theta in samples] for x_i in x]` | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/drivers.py#L283-L352 | [
"def _check_args(logZ, f, x, samples, weights):\n \"\"\" Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`.\n\n Parameters\n ----------\n f, x, samples, weights:\n see arguments for :func:`fgivenx.drivers.compute_samples`\n \"\"\"\n # convert to arrays\n if logZ is None:\n logZ = [0]\n f = [f]\n samples = [samples]\n weights = [weights]\n\n # logZ\n logZ = numpy.array(logZ, dtype='double')\n if len(logZ.shape) is not 1:\n raise ValueError(\"logZ should be a 1D array\")\n\n # x\n x = numpy.array(x, dtype='double')\n if len(x.shape) is not 1:\n raise ValueError(\"x should be a 1D array\")\n\n # f\n if len(logZ) != len(f):\n raise ValueError(\"len(logZ) = %i != len(f)= %i\"\n % (len(logZ), len(f)))\n for func in f:\n if not callable(func):\n raise ValueError(\"first argument f must be function\"\n \"(or list of functions) of two variables\")\n\n # samples\n if len(logZ) != len(samples):\n raise ValueError(\"len(logZ) = %i != len(samples)= %i\"\n % (len(logZ), len(samples)))\n samples = [numpy.array(s, dtype='double') for s in samples]\n for s in samples:\n if len(s.shape) is not 2:\n raise ValueError(\"each set of samples should be a 2D array\")\n\n # weights\n if len(logZ) != len(weights):\n raise ValueError(\"len(logZ) = %i != len(weights)= %i\"\n % (len(logZ), len(weights)))\n weights = [numpy.array(w, dtype='double') if w is not None\n else numpy.ones(len(s), dtype='double')\n for w, s in zip(weights, samples)]\n\n for w, s in zip(weights, samples):\n if len(w.shape) is not 1:\n raise ValueError(\"each set of weights should be a 1D array\")\n if len(w) != len(s):\n raise ValueError(\"len(w) = %i != len(s) = %i\" % (len(s), len(w)))\n\n return logZ, f, x, samples, weights\n",
"def _normalise_weights(logZ, weights, ntrim=None):\n \"\"\" Correctly normalise the weights for trimming\n\n This takes a list of log-evidences, and re-normalises the weights so that\n the largest weight across all samples is 1, and the total weight in each\n set of samples is proportional to the evidence.\n\n Parameters\n ----------\n logZ: array-like\n log-evidences to weight each set of weights by\n\n weights: array-like of numpy.array\n list of not necessarily equal length list of weights\n\n Returns\n -------\n logZ: numpy.array\n evidences, renormalised so that max(logZ) = 0\n\n weights: list of 1D numpy.array\n normalised weights\n \"\"\"\n logZ -= logZ.max()\n\n Zs = numpy.exp(logZ)\n\n weights = [w/w.sum()*Z for w, Z in zip(weights, Zs)]\n\n wmax = max([w.max() for w in weights])\n\n weights = [w/wmax for w in weights]\n\n ntot = sum([w.sum() for w in weights])\n\n if ntrim is not None and ntrim < ntot:\n weights = [w*ntrim/ntot for w in weights]\n\n return logZ, weights\n",
"def _equally_weight_samples(samples, weights):\n \"\"\" Convert samples to be equally weighted.\n\n Samples are trimmed by discarding samples in accordance with a probability\n determined by the corresponding weight.\n\n This function has assumed you have normalised the weights properly.\n If in doubt, convert weights via: `weights /= weights.max()`\n\n Parameters\n ----------\n samples: array-like\n Samples to trim.\n\n weights: array-like\n Weights to trim by.\n\n Returns\n -------\n 1D numpy.array:\n Equally weighted sample array. `shape=(len(samples))`\n \"\"\"\n if len(weights) != len(samples):\n raise ValueError(\"len(weights) = %i != len(samples) = %i\" %\n (len(weights), len(samples)))\n\n if numpy.logical_or(weights < 0, weights > 1).any():\n raise ValueError(\"weights must have probability between 0 and 1\")\n\n weights = numpy.array(weights)\n samples = numpy.array(samples)\n\n state = numpy.random.get_state()\n\n numpy.random.seed(1)\n n = len(weights)\n choices = numpy.random.rand(n) < weights\n\n new_samples = samples[choices]\n\n numpy.random.set_state(state)\n\n return new_samples.copy()\n",
"def compute_samples(f, x, samples, **kwargs):\n r\"\"\" Apply f(x,theta) to x array and theta in samples.\n\n Parameters\n ----------\n f: function\n list of functions :math:`f(x;\\theta)` with dependent variable\n :math:`x`, parameterised by :math:`\\theta`.\n\n x: 1D array-like\n x values to evaluate :math:`f(x;\\theta)` at.\n\n samples: 2D array-like\n list of theta samples to evaluate :math:`f(x;\\theta)` at.\n `shape = (nfunc, nsamples, npars)`\n\n parallel, tqdm_kwargs: optional\n see docstring for :func:`fgivenx.parallel.parallel_apply`\n\n cache: str, optional\n File root for saving previous calculations for re-use\n default None\n\n Returns\n -------\n 2D numpy.array:\n samples at each x. `shape=(len(x),len(samples),)`\n \"\"\"\n\n parallel = kwargs.pop('parallel', False)\n cache = kwargs.pop('cache', '')\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})\n if kwargs:\n raise TypeError('Unexpected **kwargs: %r' % kwargs)\n\n if cache:\n cache = Cache(cache + '_fsamples')\n try:\n return cache.check(x, samples)\n except CacheException as e:\n print(e)\n\n fsamples = []\n for fi, s in zip(f, samples):\n if len(s) > 0:\n fsamps = parallel_apply(fi, s, precurry=(x,), parallel=parallel,\n tqdm_kwargs=tqdm_kwargs)\n fsamps = numpy.array(fsamps).transpose().copy()\n fsamples.append(fsamps)\n fsamples = numpy.concatenate(fsamples, axis=1)\n\n if cache:\n cache.save(x, samples, fsamples)\n\n return fsamples\n"
] | r"""This module provides utilities for computing the grid for contours of a
function reconstruction plot.
Required ingredients:
* sampled posterior probability distribution :math:`P(\theta)`
* independent variable :math:`x`
* dependent variable :math:`y`
* functional form :math:`y = f(x;\theta)` parameterised by :math:`\theta`
Assuming that you have obtained samples of :math:`\theta` from an MCMC
process, we aim to compute the density:
.. math::
P(y|x) &= \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta \\
&= \int \delta(y-f(x;\theta)) P(\theta) d\theta
which gives our degree of knowledge for each :math:`y=f(x;\theta)` value
given an :math:`x` value.
In fact, for a more representative plot, we are not actually
interested in the value of the probability density above, but in fact
require the "iso-probablity posterior mass"
.. math::
\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'
We thus need to compute this function on a rectangular grid of :math:`x`
and :math:`y`.
"""
import numpy
import fgivenx.samples
import fgivenx.mass
import fgivenx.dkl
import fgivenx.plot
import matplotlib.pyplot as plt
from fgivenx._utils import _check_args, _normalise_weights,\
_equally_weight_samples
def plot_contours(f, x, samples, ax=None, **kwargs):
    r"""
    Plot contours of the iso-probability posterior mass of
    :math:`y = f(x;\theta)` on a grid of `x` and `y`.

    The quantity contoured is

    :math:`\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'`

    where :math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta`.

    If lists of functions, samples and log-evidences are supplied, the
    contours are marginalised over the models according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) to
        evaluate :math:`f(x;\theta)` at. `shape = (nsamples, npars)`

    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
        onto. Default: :func:`matplotlib.pyplot.gca()`.

    logZ, weights, ntrim, ny, y, cache, parallel, tqdm_kwargs: optional
        Forwarded to :func:`compute_pmf`; see its docstring for details.

    kwargs: further keyword arguments
        Any remaining keyword arguments are plotting keywords passed to
        :func:`fgivenx.plot.plot`.

    Returns
    -------
    cbar: color bar
        :class:`matplotlib.contour.QuadContourSet`
        Colors to create a global colour bar.
    """
    # Split off the keywords destined for compute_pmf; whatever remains in
    # kwargs is forwarded untouched to the plotting routine.
    defaults = dict(logZ=None, weights=None, ntrim=None, ny=100, y=None,
                    cache='', parallel=False, tqdm_kwargs={})
    pmf_kwargs = {name: kwargs.pop(name, default)
                  for name, default in defaults.items()}

    y, pmf = compute_pmf(f, x, samples, **pmf_kwargs)

    return fgivenx.plot.plot(x, y, pmf, ax, **kwargs)
def plot_lines(f, x, samples, ax=None, **kwargs):
    r"""
    Plot a representative set of function samples :math:`y = f(x;\theta)`.

    If lists of functions, samples and log-evidences are supplied, the
    plotted lines are drawn from the models marginalised according to the
    evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) to
        evaluate :math:`f(x;\theta)` at. `shape = (nsamples, npars)`

    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot the lines
        onto. Default: :func:`matplotlib.pyplot.gca()`.

    logZ, weights, ntrim, cache, parallel, tqdm_kwargs: optional
        Forwarded to :func:`compute_samples`; see its docstring for details.

    kwargs: further keyword arguments
        Any remaining keyword arguments are plotting keywords passed to
        :func:`fgivenx.plot.plot_lines`.
    """
    # Keywords consumed here; everything else goes to the line plotter.
    sample_keys = (('logZ', None), ('weights', None), ('ntrim', None),
                   ('cache', ''), ('parallel', False), ('tqdm_kwargs', {}))
    sample_kwargs = {key: kwargs.pop(key, default)
                     for key, default in sample_keys}

    fsamps = compute_samples(f, x, samples, **sample_kwargs)
    fgivenx.plot.plot_lines(x, fsamps, ax, **kwargs)
def plot_dkl(f, x, samples, prior_samples, ax=None, **kwargs):
    r"""
    Plot the Kullback-Leibler divergence at each value of :math:`x` for the
    prior and posterior defined by `prior_samples` and `samples`.

    With posterior :math:`P(y|x)` and prior :math:`Q(y|x)`, the quantity
    plotted is

    :math:`D_\mathrm{KL}(x)=\int P(y|x)\ln\left[\frac{Q(y|x)}{P(y|x)}\right]dy`

    If lists of functions, samples and log-evidences are supplied, the
    divergence is marginalised over the models according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples, prior_samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) from
        posterior and prior to evaluate :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`

    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot onto.
        Default: :func:`matplotlib.pyplot.gca()`.

    logZ, weights, prior_weights, ntrim, cache, prior_cache, parallel,
    tqdm_kwargs: optional
        Forwarded to :func:`compute_dkl`; see its docstring for details.

    kwargs: further keyword arguments
        Any remaining keyword arguments are plotting keywords passed to
        :meth:`matplotlib.axes.Axes.plot`.
    """
    # Keywords consumed by compute_dkl; the rest style the plotted line.
    dkl_defaults = dict(logZ=None, weights=None, prior_weights=None,
                        ntrim=None, cache='', prior_cache='',
                        parallel=False, tqdm_kwargs={})
    dkl_kwargs = {key: kwargs.pop(key, default)
                  for key, default in dkl_defaults.items()}

    dkls = compute_dkl(f, x, samples, prior_samples, **dkl_kwargs)

    axes = plt.gca() if ax is None else ax
    axes.plot(x, dkls, **kwargs)
def compute_pmf(f, x, samples, **kwargs):
    r"""
    Compute the probability mass function given `x` at a range of `y` values
    for :math:`y = f(x|\theta)`

    :math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta`

    :math:`\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'`

    Additionally, if a list of log-evidences are passed, along with list of
    functions, samples and optional weights it marginalises over the models
    according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) to
        evaluate :math:`f(x;\theta)` at. `shape = (nsamples, npars)`

    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`

    weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have length
        same as `samples.shape[0]`.
        Default: `numpy.ones_like(samples)`

    ny: int, optional
        Resolution of y axis. Default: `100`

    y: array-like, optional
        Explicit descriptor of `y` values to evaluate.
        Default: `numpy.linspace(min(f), max(f), ny)`

    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful if
        the posterior is dramatically oversampled. Default: 100000

    cache: str, optional
        File root for saving previous calculations for re-use

    parallel, tqdm_kwargs:
        see docstring for :func:`fgivenx.parallel.parallel_apply`

    Raises
    ------
    TypeError
        If unexpected keyword arguments are supplied.
    ValueError
        If an explicit `y` is supplied that is not a 1D array.

    Returns
    -------
    1D numpy.array:
        `y` values pmf is computed at `shape=(len(y))` or `ny`
    2D numpy.array:
        pmf values at each `x` and `y` `shape=(len(x),len(y))`
    """
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    ny = kwargs.pop('ny', 100)
    y = kwargs.pop('y', None)
    ntrim = kwargs.pop('ntrim', 100000)
    parallel = kwargs.pop('parallel', False)
    cache = kwargs.pop('cache', '')
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    # Validate any user-supplied y grid before doing the expensive work.
    # NB: the comparison must be `!=`, not `is not` -- identity comparison
    # against an int literal is implementation-dependent and raises a
    # SyntaxWarning on Python >= 3.8.
    if y is not None:
        y = numpy.array(y, dtype='double')
        if y.ndim != 1:
            raise ValueError("y should be a 1D array")

    fsamps = compute_samples(f, x, samples, logZ=logZ,
                             weights=weights, ntrim=ntrim,
                             parallel=parallel, cache=cache,
                             tqdm_kwargs=tqdm_kwargs)

    # Default y grid spans the full (non-nan) range of the function samples.
    if y is None:
        ymin = fsamps[~numpy.isnan(fsamps)].min(axis=None)
        ymax = fsamps[~numpy.isnan(fsamps)].max(axis=None)
        y = numpy.linspace(ymin, ymax, ny)

    return y, fgivenx.mass.compute_pmf(fsamps, y, parallel=parallel,
                                       cache=cache, tqdm_kwargs=tqdm_kwargs)
def compute_dkl(f, x, samples, prior_samples, **kwargs):
    r"""
    Compute the Kullback-Leibler divergence at each value of `x` for the
    prior and posterior defined by `prior_samples` and `samples`.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples, prior_samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) from
        posterior and prior to evaluate :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`

    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`

    weights, prior_weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have length
        same as `samples.shape[0]`.
        Default: `numpy.ones_like(samples)`

    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful if
        the posterior is dramatically oversampled. Default: None

    cache, prior_cache: str, optional
        File roots for saving previous calculations for re-use

    parallel, tqdm_kwargs:
        see docstring for :func:`fgivenx.parallel.parallel_apply`

    Raises
    ------
    TypeError
        If unexpected keyword arguments are supplied.

    Returns
    -------
    1D numpy array:
        dkl values at each value of `x`.
    """
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    prior_weights = kwargs.pop('prior_weights', None)
    ntrim = kwargs.pop('ntrim', None)
    cache = kwargs.pop('cache', '')
    prior_cache = kwargs.pop('prior_cache', '')
    parallel = kwargs.pop('parallel', False)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    # A single model is treated as a one-element model list with logZ = 0.
    if logZ is None:
        logZ = [0]
        f, samples, prior_samples = [f], [samples], [prior_samples]
        weights, prior_weights = [weights], [prior_weights]
        cache, prior_cache = [cache], [prior_cache]

    def _model_dkl(fi, ci, pci, si, wi, psi, pwi):
        # Evaluate posterior and prior function samples, then reduce to a
        # per-x Kullback-Leibler divergence for this single model.
        post = compute_samples(fi, x, si, weights=wi, ntrim=ntrim,
                               parallel=parallel, cache=ci,
                               tqdm_kwargs=tqdm_kwargs)
        prior = compute_samples(fi, x, psi, weights=pwi, ntrim=ntrim,
                                parallel=parallel, cache=pci,
                                tqdm_kwargs=tqdm_kwargs)
        return fgivenx.dkl.compute_dkl(post, prior, parallel=parallel,
                                       cache=ci, tqdm_kwargs=tqdm_kwargs)

    DKLs = numpy.array([_model_dkl(*model_args) for model_args in
                        zip(f, cache, prior_cache, samples, weights,
                            prior_samples, prior_weights)])

    # Marginalise over models with normalised evidence weights.
    logZ = numpy.array(logZ)
    Zs = numpy.exp(logZ - logZ.max())
    Zs /= Zs.sum()
    return numpy.sum(Zs * DKLs.transpose(), axis=1)
|
williamjameshandley/fgivenx | fgivenx/drivers.py | compute_pmf | python | def compute_pmf(f, x, samples, **kwargs):
r"""
Compute the probability mass function given `x` at a range of `x` values
for :math:`y = f(x|\theta)`
:math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta`
:math:`\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'`
Additionally, if a list of log-evidences are passed, along with list of
functions, samples and optional weights it marginalises over the models
according to the evidences.
Parameters
----------
f: function
function :math:`f(x;\theta)` (or list of functions for each model) with
dependent variable :math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
`x` values to evaluate :math:`f(x;\theta)` at.
samples: 2D array-like
:math:`\theta` samples (or list of :math:`\theta` samples) to evaluate
:math:`f(x;\theta)` at.
`shape = (nsamples, npars)`
logZ: 1D array-like, optional
log-evidences of each model if multiple models are passed.
Should be same length as the list `f`, and need not be normalised.
Default: `numpy.ones_like(f)`
weights: 1D array-like, optional
sample weights (or list of weights), if desired. Should have length
same as `samples.shape[0]`.
Default: `numpy.ones_like(samples)`
ny: int, optional
Resolution of y axis.
Default: `100`
y: array-like, optional
Explicit descriptor of `y` values to evaluate.
Default: `numpy.linspace(min(f), max(f), ny)`
ntrim: int, optional
Approximate number of samples to trim down to, if desired. Useful if
the posterior is dramatically oversampled.
Default: None
cache: str, optional
File root for saving previous calculations for re-use
parallel, tqdm_args:
see docstring for :func:`fgivenx.parallel.parallel_apply`
Returns
-------
1D numpy.array:
`y` values pmf is computed at `shape=(len(y))` or `ny`
2D numpy.array:
pmf values at each `x` and `y` `shape=(len(x),len(y))`
"""
logZ = kwargs.pop('logZ', None)
weights = kwargs.pop('weights', None)
ny = kwargs.pop('ny', 100)
y = kwargs.pop('y', None)
ntrim = kwargs.pop('ntrim', 100000)
parallel = kwargs.pop('parallel', False)
cache = kwargs.pop('cache', '')
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
# y
if y is not None:
y = numpy.array(y, dtype='double')
if len(y.shape) is not 1:
raise ValueError("y should be a 1D array")
fsamps = compute_samples(f, x, samples, logZ=logZ,
weights=weights, ntrim=ntrim,
parallel=parallel, cache=cache,
tqdm_kwargs=tqdm_kwargs)
if y is None:
ymin = fsamps[~numpy.isnan(fsamps)].min(axis=None)
ymax = fsamps[~numpy.isnan(fsamps)].max(axis=None)
y = numpy.linspace(ymin, ymax, ny)
return y, fgivenx.mass.compute_pmf(fsamps, y, parallel=parallel,
cache=cache, tqdm_kwargs=tqdm_kwargs) | r"""
Compute the probability mass function given `x` at a range of `x` values
for :math:`y = f(x|\theta)`
:math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta`
:math:`\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'`
Additionally, if a list of log-evidences are passed, along with list of
functions, samples and optional weights it marginalises over the models
according to the evidences.
Parameters
----------
f: function
function :math:`f(x;\theta)` (or list of functions for each model) with
dependent variable :math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
`x` values to evaluate :math:`f(x;\theta)` at.
samples: 2D array-like
:math:`\theta` samples (or list of :math:`\theta` samples) to evaluate
:math:`f(x;\theta)` at.
`shape = (nsamples, npars)`
logZ: 1D array-like, optional
log-evidences of each model if multiple models are passed.
Should be same length as the list `f`, and need not be normalised.
Default: `numpy.ones_like(f)`
weights: 1D array-like, optional
sample weights (or list of weights), if desired. Should have length
same as `samples.shape[0]`.
Default: `numpy.ones_like(samples)`
ny: int, optional
Resolution of y axis.
Default: `100`
y: array-like, optional
Explicit descriptor of `y` values to evaluate.
Default: `numpy.linspace(min(f), max(f), ny)`
ntrim: int, optional
Approximate number of samples to trim down to, if desired. Useful if
the posterior is dramatically oversampled.
Default: None
cache: str, optional
File root for saving previous calculations for re-use
parallel, tqdm_args:
see docstring for :func:`fgivenx.parallel.parallel_apply`
Returns
-------
1D numpy.array:
`y` values pmf is computed at `shape=(len(y))` or `ny`
2D numpy.array:
pmf values at each `x` and `y` `shape=(len(x),len(y))` | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/drivers.py#L355-L448 | [
"def compute_pmf(fsamps, y, **kwargs):\n \"\"\" Compute the pmf defined by fsamps at each x for each y.\n\n Parameters\n ----------\n fsamps: 2D array-like\n array of function samples, as returned by\n :func:`fgivenx.compute_samples`\n\n y: 1D array-like\n y values to evaluate the PMF at\n\n parallel, tqdm_kwargs: optional\n see docstring for :func:`fgivenx.parallel.parallel_apply`.\n\n Returns\n -------\n 2D numpy.array\n probability mass function at each x for each y\n `shape=(len(fsamps),len(y)`\n \"\"\"\n parallel = kwargs.pop('parallel', False)\n cache = kwargs.pop('cache', '')\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})\n if kwargs:\n raise TypeError('Unexpected **kwargs: %r' % kwargs)\n\n if cache:\n cache = Cache(cache + '_masses')\n try:\n return cache.check(fsamps, y)\n except CacheException as e:\n print(e)\n\n masses = parallel_apply(PMF, fsamps, postcurry=(y,), parallel=parallel,\n tqdm_kwargs=tqdm_kwargs)\n masses = numpy.array(masses).transpose().copy()\n\n if cache:\n cache.save(fsamps, y, masses)\n\n return masses\n",
"def compute_samples(f, x, samples, **kwargs):\n r\"\"\"\n Apply the function(s) :math:`f(x;\\theta)` to the arrays defined in `x` and\n `samples`. Has options for weighting, trimming, cacheing & parallelising.\n\n Additionally, if a list of log-evidences are passed, along with list of\n functions, samples and optional weights it marginalises over the models\n according to the evidences.\n\n Parameters\n ----------\n f: function\n function :math:`f(x;\\theta)` (or list of functions for each model) with\n dependent variable :math:`x`, parameterised by :math:`\\theta`.\n\n x: 1D array-like\n `x` values to evaluate :math:`f(x;\\theta)` at.\n\n samples: 2D array-like\n :math:`\\theta` samples (or list of :math:`\\theta` samples) to evaluate\n :math:`f(x;\\theta)` at.\n `shape = (nsamples, npars)`\n\n logZ: 1D array-like, optional\n log-evidences of each model if multiple models are passed.\n Should be same length as the list `f`, and need not be normalised.\n Default: `numpy.ones_like(f)`\n\n weights: 1D array-like, optional\n sample weights (or list of weights), if desired. Should have length\n same as `samples.shape[0]`.\n Default: `numpy.ones_like(samples)`\n\n ntrim: int, optional\n Approximate number of samples to trim down to, if desired. 
Useful if\n the posterior is dramatically oversampled.\n Default: None\n\n cache: str, optional\n File root for saving previous calculations for re-use.\n Default: None\n\n parallel, tqdm_args:\n see docstring for :func:`fgivenx.parallel.parallel_apply`\n\n Returns\n -------\n 2D numpy.array\n Evaluate the function `f` at each x value and each theta.\n Equivalent to `[[f(x_i,theta) for theta in samples] for x_i in x]`\n \"\"\"\n logZ = kwargs.pop('logZ', None)\n weights = kwargs.pop('weights', None)\n ntrim = kwargs.pop('ntrim', None)\n cache = kwargs.pop('cache', '')\n parallel = kwargs.pop('parallel', False)\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})\n if kwargs:\n raise TypeError('Unexpected **kwargs: %r' % kwargs)\n\n logZ, f, x, samples, weights = _check_args(logZ, f, x, samples, weights)\n\n logZ, weights = _normalise_weights(logZ, weights, ntrim)\n\n for i, (s, w) in enumerate(zip(samples, weights)):\n samples[i] = _equally_weight_samples(s, w)\n\n return fgivenx.samples.compute_samples(f, x, samples,\n parallel=parallel, cache=cache,\n tqdm_kwargs=tqdm_kwargs)\n"
] | r"""This module provides utilities for computing the grid for contours of a
function reconstruction plot.
Required ingredients:
* sampled posterior probability distribution :math:`P(\theta)`
* independent variable :math:`x`
* dependent variable :math:`y`
* functional form :math:`y = f(x;\theta)` parameterised by :math:`\theta`
Assuming that you have obtained samples of :math:`\theta` from an MCMC
process, we aim to compute the density:
.. math::
P(y|x) &= \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta \\
&= \int \delta(y-f(x;\theta)) P(\theta) d\theta
which gives our degree of knowledge for each :math:`y=f(x;\theta)` value
given an :math:`x` value.
In fact, for a more representative plot, we are not actually
interested in the value of the probability density above, but in fact
require the "iso-probablity posterior mass"
.. math::
\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'
We thus need to compute this function on a rectangular grid of :math:`x`
and :math:`y`.
"""
import numpy
import fgivenx.samples
import fgivenx.mass
import fgivenx.dkl
import fgivenx.plot
import matplotlib.pyplot as plt
from fgivenx._utils import _check_args, _normalise_weights,\
_equally_weight_samples
def plot_contours(f, x, samples, ax=None, **kwargs):
    r"""
    Plot the probability mass function given `x` at a range of :math:`y`
    values for :math:`y = f(x|\theta)` as filled contours.

    :math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta`

    :math:`\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'`

    If lists of log-evidences, functions and samples are passed, the
    probability mass function is marginalised over the models according to
    the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) to
        evaluate :math:`f(x;\theta)` at. `shape = (nsamples, npars)`

    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
        onto. Default: :func:`matplotlib.pyplot.gca()`.

    logZ, weights, ntrim, ny, y, cache, parallel, tqdm_kwargs: optional
        Forwarded to :func:`compute_pmf`; see its docstring for details.

    kwargs: further keyword arguments
        Any remaining keyword arguments are plotting keywords passed to
        :func:`fgivenx.plot.plot`.

    Returns
    -------
    cbar: color bar
        :class:`matplotlib.contour.QuadContourSet`
        Colors to create a global colour bar.
    """
    # Pop the keywords compute_pmf understands; the remainder of kwargs is
    # handed to the contour plotter untouched.
    pmf_keys = ('logZ', 'weights', 'ntrim', 'ny', 'y',
                'cache', 'parallel', 'tqdm_kwargs')
    pmf_defaults = (None, None, None, 100, None, '', False, {})
    pmf_kwargs = {key: kwargs.pop(key, default)
                  for key, default in zip(pmf_keys, pmf_defaults)}

    y_grid, pmf = compute_pmf(f, x, samples, **pmf_kwargs)

    return fgivenx.plot.plot(x, y_grid, pmf, ax, **kwargs)
def plot_lines(f, x, samples, ax=None, **kwargs):
    r"""
    Plot a representative set of sampled functions :math:`y = f(x;\theta)`.

    If lists of log-evidences, functions and samples are passed, the lines
    are drawn from the models marginalised according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) to
        evaluate :math:`f(x;\theta)` at. `shape = (nsamples, npars)`

    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot the lines
        onto. Default: :func:`matplotlib.pyplot.gca()`.

    logZ, weights, ntrim, cache, parallel, tqdm_kwargs: optional
        Forwarded to :func:`compute_samples`; see its docstring for details.

    kwargs: further keyword arguments
        Any remaining keyword arguments are plotting keywords passed to
        :func:`fgivenx.plot.plot_lines`.
    """
    # Separate the sample-computation keywords from the plotting keywords.
    sample_kwargs = {}
    for key, default in (('logZ', None), ('weights', None), ('ntrim', None),
                         ('cache', ''), ('parallel', False),
                         ('tqdm_kwargs', {})):
        sample_kwargs[key] = kwargs.pop(key, default)

    fsamps = compute_samples(f, x, samples, **sample_kwargs)
    fgivenx.plot.plot_lines(x, fsamps, ax, **kwargs)
def plot_dkl(f, x, samples, prior_samples, ax=None, **kwargs):
    r"""
    Plot the Kullback-Leibler divergence at each value of :math:`x` for the
    prior and posterior defined by `prior_samples` and `samples`.

    With posterior :math:`P(y|x)` and prior :math:`Q(y|x)`, the quantity
    plotted is

    :math:`D_\mathrm{KL}(x)=\int P(y|x)\ln\left[\frac{Q(y|x)}{P(y|x)}\right]dy`

    If lists of log-evidences, functions and samples are passed, the
    divergence is marginalised over the models according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples, prior_samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) from
        posterior and prior to evaluate :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`

    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot onto.
        Default: :func:`matplotlib.pyplot.gca()`.

    logZ, weights, prior_weights, ntrim, cache, prior_cache, parallel,
    tqdm_kwargs: optional
        Forwarded to :func:`compute_dkl`; see its docstring for details.

    kwargs: further keyword arguments
        Any remaining keyword arguments are plotting keywords passed to
        :meth:`matplotlib.axes.Axes.plot`.
    """
    # Collect the keywords destined for compute_dkl; leftover kwargs style
    # the plotted line.
    compute_keys = ['logZ', 'weights', 'prior_weights', 'ntrim',
                    'cache', 'prior_cache', 'parallel', 'tqdm_kwargs']
    compute_defaults = [None, None, None, None, '', '', False, {}]
    opts = {}
    for key, default in zip(compute_keys, compute_defaults):
        opts[key] = kwargs.pop(key, default)

    dkls = compute_dkl(f, x, samples, prior_samples, **opts)

    target = ax if ax is not None else plt.gca()
    target.plot(x, dkls, **kwargs)
def compute_samples(f, x, samples, **kwargs):
    r"""
    Apply the function(s) :math:`f(x;\theta)` to the arrays defined in `x`
    and `samples`. Has options for weighting, trimming, cacheing &
    parallelising.

    If lists of log-evidences, functions, samples and optional weights are
    passed, the models are marginalised according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) to
        evaluate :math:`f(x;\theta)` at. `shape = (nsamples, npars)`

    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`

    weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have length
        same as `samples.shape[0]`.
        Default: `numpy.ones_like(samples)`

    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful if
        the posterior is dramatically oversampled. Default: None

    cache: str, optional
        File root for saving previous calculations for re-use.
        Default: None

    parallel, tqdm_kwargs:
        see docstring for :func:`fgivenx.parallel.parallel_apply`

    Raises
    ------
    TypeError
        If unexpected keyword arguments are supplied.

    Returns
    -------
    2D numpy.array
        Evaluate the function `f` at each x value and each theta.
        Equivalent to `[[f(x_i,theta) for theta in samples] for x_i in x]`
    """
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    ntrim = kwargs.pop('ntrim', None)
    cache = kwargs.pop('cache', '')
    parallel = kwargs.pop('parallel', False)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    # Normalise everything to per-model lists, then thin each model's
    # samples so that they are equally weighted.
    logZ, f, x, samples, weights = _check_args(logZ, f, x, samples, weights)
    logZ, weights = _normalise_weights(logZ, weights, ntrim)
    samples = [_equally_weight_samples(s, w)
               for s, w in zip(samples, weights)]

    return fgivenx.samples.compute_samples(f, x, samples,
                                           parallel=parallel, cache=cache,
                                           tqdm_kwargs=tqdm_kwargs)
def compute_dkl(f, x, samples, prior_samples, **kwargs):
    r"""
    Compute the Kullback-Leibler divergence at each value of `x` for the prior
    and posterior defined by `prior_samples` and `samples`.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples, prior_samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) from
        posterior and prior to evaluate :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`

    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`

    weights, prior_weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have length
        same as `samples.shape[0]`.
        Default: `numpy.ones_like(samples)`

    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful if
        the posterior is dramatically oversampled.
        Default: None

    cache, prior_cache: str, optional
        File roots for saving previous calculations for re-use

    parallel, tqdm_args:
        see docstring for :func:`fgivenx.parallel.parallel_apply`

    Returns
    -------
    1D numpy array:
        dkl values at each value of `x`.

    Raises
    ------
    TypeError:
        if any unrecognised keyword argument is supplied -- no further
        keyword arguments are forwarded anywhere.
    """
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    prior_weights = kwargs.pop('prior_weights', None)
    ntrim = kwargs.pop('ntrim', None)
    cache = kwargs.pop('cache', '')
    prior_cache = kwargs.pop('prior_cache', '')
    parallel = kwargs.pop('parallel', False)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)
    # Single-model case: wrap every argument in a singleton list so the
    # marginalisation loop below handles both cases uniformly.
    if logZ is None:
        logZ = [0]
        f = [f]
        samples = [samples]
        prior_samples = [prior_samples]
        weights = [weights]
        prior_weights = [prior_weights]
        cache = [cache]
        prior_cache = [prior_cache]
    DKLs = []
    for fi, c, pc, s, w, ps, pw in zip(f, cache, prior_cache, samples, weights,
                                       prior_samples, prior_weights):
        # Function samples for this model's posterior and prior.
        fsamps = compute_samples(fi, x, s, weights=w, ntrim=ntrim,
                                 parallel=parallel, cache=c,
                                 tqdm_kwargs=tqdm_kwargs)
        fsamps_prior = compute_samples(fi, x, ps, weights=pw, ntrim=ntrim,
                                       parallel=parallel, cache=pc,
                                       tqdm_kwargs=tqdm_kwargs)
        dkls = fgivenx.dkl.compute_dkl(fsamps, fsamps_prior,
                                       parallel=parallel, cache=c,
                                       tqdm_kwargs=tqdm_kwargs)
        DKLs.append(dkls)
    logZ = numpy.array(logZ)
    DKLs = numpy.array(DKLs)
    # Evidence-weighted average over models; shift by the maximum before
    # exponentiating for numerical stability.
    Zs = numpy.exp(logZ-logZ.max())
    Zs /= Zs.sum()
    return numpy.sum(Zs * DKLs.transpose(), axis=1)
|
williamjameshandley/fgivenx | fgivenx/drivers.py | compute_dkl | python | def compute_dkl(f, x, samples, prior_samples, **kwargs):
r"""
Compute the Kullback-Leibler divergence at each value of `x` for the prior
and posterior defined by `prior_samples` and `samples`.
Parameters
----------
f: function
function :math:`f(x;\theta)` (or list of functions for each model) with
dependent variable :math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
`x` values to evaluate :math:`f(x;\theta)` at.
samples, prior_samples: 2D array-like
:math:`\theta` samples (or list of :math:`\theta` samples) from
posterior and prior to evaluate :math:`f(x;\theta)` at.
`shape = (nsamples, npars)`
logZ: 1D array-like, optional
log-evidences of each model if multiple models are passed.
Should be same length as the list `f`, and need not be normalised.
Default: `numpy.ones_like(f)`
weights, prior_weights: 1D array-like, optional
sample weights (or list of weights), if desired. Should have length
same as `samples.shape[0]`.
Default: `numpy.ones_like(samples)`
ntrim: int, optional
Approximate number of samples to trim down to, if desired. Useful if
the posterior is dramatically oversampled.
Default: None
cache, prior_cache: str, optional
File roots for saving previous calculations for re-use
parallel, tqdm_args:
see docstring for :func:`fgivenx.parallel.parallel_apply`
kwargs: further keyword arguments
Any further keyword arguments are plotting keywords that are passed to
:func:`fgivenx.plot.plot`.
Returns
-------
1D numpy array:
dkl values at each value of `x`.
"""
logZ = kwargs.pop('logZ', None)
weights = kwargs.pop('weights', None)
prior_weights = kwargs.pop('prior_weights', None)
ntrim = kwargs.pop('ntrim', None)
cache = kwargs.pop('cache', '')
prior_cache = kwargs.pop('prior_cache', '')
parallel = kwargs.pop('parallel', False)
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
if logZ is None:
logZ = [0]
f = [f]
samples = [samples]
prior_samples = [prior_samples]
weights = [weights]
prior_weights = [prior_weights]
cache = [cache]
prior_cache = [prior_cache]
DKLs = []
for fi, c, pc, s, w, ps, pw in zip(f, cache, prior_cache, samples, weights,
prior_samples, prior_weights):
fsamps = compute_samples(fi, x, s, weights=w, ntrim=ntrim,
parallel=parallel, cache=c,
tqdm_kwargs=tqdm_kwargs)
fsamps_prior = compute_samples(fi, x, ps, weights=pw, ntrim=ntrim,
parallel=parallel, cache=pc,
tqdm_kwargs=tqdm_kwargs)
dkls = fgivenx.dkl.compute_dkl(fsamps, fsamps_prior,
parallel=parallel, cache=c,
tqdm_kwargs=tqdm_kwargs)
DKLs.append(dkls)
logZ = numpy.array(logZ)
DKLs = numpy.array(DKLs)
Zs = numpy.exp(logZ-logZ.max())
Zs /= Zs.sum()
return numpy.sum(Zs * DKLs.transpose(), axis=1) | r"""
Compute the Kullback-Leibler divergence at each value of `x` for the prior
and posterior defined by `prior_samples` and `samples`.
Parameters
----------
f: function
function :math:`f(x;\theta)` (or list of functions for each model) with
dependent variable :math:`x`, parameterised by :math:`\theta`.
x: 1D array-like
`x` values to evaluate :math:`f(x;\theta)` at.
samples, prior_samples: 2D array-like
:math:`\theta` samples (or list of :math:`\theta` samples) from
posterior and prior to evaluate :math:`f(x;\theta)` at.
`shape = (nsamples, npars)`
logZ: 1D array-like, optional
log-evidences of each model if multiple models are passed.
Should be same length as the list `f`, and need not be normalised.
Default: `numpy.ones_like(f)`
weights, prior_weights: 1D array-like, optional
sample weights (or list of weights), if desired. Should have length
same as `samples.shape[0]`.
Default: `numpy.ones_like(samples)`
ntrim: int, optional
Approximate number of samples to trim down to, if desired. Useful if
the posterior is dramatically oversampled.
Default: None
cache, prior_cache: str, optional
File roots for saving previous calculations for re-use
parallel, tqdm_args:
see docstring for :func:`fgivenx.parallel.parallel_apply`
kwargs: further keyword arguments
Any further keyword arguments are plotting keywords that are passed to
:func:`fgivenx.plot.plot`.
Returns
-------
1D numpy array:
dkl values at each value of `x`. | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/drivers.py#L451-L546 | [
"def compute_dkl(fsamps, prior_fsamps, **kwargs):\n \"\"\"\n Compute the Kullback Leibler divergence for function samples for posterior\n and prior pre-calculated at a range of x values.\n\n Parameters\n ----------\n fsamps: 2D numpy.array\n Posterior function samples, as computed by\n :func:`fgivenx.compute_samples`\n\n prior_fsamps: 2D numpy.array\n Prior function samples, as computed by :func:`fgivenx.compute_samples`\n\n parallel, tqdm_kwargs: optional\n see docstring for :func:`fgivenx.parallel.parallel_apply`.\n\n cache: str, optional\n File root for saving previous calculations for re-use.\n\n Returns\n -------\n 1D numpy.array:\n Kullback-Leibler divergences at each value of x. `shape=(len(fsamps))`\n \"\"\"\n\n parallel = kwargs.pop('parallel', False)\n cache = kwargs.pop('cache', '')\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})\n if kwargs:\n raise TypeError('Unexpected **kwargs: %r' % kwargs)\n\n if cache:\n cache = Cache(cache + '_dkl')\n try:\n return cache.check(fsamps, prior_fsamps)\n except CacheException as e:\n print(e)\n\n zip_fsamps = list(zip(fsamps, prior_fsamps))\n dkls = parallel_apply(DKL, zip_fsamps, parallel=parallel,\n tqdm_kwargs=tqdm_kwargs)\n dkls = numpy.array(dkls)\n\n if cache:\n cache.save(fsamps, prior_fsamps, dkls)\n\n return dkls\n",
"def compute_samples(f, x, samples, **kwargs):\n r\"\"\"\n Apply the function(s) :math:`f(x;\\theta)` to the arrays defined in `x` and\n `samples`. Has options for weighting, trimming, cacheing & parallelising.\n\n Additionally, if a list of log-evidences are passed, along with list of\n functions, samples and optional weights it marginalises over the models\n according to the evidences.\n\n Parameters\n ----------\n f: function\n function :math:`f(x;\\theta)` (or list of functions for each model) with\n dependent variable :math:`x`, parameterised by :math:`\\theta`.\n\n x: 1D array-like\n `x` values to evaluate :math:`f(x;\\theta)` at.\n\n samples: 2D array-like\n :math:`\\theta` samples (or list of :math:`\\theta` samples) to evaluate\n :math:`f(x;\\theta)` at.\n `shape = (nsamples, npars)`\n\n logZ: 1D array-like, optional\n log-evidences of each model if multiple models are passed.\n Should be same length as the list `f`, and need not be normalised.\n Default: `numpy.ones_like(f)`\n\n weights: 1D array-like, optional\n sample weights (or list of weights), if desired. Should have length\n same as `samples.shape[0]`.\n Default: `numpy.ones_like(samples)`\n\n ntrim: int, optional\n Approximate number of samples to trim down to, if desired. 
Useful if\n the posterior is dramatically oversampled.\n Default: None\n\n cache: str, optional\n File root for saving previous calculations for re-use.\n Default: None\n\n parallel, tqdm_args:\n see docstring for :func:`fgivenx.parallel.parallel_apply`\n\n Returns\n -------\n 2D numpy.array\n Evaluate the function `f` at each x value and each theta.\n Equivalent to `[[f(x_i,theta) for theta in samples] for x_i in x]`\n \"\"\"\n logZ = kwargs.pop('logZ', None)\n weights = kwargs.pop('weights', None)\n ntrim = kwargs.pop('ntrim', None)\n cache = kwargs.pop('cache', '')\n parallel = kwargs.pop('parallel', False)\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})\n if kwargs:\n raise TypeError('Unexpected **kwargs: %r' % kwargs)\n\n logZ, f, x, samples, weights = _check_args(logZ, f, x, samples, weights)\n\n logZ, weights = _normalise_weights(logZ, weights, ntrim)\n\n for i, (s, w) in enumerate(zip(samples, weights)):\n samples[i] = _equally_weight_samples(s, w)\n\n return fgivenx.samples.compute_samples(f, x, samples,\n parallel=parallel, cache=cache,\n tqdm_kwargs=tqdm_kwargs)\n"
] | r"""This module provides utilities for computing the grid for contours of a
function reconstruction plot.
Required ingredients:
* sampled posterior probability distribution :math:`P(\theta)`
* independent variable :math:`x`
* dependent variable :math:`y`
* functional form :math:`y = f(x;\theta)` parameterised by :math:`\theta`
Assuming that you have obtained samples of :math:`\theta` from an MCMC
process, we aim to compute the density:
.. math::
P(y|x) &= \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta \\
&= \int \delta(y-f(x;\theta)) P(\theta) d\theta
which gives our degree of knowledge for each :math:`y=f(x;\theta)` value
given an :math:`x` value.
In fact, for a more representative plot, we are not actually
interested in the value of the probability density above, but in fact
require the "iso-probablity posterior mass"
.. math::
\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'
We thus need to compute this function on a rectangular grid of :math:`x`
and :math:`y`.
"""
import numpy
import fgivenx.samples
import fgivenx.mass
import fgivenx.dkl
import fgivenx.plot
import matplotlib.pyplot as plt
from fgivenx._utils import _check_args, _normalise_weights,\
_equally_weight_samples
def plot_contours(f, x, samples, ax=None, **kwargs):
    r"""
    Plot the probability mass function given `x` at a range of :math:`y`
    values for :math:`y = f(x|\theta)`

    :math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta`

    :math:`\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'`

    If lists of log-evidences, functions and samples are passed, the
    probability mass function is marginalised over all models according to
    the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples),
        `shape = (nsamples, npars)`.

    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
        onto. If unsupplied, :func:`matplotlib.pyplot.gca()` is used.

    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Default: `numpy.ones_like(f)`

    weights: 1D array-like, optional
        sample weights (or list of weights), with length `samples.shape[0]`.
        Default: `numpy.ones_like(samples)`

    ny: int, optional
        Resolution of `y` axis. Default: `100`

    y: array-like, optional
        Explicit descriptor of `y` values to evaluate.
        Default: `numpy.linspace(min(f), max(f), ny)`

    ntrim: int, optional
        Approximate number of samples to trim down to. Default: None

    cache: str, optional
        File root for saving previous calculations for re-use

    parallel, tqdm_args:
        see docstring for :func:`fgivenx.parallel.parallel_apply`

    kwargs: further keyword arguments
        Any remaining keyword arguments are forwarded to
        :func:`fgivenx.plot.plot` as plotting options.

    Returns
    -------
    cbar: color bar
        :class:`matplotlib.contour.QuadContourSet`
        Colors to create a global colour bar
    """
    # Keywords consumed here; whatever is left in kwargs goes to the plotter.
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    ntrim = kwargs.pop('ntrim', None)
    ny = kwargs.pop('ny', 100)
    y = kwargs.pop('y', None)
    cache = kwargs.pop('cache', '')
    parallel = kwargs.pop('parallel', False)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})

    y, pmf = compute_pmf(f, x, samples, logZ=logZ, weights=weights,
                         ntrim=ntrim, ny=ny, y=y, parallel=parallel,
                         cache=cache, tqdm_kwargs=tqdm_kwargs)

    return fgivenx.plot.plot(x, y, pmf, ax, **kwargs)
def plot_lines(f, x, samples, ax=None, **kwargs):
    r"""
    Plot a representative set of function samples :math:`y=f(x;\theta)`.

    If lists of log-evidences, functions and samples are passed, the set of
    plotted lines is marginalised over all models according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples),
        `shape = (nsamples, npars)`.

    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot onto. If
        unsupplied, :func:`matplotlib.pyplot.gca()` is used.

    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Default: `numpy.ones_like(f)`

    weights: 1D array-like, optional
        sample weights (or list of weights), with length `samples.shape[0]`.
        Default: `numpy.ones_like(samples)`

    ntrim: int, optional
        Approximate number of samples to trim down to. Default: None

    cache: str, optional
        File root for saving previous calculations for re-use

    parallel, tqdm_args:
        see docstring for :func:`fgivenx.parallel.parallel_apply`

    kwargs: further keyword arguments
        Any remaining keyword arguments are forwarded to
        :func:`fgivenx.plot.plot_lines`.
    """
    # Keywords consumed here; whatever is left in kwargs goes to the plotter.
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    ntrim = kwargs.pop('ntrim', None)
    cache = kwargs.pop('cache', '')
    parallel = kwargs.pop('parallel', False)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})

    fsamps = compute_samples(f, x, samples, logZ=logZ, weights=weights,
                             ntrim=ntrim, parallel=parallel, cache=cache,
                             tqdm_kwargs=tqdm_kwargs)

    fgivenx.plot.plot_lines(x, fsamps, ax, **kwargs)
def plot_dkl(f, x, samples, prior_samples, ax=None, **kwargs):
    r"""
    Plot the Kullback-Leibler divergence at each value of :math:`x` for the
    prior and posterior defined by `prior_samples` and `samples`.

    Let the posterior be:

    :math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta)P(\theta) d\theta`

    and the prior be:

    :math:`Q(y|x) = \int P(y=f(x;\theta)|x,\theta)Q(\theta) d\theta`

    then the Kullback-Leibler divergence at each x is

    :math:`D_\mathrm{KL}(x)=\int P(y|x)\ln\left[\frac{Q(y|x)}{P(y|x)}\right]dy`

    If lists of log-evidences, functions and samples are passed, the
    divergence is marginalised over all models according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples, prior_samples: 2D array-like
        :math:`\theta` samples (or lists of samples) from posterior and
        prior, `shape = (nsamples, npars)`.

    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot onto. If
        unsupplied, :func:`matplotlib.pyplot.gca()` is used.

    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Default: `numpy.ones_like(f)`

    weights, prior_weights: 1D array-like, optional
        sample weights (or lists of weights), with length
        `samples.shape[0]`. Default: `numpy.ones_like(samples)`

    ntrim: int, optional
        Approximate number of samples to trim down to. Default: None

    cache, prior_cache: str, optional
        File roots for saving previous calculations for re-use

    parallel, tqdm_args:
        see docstring for :func:`fgivenx.parallel.parallel_apply`

    kwargs: further keyword arguments
        Any remaining keyword arguments are forwarded to
        :meth:`matplotlib.axes.Axes.plot`.
    """
    # Keywords consumed here; whatever is left in kwargs goes to ax.plot.
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    prior_weights = kwargs.pop('prior_weights', None)
    ntrim = kwargs.pop('ntrim', None)
    cache = kwargs.pop('cache', '')
    prior_cache = kwargs.pop('prior_cache', '')
    parallel = kwargs.pop('parallel', False)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})

    dkls = compute_dkl(f, x, samples, prior_samples, logZ=logZ,
                       weights=weights, prior_weights=prior_weights,
                       ntrim=ntrim, cache=cache, prior_cache=prior_cache,
                       parallel=parallel, tqdm_kwargs=tqdm_kwargs)

    if ax is None:
        ax = plt.gca()
    ax.plot(x, dkls, **kwargs)
def compute_samples(f, x, samples, **kwargs):
    r"""
    Apply the function(s) :math:`f(x;\theta)` to the arrays defined in `x` and
    `samples`. Has options for weighting, trimming, cacheing & parallelising.

    Additionally, if a list of log-evidences are passed, along with list of
    functions, samples and optional weights it marginalises over the models
    according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) to
        evaluate :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`

    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`

    weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have length
        same as `samples.shape[0]`.
        Default: `numpy.ones_like(samples)`

    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful if
        the posterior is dramatically oversampled.
        Default: None

    cache: str, optional
        File root for saving previous calculations for re-use.
        Default: None

    parallel, tqdm_args:
        see docstring for :func:`fgivenx.parallel.parallel_apply`

    Returns
    -------
    2D numpy.array
        Evaluate the function `f` at each x value and each theta.
        Equivalent to `[[f(x_i,theta) for theta in samples] for x_i in x]`
    """
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    ntrim = kwargs.pop('ntrim', None)
    cache = kwargs.pop('cache', '')
    parallel = kwargs.pop('parallel', False)
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)
    # Normalise every argument into per-model lists of matching length.
    logZ, f, x, samples, weights = _check_args(logZ, f, x, samples, weights)
    logZ, weights = _normalise_weights(logZ, weights, ntrim)
    # Replace each weighted sample set with an equally-weighted equivalent.
    for i, (s, w) in enumerate(zip(samples, weights)):
        samples[i] = _equally_weight_samples(s, w)
    return fgivenx.samples.compute_samples(f, x, samples,
                                           parallel=parallel, cache=cache,
                                           tqdm_kwargs=tqdm_kwargs)
def compute_pmf(f, x, samples, **kwargs):
    r"""
    Compute the probability mass function given `x` at a range of `y` values
    for :math:`y = f(x|\theta)`

    :math:`P(y|x) = \int P(y=f(x;\theta)|x,\theta) P(\theta) d\theta`

    :math:`\mathrm{pmf}(y|x) = \int_{P(y'|x) < P(y|x)} P(y'|x) dy'`

    Additionally, if a list of log-evidences are passed, along with list of
    functions, samples and optional weights it marginalises over the models
    according to the evidences.

    Parameters
    ----------
    f: function
        function :math:`f(x;\theta)` (or list of functions for each model)
        with dependent variable :math:`x`, parameterised by :math:`\theta`.

    x: 1D array-like
        `x` values to evaluate :math:`f(x;\theta)` at.

    samples: 2D array-like
        :math:`\theta` samples (or list of :math:`\theta` samples) to
        evaluate :math:`f(x;\theta)` at.
        `shape = (nsamples, npars)`

    logZ: 1D array-like, optional
        log-evidences of each model if multiple models are passed.
        Should be same length as the list `f`, and need not be normalised.
        Default: `numpy.ones_like(f)`

    weights: 1D array-like, optional
        sample weights (or list of weights), if desired. Should have length
        same as `samples.shape[0]`.
        Default: `numpy.ones_like(samples)`

    ny: int, optional
        Resolution of y axis. Default: `100`

    y: array-like, optional
        Explicit descriptor of `y` values to evaluate.
        Default: `numpy.linspace(min(f), max(f), ny)`

    ntrim: int, optional
        Approximate number of samples to trim down to, if desired. Useful if
        the posterior is dramatically oversampled.
        Default: 100000

    cache: str, optional
        File root for saving previous calculations for re-use

    parallel, tqdm_args:
        see docstring for :func:`fgivenx.parallel.parallel_apply`

    Returns
    -------
    1D numpy.array:
        `y` values pmf is computed at `shape=(len(y))` or `ny`
    2D numpy.array:
        pmf values at each `x` and `y` `shape=(len(x),len(y))`

    Raises
    ------
    ValueError:
        if an explicit `y` is supplied that is not one-dimensional.
    """
    logZ = kwargs.pop('logZ', None)
    weights = kwargs.pop('weights', None)
    ny = kwargs.pop('ny', 100)
    y = kwargs.pop('y', None)
    ntrim = kwargs.pop('ntrim', 100000)
    parallel = kwargs.pop('parallel', False)
    cache = kwargs.pop('cache', '')
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    # Validate any user-supplied y grid.
    if y is not None:
        y = numpy.array(y, dtype='double')
        # Bug fix: the original used `len(y.shape) is not 1`, an identity
        # comparison with an int literal (implementation-defined; CPython
        # >= 3.8 emits a SyntaxWarning).  Use value equality instead.
        if y.ndim != 1:
            raise ValueError("y should be a 1D array")

    fsamps = compute_samples(f, x, samples, logZ=logZ,
                             weights=weights, ntrim=ntrim,
                             parallel=parallel, cache=cache,
                             tqdm_kwargs=tqdm_kwargs)

    # Default y grid: linear span of the finite function-sample range.
    if y is None:
        ymin = fsamps[~numpy.isnan(fsamps)].min(axis=None)
        ymax = fsamps[~numpy.isnan(fsamps)].max(axis=None)
        y = numpy.linspace(ymin, ymax, ny)

    return y, fgivenx.mass.compute_pmf(fsamps, y, parallel=parallel,
                                       cache=cache, tqdm_kwargs=tqdm_kwargs)
|
williamjameshandley/fgivenx | fgivenx/dkl.py | DKL | python | def DKL(arrays):
samples, prior_samples = arrays
samples = samples[~numpy.isnan(samples)]
prior_samples = prior_samples[~numpy.isnan(prior_samples)]
return (
gaussian_kde(samples).logpdf(samples)
- gaussian_kde(prior_samples).logpdf(samples)
).mean() | Compute the Kullback-Leibler divergence from one distribution Q to another
P, where Q and P are represented by a set of samples.
Parameters
----------
arrays: tuple(1D numpy.array,1D numpy.array)
samples defining distributions P & Q respectively
Returns
-------
float:
Kullback Leibler divergence. | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/dkl.py#L7-L28 | null | import numpy
from scipy.stats import gaussian_kde
from fgivenx.io import CacheException, Cache
from fgivenx.parallel import parallel_apply
def compute_dkl(fsamps, prior_fsamps, **kwargs):
    """
    Kullback-Leibler divergence between posterior and prior function samples,
    evaluated independently at every x value.

    Parameters
    ----------
    fsamps: 2D numpy.array
        Posterior function samples, as computed by
        :func:`fgivenx.compute_samples`

    prior_fsamps: 2D numpy.array
        Prior function samples, as computed by :func:`fgivenx.compute_samples`

    parallel, tqdm_kwargs: optional
        see docstring for :func:`fgivenx.parallel.parallel_apply`.

    cache: str, optional
        File root for saving previous calculations for re-use.

    Returns
    -------
    1D numpy.array:
        Kullback-Leibler divergences at each value of x. `shape=(len(fsamps))`
    """
    parallel = kwargs.pop('parallel', False)
    cache = kwargs.pop('cache', '')
    tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)

    # Return the cached answer if the inputs are unchanged; otherwise fall
    # through and recompute.
    if cache:
        cache = Cache(cache + '_dkl')
        try:
            return cache.check(fsamps, prior_fsamps)
        except CacheException as e:
            print(e)

    # Pair up posterior and prior samples at each x and map DKL across them.
    pairs = list(zip(fsamps, prior_fsamps))
    dkls = numpy.array(parallel_apply(DKL, pairs, parallel=parallel,
                                      tqdm_kwargs=tqdm_kwargs))

    if cache:
        cache.save(fsamps, prior_fsamps, dkls)

    return dkls
|
williamjameshandley/fgivenx | fgivenx/dkl.py | compute_dkl | python | def compute_dkl(fsamps, prior_fsamps, **kwargs):
parallel = kwargs.pop('parallel', False)
cache = kwargs.pop('cache', '')
tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})
if kwargs:
raise TypeError('Unexpected **kwargs: %r' % kwargs)
if cache:
cache = Cache(cache + '_dkl')
try:
return cache.check(fsamps, prior_fsamps)
except CacheException as e:
print(e)
zip_fsamps = list(zip(fsamps, prior_fsamps))
dkls = parallel_apply(DKL, zip_fsamps, parallel=parallel,
tqdm_kwargs=tqdm_kwargs)
dkls = numpy.array(dkls)
if cache:
cache.save(fsamps, prior_fsamps, dkls)
return dkls | Compute the Kullback Leibler divergence for function samples for posterior
and prior pre-calculated at a range of x values.
Parameters
----------
fsamps: 2D numpy.array
Posterior function samples, as computed by
:func:`fgivenx.compute_samples`
prior_fsamps: 2D numpy.array
Prior function samples, as computed by :func:`fgivenx.compute_samples`
parallel, tqdm_kwargs: optional
see docstring for :func:`fgivenx.parallel.parallel_apply`.
cache: str, optional
File root for saving previous calculations for re-use.
Returns
-------
1D numpy.array:
Kullback-Leibler divergences at each value of x. `shape=(len(fsamps))` | train | https://github.com/williamjameshandley/fgivenx/blob/a16790652a3cef3cfacd4b97da62786cb66fec13/fgivenx/dkl.py#L31-L78 | [
"def parallel_apply(f, array, **kwargs):\n \"\"\" Apply a function to an array with openmp parallelisation.\n\n Equivalent to `[f(x) for x in array]`, but parallelised if required.\n\n Parameters\n ----------\n f: function\n Univariate function to apply to each element of array\n\n array: array-like\n Array to apply f to\n\n parallel: int or bool, optional\n int > 0: number of processes to parallelise over\n\n int < 0 or bool=True: use OMP_NUM_THREADS to choose parallelisation\n\n bool=False or int=0: do not parallelise\n\n tqdm_kwargs: dict, optional\n additional kwargs for tqdm progress bars.\n\n precurry: tuple, optional\n immutable arguments to pass to f before x,\n i.e. `[f(precurry,x) for x in array]`\n\n postcurry: tuple, optional\n immutable arguments to pass to f after x\n i.e. `[f(x,postcurry) for x in array]`\n\n Returns\n -------\n list:\n `[f(precurry,x,postcurry) for x in array]`\n parallelised according to parallel\n \"\"\"\n\n precurry = tuple(kwargs.pop('precurry', ()))\n postcurry = tuple(kwargs.pop('postcurry', ()))\n parallel = kwargs.pop('parallel', False)\n tqdm_kwargs = kwargs.pop('tqdm_kwargs', {})\n if kwargs:\n raise TypeError('Unexpected **kwargs: %r' % kwargs)\n try:\n # If running in a jupyter notebook then use tqdm_notebook.\n progress = tqdm_notebook if get_ipython().has_trait('kernel') else tqdm\n except (NameError, AssertionError):\n # Otherwise use regular tqdm progress bar\n progress = tqdm\n if not parallel:\n return [f(*(precurry + (x,) + postcurry)) for x in\n progress(array, **tqdm_kwargs)]\n elif parallel is True:\n parallel = cpu_count()\n elif isinstance(parallel, int):\n if parallel < 0:\n parallel = cpu_count()\n else:\n parallel = parallel\n else:\n raise ValueError(\"parallel keyword must be an integer or bool\")\n\n if parallel and not PARALLEL:\n warnings.warn(\"You need to install the package joblib\"\n \"if you want to use parallelisation\")\n\n return Parallel(n_jobs=parallel)(delayed(f)(*(precurry + (x,) + 
postcurry))\n for x in progress(array, **tqdm_kwargs))\n",
"def check(self, *args):\n \"\"\" Check that the arguments haven't changed since the last call.\n\n Parameters\n ----------\n *args:\n All but the last argument are inputs to the cached function. The\n last is the actual value of the function.\n\n Returns\n -------\n If arguments unchanged:\n return the cached answer\n else:\n indicate recomputation required by throwing a\n :class:`CacheException`.\n \"\"\"\n data = self.load()\n\n if len(data)-1 != len(args):\n raise ValueError(\"Wrong number of arguments passed to Cache.check\")\n\n try:\n for x, x_check in zip(data, args):\n if isinstance(x, list):\n if len(x) != len(x_check):\n raise CacheException\n for x_i, x_check_i in zip(x, x_check):\n if x_i.shape != x_check_i.shape:\n raise CacheException\n elif not numpy.allclose(x_i, x_check_i,\n equal_nan=True):\n raise CacheException\n elif x.shape != x_check.shape:\n raise CacheException\n elif not numpy.allclose(x, x_check, equal_nan=True):\n raise CacheException\n\n except CacheException:\n raise CacheChanged(self.file_root)\n\n print(CacheOK(self.file_root))\n return data[-1]\n",
"def save(self, *args):\n \"\"\" Save cache to file using pickle.\n\n Parameters\n ----------\n *args:\n All but the last argument are inputs to the cached function. The\n last is the actual value of the function.\n \"\"\"\n with open(self.file_root + '.pkl', \"wb\") as f:\n pickle.dump(args, f, protocol=pickle.HIGHEST_PROTOCOL)\n"
] | import numpy
from scipy.stats import gaussian_kde
from fgivenx.io import CacheException, Cache
from fgivenx.parallel import parallel_apply
def DKL(arrays):
"""
Compute the Kullback-Leibler divergence from one distribution Q to another
P, where Q and P are represented by a set of samples.
Parameters
----------
arrays: tuple(1D numpy.array,1D numpy.array)
samples defining distributions P & Q respectively
Returns
-------
float:
Kullback Leibler divergence.
"""
samples, prior_samples = arrays
samples = samples[~numpy.isnan(samples)]
prior_samples = prior_samples[~numpy.isnan(prior_samples)]
return (
gaussian_kde(samples).logpdf(samples)
- gaussian_kde(prior_samples).logpdf(samples)
).mean()
|
praekeltfoundation/seed-auth-api | authapi/views.py | get_true_false_both | python | def get_true_false_both(query_params, field_name, default):
'''Tries to get and return a valid of true, false, or both from the field
name in the query string, raises a ValidationError for invalid values.'''
valid = ('true', 'false', 'both')
value = query_params.get(field_name, default).lower()
if value in valid:
return value
v = ', '.join(sorted(valid))
raise serializers.ValidationError({
field_name: ['Must be one of [%s]' % v],
}) | Tries to get and return a valid of true, false, or both from the field
name in the query string, raises a ValidationError for invalid values. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L23-L33 | null | from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from rest_framework import viewsets, status, serializers
from rest_framework.authtoken.models import Token
from rest_framework.generics import get_object_or_404
from rest_framework.request import clone_request
from rest_framework.response import Response
from rest_framework.mixins import (
DestroyModelMixin, CreateModelMixin, RetrieveModelMixin, UpdateModelMixin,
ListModelMixin)
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from rest_framework_extensions.mixins import NestedViewSetMixin
from authapi.models import SeedOrganization, SeedTeam, SeedPermission
from authapi import permissions
from authapi.serializers import (
OrganizationSerializer, TeamSerializer, UserSerializer, NewUserSerializer,
PermissionSerializer, CreateTokenSerializer, PermissionsUserSerializer)
class OrganizationViewSet(viewsets.ModelViewSet):
queryset = SeedOrganization.objects.all()
serializer_class = OrganizationSerializer
permission_classes = (permissions.OrganizationPermission,)
def get_queryset(self):
'''We want to still be able to modify archived organizations, but they
shouldn't show up on list views.
We have an archived query param, where 'true' shows archived, 'false'
omits them, and 'both' shows both.'''
if self.action == 'list':
archived = get_true_false_both(
self.request.query_params, 'archived', 'false')
if archived == 'true':
return self.queryset.filter(archived=True)
if archived == 'false':
return self.queryset.filter(archived=False)
return self.queryset
def destroy(self, request, pk=None):
'''For DELETE actions, archive the organization, don't delete.'''
org = self.get_object()
org.archived = True
org.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class OrganizationUsersViewSet(NestedViewSetMixin, viewsets.ViewSet):
'''Nested viewset that allows users to add or remove users from
organizations.'''
permission_classes = (permissions.OrganizationUsersPermission,)
def update(self, request, pk=None, parent_lookup_organization=None):
'''Add a user to an organization.'''
user = get_object_or_404(User, pk=pk)
org = get_object_or_404(
SeedOrganization, pk=parent_lookup_organization)
self.check_object_permissions(request, org)
org.users.add(user)
return Response(status=status.HTTP_204_NO_CONTENT)
def destroy(self, request, pk=None, parent_lookup_organization=None):
'''Remove a user from an organization.'''
user = get_object_or_404(User, pk=pk)
org = get_object_or_404(
SeedOrganization, pk=parent_lookup_organization)
self.check_object_permissions(request, org)
org.users.remove(user)
return Response(status=status.HTTP_204_NO_CONTENT)
class BaseTeamViewSet(
NestedViewSetMixin, RetrieveModelMixin, UpdateModelMixin,
DestroyModelMixin, ListModelMixin, GenericViewSet):
queryset = SeedTeam.objects.all()
serializer_class = TeamSerializer
permission_classes = (permissions.TeamPermission,)
def get_queryset(self):
'''We want to still be able to modify archived organizations, but they
shouldn't show up on list views.
We have an archived query param, where 'true' shows archived, 'false'
omits them, and 'both' shows both.
We also have the query params permission_contains and object_id, which
allow users to filter the teams based on the permissions they
contain.'''
queryset = super(BaseTeamViewSet, self).get_queryset()
if self.action == 'list':
archived = get_true_false_both(
self.request.query_params, 'archived', 'false')
if archived == 'true':
queryset = queryset.filter(archived=True)
elif archived == 'false':
queryset = queryset.filter(archived=False)
permission = self.request.query_params.get(
'permission_contains', None)
if permission is not None:
queryset = queryset.filter(
permissions__type__contains=permission).distinct()
object_id = self.request.query_params.get('object_id', None)
if object_id is not None:
queryset = queryset.filter(
permissions__object_id=object_id).distinct()
namespace = self.request.query_params.get('namespace', None)
if namespace is not None:
queryset = queryset.filter(
permissions__namespace=namespace).distinct()
permission = permissions.TeamPermission()
queryset = [
team for team in queryset if
permission.has_object_permission(self.request, self, team)]
return queryset
def perform_destroy(self, instance):
instance.archived = True
instance.save()
class TeamViewSet(BaseTeamViewSet):
pass
class OrganizationTeamViewSet(BaseTeamViewSet, CreateModelMixin):
def create(self, request, parent_lookup_organization=None):
org = get_object_or_404(
SeedOrganization, pk=parent_lookup_organization)
permission = permissions.TeamCreatePermission()
if not permission.has_object_permission(request, self, org):
self.permission_denied(
request, message=getattr(permission, 'message', None)
)
request.data['organization'] = org.pk
return super(OrganizationTeamViewSet, self).create(request)
class TeamPermissionViewSet(
NestedViewSetMixin, DestroyModelMixin, GenericViewSet):
'''Nested viewset to add and remove permissions from teams.'''
queryset = SeedPermission.objects.all()
serializer_class = PermissionSerializer
permission_classes = (permissions.TeamPermissionPermission,)
def check_team_permissions(self, request, teamid, orgid=None):
if orgid is not None:
team = get_object_or_404(
SeedTeam, pk=teamid, organization_id=orgid)
else:
team = get_object_or_404(SeedTeam, pk=teamid)
permission = permissions.TeamPermission()
fake_request = clone_request(request, 'GET')
if not permission.has_object_permission(fake_request, self, team):
self.permission_denied(
request, message=getattr(permission, 'message', None)
)
return team
def create(
self, request, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Add a permission to a team.'''
team = self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
permission = team.permissions.create(**serializer.validated_data)
serializer = self.get_serializer(instance=permission)
return Response(serializer.data, status=status.HTTP_201_CREATED)
def destroy(
self, request, pk=None, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Remove a permission from a team.'''
self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
return super(TeamPermissionViewSet, self).destroy(
request, pk, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
class TeamUsersViewSet(NestedViewSetMixin, GenericViewSet):
'''Nested viewset that allows users to add or remove users from teams.'''
queryset = User.objects.all()
permission_classes = (IsAuthenticated,)
def check_team_permissions(self, request, teamid, orgid=None):
if orgid is not None:
team = get_object_or_404(
SeedTeam, pk=teamid, organization_id=orgid)
else:
team = get_object_or_404(SeedTeam, pk=teamid)
permission = permissions.TeamPermission()
fake_request = clone_request(request, 'PUT')
if not permission.has_object_permission(fake_request, self, team):
self.permission_denied(
request, message=getattr(permission, 'message', None)
)
return team
def update(
self, request, pk=None, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Add a user to a team.'''
user = get_object_or_404(User, pk=pk)
team = self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
team.users.add(user)
return Response(status=status.HTTP_204_NO_CONTENT)
def destroy(
self, request, pk=None, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Remove a user from an organization.'''
user = self.get_object()
team = self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
team.users.remove(user)
return Response(status=status.HTTP_204_NO_CONTENT)
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
permission_classes = (permissions.UserPermission,)
def get_serializer_class(self):
if self.action == 'create':
return NewUserSerializer
else:
return UserSerializer
def get_queryset(self):
'''We want to still be able to modify archived users, but they
shouldn't show up on list views.
We have an archived query param, where 'true' shows archived, 'false'
omits them, and 'both' shows both.'''
if self.action == 'list':
active = get_true_false_both(
self.request.query_params, 'active', 'true')
if active == 'true':
return self.queryset.filter(is_active=True)
if active == 'false':
return self.queryset.filter(is_active=False)
return self.queryset
def destroy(self, request, pk=None):
'''For DELETE actions, actually deactivate the user, don't delete.'''
user = self.get_object()
user.is_active = False
user.save()
return Response(status=status.HTTP_204_NO_CONTENT)
class TokenView(APIView):
permission_classes = (AllowAny,)
def post(self, request):
'''Create a token, given an email and password. Removes all other
tokens for that user.'''
serializer = CreateTokenSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
email = serializer.validated_data.get('email')
password = serializer.validated_data.get('password')
user = authenticate(username=email, password=password)
if not user:
return Response(status=status.HTTP_401_UNAUTHORIZED)
Token.objects.filter(user=user).delete()
token = Token.objects.create(user=user)
return Response(
status=status.HTTP_201_CREATED, data={'token': token.key})
class UserPermissionsView(APIView):
permission_classes = (IsAuthenticated,)
def get(self, request):
'''Get user information, with a list of permissions for that user.'''
user = request.user
serializer = PermissionsUserSerializer(
instance=user, context={'request': request})
return Response(data=serializer.data)
|
praekeltfoundation/seed-auth-api | authapi/views.py | OrganizationViewSet.get_queryset | python | def get_queryset(self):
'''We want to still be able to modify archived organizations, but they
shouldn't show up on list views.
We have an archived query param, where 'true' shows archived, 'false'
omits them, and 'both' shows both.'''
if self.action == 'list':
archived = get_true_false_both(
self.request.query_params, 'archived', 'false')
if archived == 'true':
return self.queryset.filter(archived=True)
if archived == 'false':
return self.queryset.filter(archived=False)
return self.queryset | We want to still be able to modify archived organizations, but they
shouldn't show up on list views.
We have an archived query param, where 'true' shows archived, 'false'
omits them, and 'both' shows both. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L41-L54 | [
"def get_true_false_both(query_params, field_name, default):\n '''Tries to get and return a valid of true, false, or both from the field\n name in the query string, raises a ValidationError for invalid values.'''\n valid = ('true', 'false', 'both')\n value = query_params.get(field_name, default).lower()\n if value in valid:\n return value\n v = ', '.join(sorted(valid))\n raise serializers.ValidationError({\n field_name: ['Must be one of [%s]' % v],\n })\n"
] | class OrganizationViewSet(viewsets.ModelViewSet):
queryset = SeedOrganization.objects.all()
serializer_class = OrganizationSerializer
permission_classes = (permissions.OrganizationPermission,)
def destroy(self, request, pk=None):
'''For DELETE actions, archive the organization, don't delete.'''
org = self.get_object()
org.archived = True
org.save()
return Response(status=status.HTTP_204_NO_CONTENT)
|
praekeltfoundation/seed-auth-api | authapi/views.py | OrganizationViewSet.destroy | python | def destroy(self, request, pk=None):
'''For DELETE actions, archive the organization, don't delete.'''
org = self.get_object()
org.archived = True
org.save()
return Response(status=status.HTTP_204_NO_CONTENT) | For DELETE actions, archive the organization, don't delete. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L56-L61 | null | class OrganizationViewSet(viewsets.ModelViewSet):
queryset = SeedOrganization.objects.all()
serializer_class = OrganizationSerializer
permission_classes = (permissions.OrganizationPermission,)
def get_queryset(self):
'''We want to still be able to modify archived organizations, but they
shouldn't show up on list views.
We have an archived query param, where 'true' shows archived, 'false'
omits them, and 'both' shows both.'''
if self.action == 'list':
archived = get_true_false_both(
self.request.query_params, 'archived', 'false')
if archived == 'true':
return self.queryset.filter(archived=True)
if archived == 'false':
return self.queryset.filter(archived=False)
return self.queryset
|
praekeltfoundation/seed-auth-api | authapi/views.py | OrganizationUsersViewSet.update | python | def update(self, request, pk=None, parent_lookup_organization=None):
'''Add a user to an organization.'''
user = get_object_or_404(User, pk=pk)
org = get_object_or_404(
SeedOrganization, pk=parent_lookup_organization)
self.check_object_permissions(request, org)
org.users.add(user)
return Response(status=status.HTTP_204_NO_CONTENT) | Add a user to an organization. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L69-L76 | null | class OrganizationUsersViewSet(NestedViewSetMixin, viewsets.ViewSet):
'''Nested viewset that allows users to add or remove users from
organizations.'''
permission_classes = (permissions.OrganizationUsersPermission,)
def destroy(self, request, pk=None, parent_lookup_organization=None):
'''Remove a user from an organization.'''
user = get_object_or_404(User, pk=pk)
org = get_object_or_404(
SeedOrganization, pk=parent_lookup_organization)
self.check_object_permissions(request, org)
org.users.remove(user)
return Response(status=status.HTTP_204_NO_CONTENT)
|
praekeltfoundation/seed-auth-api | authapi/views.py | OrganizationUsersViewSet.destroy | python | def destroy(self, request, pk=None, parent_lookup_organization=None):
'''Remove a user from an organization.'''
user = get_object_or_404(User, pk=pk)
org = get_object_or_404(
SeedOrganization, pk=parent_lookup_organization)
self.check_object_permissions(request, org)
org.users.remove(user)
return Response(status=status.HTTP_204_NO_CONTENT) | Remove a user from an organization. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L78-L85 | null | class OrganizationUsersViewSet(NestedViewSetMixin, viewsets.ViewSet):
'''Nested viewset that allows users to add or remove users from
organizations.'''
permission_classes = (permissions.OrganizationUsersPermission,)
def update(self, request, pk=None, parent_lookup_organization=None):
'''Add a user to an organization.'''
user = get_object_or_404(User, pk=pk)
org = get_object_or_404(
SeedOrganization, pk=parent_lookup_organization)
self.check_object_permissions(request, org)
org.users.add(user)
return Response(status=status.HTTP_204_NO_CONTENT)
|
praekeltfoundation/seed-auth-api | authapi/views.py | BaseTeamViewSet.get_queryset | python | def get_queryset(self):
'''We want to still be able to modify archived organizations, but they
shouldn't show up on list views.
We have an archived query param, where 'true' shows archived, 'false'
omits them, and 'both' shows both.
We also have the query params permission_contains and object_id, which
allow users to filter the teams based on the permissions they
contain.'''
queryset = super(BaseTeamViewSet, self).get_queryset()
if self.action == 'list':
archived = get_true_false_both(
self.request.query_params, 'archived', 'false')
if archived == 'true':
queryset = queryset.filter(archived=True)
elif archived == 'false':
queryset = queryset.filter(archived=False)
permission = self.request.query_params.get(
'permission_contains', None)
if permission is not None:
queryset = queryset.filter(
permissions__type__contains=permission).distinct()
object_id = self.request.query_params.get('object_id', None)
if object_id is not None:
queryset = queryset.filter(
permissions__object_id=object_id).distinct()
namespace = self.request.query_params.get('namespace', None)
if namespace is not None:
queryset = queryset.filter(
permissions__namespace=namespace).distinct()
permission = permissions.TeamPermission()
queryset = [
team for team in queryset if
permission.has_object_permission(self.request, self, team)]
return queryset | We want to still be able to modify archived organizations, but they
shouldn't show up on list views.
We have an archived query param, where 'true' shows archived, 'false'
omits them, and 'both' shows both.
We also have the query params permission_contains and object_id, which
allow users to filter the teams based on the permissions they
contain. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L95-L135 | [
"def get_true_false_both(query_params, field_name, default):\n '''Tries to get and return a valid of true, false, or both from the field\n name in the query string, raises a ValidationError for invalid values.'''\n valid = ('true', 'false', 'both')\n value = query_params.get(field_name, default).lower()\n if value in valid:\n return value\n v = ', '.join(sorted(valid))\n raise serializers.ValidationError({\n field_name: ['Must be one of [%s]' % v],\n })\n"
] | class BaseTeamViewSet(
NestedViewSetMixin, RetrieveModelMixin, UpdateModelMixin,
DestroyModelMixin, ListModelMixin, GenericViewSet):
queryset = SeedTeam.objects.all()
serializer_class = TeamSerializer
permission_classes = (permissions.TeamPermission,)
def perform_destroy(self, instance):
instance.archived = True
instance.save()
|
praekeltfoundation/seed-auth-api | authapi/views.py | TeamPermissionViewSet.create | python | def create(
self, request, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Add a permission to a team.'''
team = self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
permission = team.permissions.create(**serializer.validated_data)
serializer = self.get_serializer(instance=permission)
return Response(serializer.data, status=status.HTTP_201_CREATED) | Add a permission to a team. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L182-L194 | null | class TeamPermissionViewSet(
NestedViewSetMixin, DestroyModelMixin, GenericViewSet):
'''Nested viewset to add and remove permissions from teams.'''
queryset = SeedPermission.objects.all()
serializer_class = PermissionSerializer
permission_classes = (permissions.TeamPermissionPermission,)
def check_team_permissions(self, request, teamid, orgid=None):
if orgid is not None:
team = get_object_or_404(
SeedTeam, pk=teamid, organization_id=orgid)
else:
team = get_object_or_404(SeedTeam, pk=teamid)
permission = permissions.TeamPermission()
fake_request = clone_request(request, 'GET')
if not permission.has_object_permission(fake_request, self, team):
self.permission_denied(
request, message=getattr(permission, 'message', None)
)
return team
def destroy(
self, request, pk=None, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Remove a permission from a team.'''
self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
return super(TeamPermissionViewSet, self).destroy(
request, pk, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
|
praekeltfoundation/seed-auth-api | authapi/views.py | TeamPermissionViewSet.destroy | python | def destroy(
self, request, pk=None, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Remove a permission from a team.'''
self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
return super(TeamPermissionViewSet, self).destroy(
request, pk, parent_lookup_seedteam,
parent_lookup_seedteam__organization) | Remove a permission from a team. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L196-L205 | null | class TeamPermissionViewSet(
NestedViewSetMixin, DestroyModelMixin, GenericViewSet):
'''Nested viewset to add and remove permissions from teams.'''
queryset = SeedPermission.objects.all()
serializer_class = PermissionSerializer
permission_classes = (permissions.TeamPermissionPermission,)
def check_team_permissions(self, request, teamid, orgid=None):
if orgid is not None:
team = get_object_or_404(
SeedTeam, pk=teamid, organization_id=orgid)
else:
team = get_object_or_404(SeedTeam, pk=teamid)
permission = permissions.TeamPermission()
fake_request = clone_request(request, 'GET')
if not permission.has_object_permission(fake_request, self, team):
self.permission_denied(
request, message=getattr(permission, 'message', None)
)
return team
def create(
self, request, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Add a permission to a team.'''
team = self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
permission = team.permissions.create(**serializer.validated_data)
serializer = self.get_serializer(instance=permission)
return Response(serializer.data, status=status.HTTP_201_CREATED)
|
praekeltfoundation/seed-auth-api | authapi/views.py | TeamUsersViewSet.update | python | def update(
self, request, pk=None, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Add a user to a team.'''
user = get_object_or_404(User, pk=pk)
team = self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
team.users.add(user)
return Response(status=status.HTTP_204_NO_CONTENT) | Add a user to a team. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L228-L237 | null | class TeamUsersViewSet(NestedViewSetMixin, GenericViewSet):
'''Nested viewset that allows users to add or remove users from teams.'''
queryset = User.objects.all()
permission_classes = (IsAuthenticated,)
def check_team_permissions(self, request, teamid, orgid=None):
if orgid is not None:
team = get_object_or_404(
SeedTeam, pk=teamid, organization_id=orgid)
else:
team = get_object_or_404(SeedTeam, pk=teamid)
permission = permissions.TeamPermission()
fake_request = clone_request(request, 'PUT')
if not permission.has_object_permission(fake_request, self, team):
self.permission_denied(
request, message=getattr(permission, 'message', None)
)
return team
def destroy(
self, request, pk=None, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Remove a user from an organization.'''
user = self.get_object()
team = self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
team.users.remove(user)
return Response(status=status.HTTP_204_NO_CONTENT)
|
praekeltfoundation/seed-auth-api | authapi/views.py | TeamUsersViewSet.destroy | python | def destroy(
self, request, pk=None, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Remove a user from an organization.'''
user = self.get_object()
team = self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
team.users.remove(user)
return Response(status=status.HTTP_204_NO_CONTENT) | Remove a user from an organization. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L239-L248 | null | class TeamUsersViewSet(NestedViewSetMixin, GenericViewSet):
'''Nested viewset that allows users to add or remove users from teams.'''
queryset = User.objects.all()
permission_classes = (IsAuthenticated,)
def check_team_permissions(self, request, teamid, orgid=None):
if orgid is not None:
team = get_object_or_404(
SeedTeam, pk=teamid, organization_id=orgid)
else:
team = get_object_or_404(SeedTeam, pk=teamid)
permission = permissions.TeamPermission()
fake_request = clone_request(request, 'PUT')
if not permission.has_object_permission(fake_request, self, team):
self.permission_denied(
request, message=getattr(permission, 'message', None)
)
return team
def update(
self, request, pk=None, parent_lookup_seedteam=None,
parent_lookup_seedteam__organization=None):
'''Add a user to a team.'''
user = get_object_or_404(User, pk=pk)
team = self.check_team_permissions(
request, parent_lookup_seedteam,
parent_lookup_seedteam__organization)
team.users.add(user)
return Response(status=status.HTTP_204_NO_CONTENT)
|
praekeltfoundation/seed-auth-api | authapi/views.py | UserViewSet.get_queryset | python | def get_queryset(self):
'''We want to still be able to modify archived users, but they
shouldn't show up on list views.
We have an archived query param, where 'true' shows archived, 'false'
omits them, and 'both' shows both.'''
if self.action == 'list':
active = get_true_false_both(
self.request.query_params, 'active', 'true')
if active == 'true':
return self.queryset.filter(is_active=True)
if active == 'false':
return self.queryset.filter(is_active=False)
return self.queryset | We want to still be able to modify archived users, but they
shouldn't show up on list views.
We have an archived query param, where 'true' shows archived, 'false'
omits them, and 'both' shows both. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L261-L274 | [
"def get_true_false_both(query_params, field_name, default):\n '''Tries to get and return a valid of true, false, or both from the field\n name in the query string, raises a ValidationError for invalid values.'''\n valid = ('true', 'false', 'both')\n value = query_params.get(field_name, default).lower()\n if value in valid:\n return value\n v = ', '.join(sorted(valid))\n raise serializers.ValidationError({\n field_name: ['Must be one of [%s]' % v],\n })\n"
] | class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
permission_classes = (permissions.UserPermission,)
def get_serializer_class(self):
if self.action == 'create':
return NewUserSerializer
else:
return UserSerializer
def destroy(self, request, pk=None):
'''For DELETE actions, actually deactivate the user, don't delete.'''
user = self.get_object()
user.is_active = False
user.save()
return Response(status=status.HTTP_204_NO_CONTENT)
|
praekeltfoundation/seed-auth-api | authapi/views.py | UserViewSet.destroy | python | def destroy(self, request, pk=None):
'''For DELETE actions, actually deactivate the user, don't delete.'''
user = self.get_object()
user.is_active = False
user.save()
return Response(status=status.HTTP_204_NO_CONTENT) | For DELETE actions, actually deactivate the user, don't delete. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L276-L281 | null | class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
permission_classes = (permissions.UserPermission,)
def get_serializer_class(self):
if self.action == 'create':
return NewUserSerializer
else:
return UserSerializer
def get_queryset(self):
'''We want to still be able to modify archived users, but they
shouldn't show up on list views.
We have an archived query param, where 'true' shows archived, 'false'
omits them, and 'both' shows both.'''
if self.action == 'list':
active = get_true_false_both(
self.request.query_params, 'active', 'true')
if active == 'true':
return self.queryset.filter(is_active=True)
if active == 'false':
return self.queryset.filter(is_active=False)
return self.queryset
|
praekeltfoundation/seed-auth-api | authapi/views.py | TokenView.post | python | def post(self, request):
'''Create a token, given an email and password. Removes all other
tokens for that user.'''
serializer = CreateTokenSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
email = serializer.validated_data.get('email')
password = serializer.validated_data.get('password')
user = authenticate(username=email, password=password)
if not user:
return Response(status=status.HTTP_401_UNAUTHORIZED)
Token.objects.filter(user=user).delete()
token = Token.objects.create(user=user)
return Response(
status=status.HTTP_201_CREATED, data={'token': token.key}) | Create a token, given an email and password. Removes all other
tokens for that user. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L287-L303 | null | class TokenView(APIView):
permission_classes = (AllowAny,)
|
praekeltfoundation/seed-auth-api | authapi/views.py | UserPermissionsView.get | python | def get(self, request):
'''Get user information, with a list of permissions for that user.'''
user = request.user
serializer = PermissionsUserSerializer(
instance=user, context={'request': request})
return Response(data=serializer.data) | Get user information, with a list of permissions for that user. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/views.py#L309-L314 | null | class UserPermissionsView(APIView):
permission_classes = (IsAuthenticated,)
|
praekeltfoundation/seed-auth-api | authapi/serializers.py | BaseUserSerializer.create | python | def create(self, validated_data):
'''We want to set the username to be the same as the email, and use
the correct create function to make use of password hashing.'''
validated_data['username'] = validated_data['email']
admin = validated_data.pop('is_superuser', None)
if admin is True:
user = User.objects.create_superuser(**validated_data)
else:
user = User.objects.create_user(**validated_data)
return user | We want to set the username to be the same as the email, and use
the correct create function to make use of password hashing. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/serializers.py#L94-L105 | null | class BaseUserSerializer(BaseModelSerializer):
email = serializers.EmailField()
admin = serializers.BooleanField(source='is_superuser', required=False)
active = serializers.BooleanField(default=True, source='is_active')
password = serializers.CharField(
style={'input_type': 'password'}, write_only=True, required=False)
def update(self, instance, validated_data):
'''We want to set all the required fields if admin is set, and we want
to use the password hashing method if password is set.'''
admin = validated_data.pop('is_superuser', None)
password = validated_data.pop('password', None)
if validated_data.get('email') is not None:
validated_data['username'] = validated_data['email']
for attr, value in validated_data.items():
setattr(instance, attr, value)
if admin is not None:
instance.is_staff = admin
instance.is_superuser = admin
if password is not None:
instance.set_password(password)
instance.save()
return instance
|
praekeltfoundation/seed-auth-api | authapi/serializers.py | BaseUserSerializer.update | python | def update(self, instance, validated_data):
'''We want to set all the required fields if admin is set, and we want
to use the password hashing method if password is set.'''
# Pop the two fields that need special handling so the generic
# setattr loop below does not assign them directly.
admin = validated_data.pop('is_superuser', None)
password = validated_data.pop('password', None)
# Keep username in lockstep with email, matching create() above.
if validated_data.get('email') is not None:
validated_data['username'] = validated_data['email']
for attr, value in validated_data.items():
setattr(instance, attr, value)
# The single 'admin' flag drives both staff and superuser status.
if admin is not None:
instance.is_staff = admin
instance.is_superuser = admin
# set_password() stores a salted hash, never the raw password.
if password is not None:
instance.set_password(password)
instance.save()
return instance | We want to set all the required fields if admin is set, and we want
to use the password hashing method if password is set. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/serializers.py#L107-L124 | null | class BaseUserSerializer(BaseModelSerializer):
email = serializers.EmailField()
admin = serializers.BooleanField(source='is_superuser', required=False)
active = serializers.BooleanField(default=True, source='is_active')
password = serializers.CharField(
style={'input_type': 'password'}, write_only=True, required=False)
def create(self, validated_data):
'''We want to set the username to be the same as the email, and use
the correct create function to make use of password hashing.'''
validated_data['username'] = validated_data['email']
admin = validated_data.pop('is_superuser', None)
if admin is True:
user = User.objects.create_superuser(**validated_data)
else:
user = User.objects.create_user(**validated_data)
return user
|
praekeltfoundation/seed-auth-api | authapi/utils.py | get_user_permissions | python | def get_user_permissions(user):
'''Returns the queryset of permissions for the given user.'''
permissions = SeedPermission.objects.all()
# User must be on a team that grants the permission
permissions = permissions.filter(seedteam__users=user)
# The team must be active
permissions = permissions.filter(seedteam__archived=False)
# The organization of that team must be active
permissions = permissions.filter(
seedteam__organization__archived=False)
# NOTE(review): chained filter() calls on a multi-valued relation
# (seedteam) may be satisfied by *different* teams per condition --
# confirm a single team is meant to satisfy all three constraints.
return permissions | Returns the queryset of permissions for the given user. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/utils.py#L4-L14 | null | from authapi.models import SeedPermission
def find_permission(
permissions, permission_type, object_id=None, namespace=None):
'''Given a queryset of permissions, filters depending on the permission
type, and optionally an object id and namespace.'''
if object_id is not None:
return permissions.filter(
type=permission_type, object_id=object_id, namespace=namespace)
return permissions.filter(type=permission_type)
|
praekeltfoundation/seed-auth-api | authapi/utils.py | find_permission | python | def find_permission(
permissions, permission_type, object_id=None, namespace=None):
'''Given a queryset of permissions, filters depending on the permission
type, and optionally an object id and namespace.'''
# NOTE(review): namespace is only applied when object_id is given;
# a namespace passed without an object_id is silently ignored.
if object_id is not None:
return permissions.filter(
type=permission_type, object_id=object_id, namespace=namespace)
# Fallback: match on permission type alone.
return permissions.filter(type=permission_type) | Given a queryset of permissions, filters depending on the permission
type, and optionally an object id and namespace. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/utils.py#L17-L24 | null | from authapi.models import SeedPermission
def get_user_permissions(user):
'''Returns the queryset of permissions for the given user.'''
permissions = SeedPermission.objects.all()
# User must be on a team that grants the permission
permissions = permissions.filter(seedteam__users=user)
# The team must be active
permissions = permissions.filter(seedteam__archived=False)
# The organization of that team must be active
permissions = permissions.filter(
seedteam__organization__archived=False)
return permissions
|
praekeltfoundation/seed-auth-api | authapi/permissions.py | TeamPermission.object_permission_set | python | def object_permission_set(self):
'''
admins, users with team:admin for the team, and users with org:admin,
team's organization have full access to teams. Users who are a member
of the team, or are a member of the team's organization, have read
access to the team.
'''
# Full access: superusers, holders of team:admin on this team, or
# holders of org:admin scoped to the team's owning organization.
return Or(
AllowAdmin,
AllowObjectPermission('team:admin'),
AllowObjectPermission('org:admin', lambda t: t.organization_id),
# Read-only access (safe HTTP methods only) for direct team members
# and for members of the team's organization.
And(
AllowOnlySafeHttpMethod,
Or(
ObjAttrTrue(
lambda r, t: t.users.filter(pk=r.user.pk).exists()),
ObjAttrTrue(
lambda r, t: t.organization.users.filter(
pk=r.user.pk).exists())
)
)
) | admins, users with team:admin for the team, and users with org:admin,
team's organization have full access to teams. Users who are a member
of the team, or are a member of the team's organization, have read
access to the team. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/permissions.py#L155-L176 | null | class TeamPermission(BaseComposedPermision):
'''Permissions for the TeamViewSet.'''
def global_permission_set(self):
'''All users must be authenticated.'''
return AllowOnlyAuthenticated
|
praekeltfoundation/seed-auth-api | authapi/permissions.py | UserPermission.global_permission_set | python | def global_permission_set(self):
'''All users must be authenticated. Only admins can create other admin
users.'''
# Superusers may always create admins; anyone else (including
# org:admin holders) may proceed only when the request payload does
# not set admin=True.
only_admins_create_admins = Or(
AllowAdmin,
And(
ObjAttrTrue(
lambda r, _: r.data.get('admin') is not True),
Or(
AllowPermission('org:admin')
)
)
)
# Not(AllowCreate) short-circuits the admin-creation rule for every
# request that is not a create, so it only constrains POSTs.
return And(
AllowOnlyAuthenticated,
Or(
Not(AllowCreate),
only_admins_create_admins
)
)
users. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/permissions.py#L181-L201 | null | class UserPermission(BaseComposedPermision):
'''Permissions for the UserViewSet.'''
def object_permission_set(self):
'''All users have view permissions. Admin users, and users with
org:admin can create, update, and delete any user. Any user can update
or delete themselves. Only admins can create or modify other admin
users.'''
return Or(
AllowOnlySafeHttpMethod,
AllowAdmin,
And(
AllowPermission('org:admin'),
ObjAttrTrue(lambda _, u: not u.is_superuser),
ObjAttrTrue(
lambda r, _: r.data.get('admin') is not True)
),
And(
AllowModify,
ObjAttrTrue(
lambda req, user: user == req.user),
ObjAttrTrue(
lambda r, _: r.data.get('admin') is not True)
),
)
|
praekeltfoundation/seed-auth-api | authapi/permissions.py | UserPermission.object_permission_set | python | def object_permission_set(self):
'''All users have view permissions. Admin users, and users with
org:admin can create, update, and delete any user. Any user can update
or delete themselves. Only admins can create or modify other admin
users.'''
# Reads (safe HTTP methods) are open to everyone; superusers may do
# anything to any user object.
return Or(
AllowOnlySafeHttpMethod,
AllowAdmin,
# org:admin holders may manage non-superuser accounts, and only as
# long as the request is not trying to grant admin=True.
And(
AllowPermission('org:admin'),
ObjAttrTrue(lambda _, u: not u.is_superuser),
ObjAttrTrue(
lambda r, _: r.data.get('admin') is not True)
),
# Users may modify their own account, again without being able to
# grant themselves admin=True.
And(
AllowModify,
ObjAttrTrue(
lambda req, user: user == req.user),
ObjAttrTrue(
lambda r, _: r.data.get('admin') is not True)
),
) | All users have view permissions. Admin users, and users with
org:admin can create, update, and delete any user. Any user can update
or delete themselves. Only admins can create or modify other admin
users. | train | https://github.com/praekeltfoundation/seed-auth-api/blob/ac03538ec4f9470931473f0700ad4fa69075d44b/authapi/permissions.py#L203-L224 | null | class UserPermission(BaseComposedPermision):
'''Permissions for the UserViewSet.'''
def global_permission_set(self):
'''All users must be authenticated. Only admins can create other admin
users.'''
only_admins_create_admins = Or(
AllowAdmin,
And(
ObjAttrTrue(
lambda r, _: r.data.get('admin') is not True),
Or(
AllowPermission('org:admin')
)
)
)
return And(
AllowOnlyAuthenticated,
Or(
Not(AllowCreate),
only_admins_create_admins
)
)
|
renweizhukov/pytwis | pytwis/pytwis.py | Pytwis._is_loggedin | python | def _is_loggedin(self, auth_secret):
# Returns a (loggedin, userid) pair; userid is a str because the
# Redis client is created with decode_responses=True.
# Get the userid from the authentication secret.
userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
if userid is None:
return (False, None)
# Compare the input authentication secret with the stored one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
# Cross-check: the secret must also match the copy kept in the user's
# profile hash (the two copies are written together in register()).
if auth_secret == stored_auth_secret:
return (True, userid)
# TODO: Resolve the inconsistency of the two authentication secrets.
return (False, None) | Check if a user is logged-in by verifying the input authentication secret. | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L102-L130 | null | class Pytwis:
Parameters
----------
auth_secret: str
The authentication secret of a logged-in user.
Returns
-------
bool
True if the authentication secret is valid, False otherwise.
userid: str
The user ID associated with the authentication secret if the authentication secret
valid, None otherwise. | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L102-L130 | null | class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
def __init__(self, hostname='127.0.0.1', port=6379, socket='', db=0, password=''):
"""Initialize the class Pytiws.
Parameters
----------
hostname : str, optional
The Redis server hostname which is usually an IP address (default: 127.0.0.1).
port : int, optional
The Redis server port number (default: 6379).
socket: str, optional
The Redis server socket which will override hostname and port if it is given.
db : int, optional
The selected Redis database index (default: 0).
password : str, optional
The Redis server password (default: '').
Raises
------
ValueError
If failed to connect to the Redis server with either ResponseError or RedisTimeoutError.
"""
if socket:
self._rc = redis.StrictRedis(
unix_socket_path=socket,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
else:
self._rc = redis.StrictRedis(
host=hostname,
port=port,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
# Test the connection by ping.
try:
if self._rc.ping():
if socket:
print('Ping {} returned True'.format(socket))
else:
print('Ping {}:{} returned True'.format(hostname, port))
except (ResponseError, RedisTimeoutError) as excep:
raise ValueError(str(excep))
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error)
def register(self, username, password):
"""Register a new user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the new user is successfully registered, False otherwise.
result
An empty dict if the new user is successfully registered, a dict
containing the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_ALREADY_EXISTS.format(username)
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check the username.
if not Pytwis._check_username(username):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
return (False, result)
# Check the password.
if not Pytwis._check_password(password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Update the username-to-userid mapping.
with self._rc.pipeline() as pipe:
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# multiple clients are registering with the same username.
pipe.watch(pytwis_constants.USERS_KEY)
username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
if username_exists:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)
# Set the username-to-userid pair in USERS_HASH_KEY.
pipe.multi()
pipe.hset(pytwis_constants.USERS_KEY, username, userid)
pipe.execute()
break
except WatchError:
continue
# Generate the authentication secret.
auth_secret = secrets.token_hex()
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
# Generate the password hash.
# The format of the password hash looks like "method$salt$hash".
password_hash = generate_password_hash(password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
pipe.multi()
# Update the authentication_secret-to-userid mapping.
pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
# Create the user profile.
pipe.hmset(userid_profile_key,
{pytwis_constants.USERNAME_KEY: username,
pytwis_constants.PASSWORD_HASH_KEY: password_hash,
pytwis_constants.AUTH_KEY: auth_secret})
pipe.execute()
return (True, result)
def change_password(self, auth_secret, old_password, new_password):
"""Change the user password.
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
if old_password == new_password:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
return (False, result)
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Check if the old password matches.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if not check_password_hash(stored_password_hash, old_password):
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
return (False, result)
# Check the password.
if not Pytwis._check_password(new_password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Generate the new password hash.
# The format of the new password hash looks like "method$salt$hash".
new_password_hash = generate_password_hash(new_password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
# Replace the old password hash by the new one and the old authentication secret
# by the new one.
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY,
new_password_hash)
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.AUTH_KEY] = new_auth_secret
return (True, result)
def login(self, username, password):
"""Log into a user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the login is successful, False otherwise.
result
A dict containing the authentication secret with the key AUTH_KEY
if the login is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
- ERROR_INCORRECT_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Get the user-id based on the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Compare the input password hash with the stored one. If it matches,
# return the authentication secret.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if check_password_hash(stored_password_hash, password):
result[pytwis_constants.AUTH_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
return (True, result)
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
return (False, result)
def logout(self, auth_secret):
"""Log out of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
None if the logout is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Replace the old authentication secret by the new one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.USERNAME_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.AUTH_KEY] = ''
return (True, result)
def get_user_profile(self, auth_secret):
"""Get the profile (i.e., username, password, etc.) of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
A dict containing the following keys:
- USERNAME_KEY
- PASSWORD_HASH_KEY
- AUTH_KEY
if the user profile is obtained successfully; otherwise a dict
containing the error string with the key ERROR_KEY.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
result = self._rc.hgetall(userid_profile_key)
return (True, result)
def post_tweet(self, auth_secret, tweet):
"""Post a tweet.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
tweet: str
The tweet that will be posted.
Returns
-------
bool
True if the tweet is successfully posted, False otherwise.
result
None if the tweet is successfully posted, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)
post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
followers = self._rc.zrange(follower_zset_key, 0, -1)
unix_time = int(time.time())
with self._rc.pipeline() as pipe:
pipe.multi()
# Store the tweet with its user ID and UNIX timestamp.
pipe.hmset(post_id_key,
{pytwis_constants.TWEET_USERID_KEY: userid,
pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,
pytwis_constants.TWEET_BODY_KEY: tweet})
# Add the tweet to the user timeline.
pipe.lpush(post_id_timeline_key, post_id)
# Add the tweet to the tweet list posted by the user.
pipe.lpush(post_id_user_key, post_id)
# Write fanout the tweet to all the followers' timelines.
for follower in followers:
post_id_follower_key = \
pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)
pipe.lpush(post_id_follower_key, post_id)
# Add the tweet to the general timeline and left trim the general timeline
# to only retain the latest GENERAL_TIMELINE_LIST_MAX_TWEET_CNT tweets.
pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)
pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
0,
pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)
pipe.execute()
return (True, result)
def follow(self, auth_secret, followee_username):
"""Follow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the follow is successful, False otherwise.
result
None if the follow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
- ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
elif followee_userid == userid:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
return (False, result)
break
except WatchError:
continue
# Update the two zset 'followers:[followee_username]' and 'following:[username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
unix_time = int(time.time())
pipe.multi()
pipe.zadd(follower_zset_key, unix_time, userid)
pipe.zadd(following_zset_key, unix_time, followee_userid)
pipe.execute()
return (True, result)
def unfollow(self, auth_secret, followee_username):
"""Unfollow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the unfollow is successful, False otherwise.
result
None if the unfollow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
break
except WatchError:
continue
# Remove followee_userid from the zset 'following:[username]' and remove userid
# from the zset 'followers:[followee_username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
pipe.multi()
pipe.zrem(follower_zset_key, userid)
pipe.zrem(following_zset_key, followee_userid)
pipe.execute()
return (True, result)
def get_followers(self, auth_secret):
"""Get the follower list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the follower list is successfully obtained, False otherwise.
result
A dict containing the follower list with the key FOLLOWER_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
if follower_userids is None or not follower_userids:
result[pytwis_constants.FOLLOWER_LIST_KEY] = []
return (True, result)
# Get the list of followers' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for follower_userid in follower_userids:
follower_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()
return (True, result)
def get_following(self, auth_secret):
"""Get the following list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the following list is successfully obtained, False otherwise.
result
A dict containing the following list with the key FOLLOWING_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
following_userids = self._rc.zrange(following_zset_key, 0, -1)
if following_userids is None or not following_userids:
result[pytwis_constants.FOLLOWING_LIST_KEY] = []
return (True, result)
# Get the list of followings' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for following_userid in following_userids:
following_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()
return (True, result)
def _get_tweets(self, tweets_key, max_cnt_tweets):
"""Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.
Parameters
----------
tweets_key: str
The key of the Redis list which stores the tweets.
max_cnt_tweets: int
The maximum number of tweets included in the returned list. If it is set to -1,
then all the available tweets will be included.
Returns
-------
tweets
A list of tweets
"""
tweets = []
if max_cnt_tweets == 0:
return tweets
elif max_cnt_tweets == -1:
# Return all the tweets in the timeline.
last_tweet_index = -1
else:
# Return at most max_cnt_tweets tweets.
last_tweet_index = max_cnt_tweets - 1
# Get the post IDs of the tweets.
post_ids = self._rc.lrange(tweets_key, 0, last_tweet_index)
if not post_ids:
return tweets
with self._rc.pipeline() as pipe:
# Get the tweets with their user IDs and UNIX timestamps.
pipe.multi()
for post_id in post_ids:
post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
pipe.hgetall(post_id_key)
tweets = pipe.execute()
# Get the userid-to-username mappings for all the user IDs associated with the tweets.
userid_set = {tweet[pytwis_constants.TWEET_USERID_KEY] for tweet in tweets}
userid_list = []
pipe.multi()
for userid in userid_set:
userid_list.append(userid)
userid_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
pipe.hget(userid_key, pytwis_constants.USERNAME_KEY)
username_list = pipe.execute()
userid_to_username = {userid: username for userid, username in\
zip(userid_list, username_list)}
# Add the username for the user ID of each tweet.
for tweet in tweets:
tweet[pytwis_constants.USERNAME_KEY] = \
userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]
return tweets
def get_timeline(self, auth_secret, max_cnt_tweets):
"""Get the general or user timeline.
If an empty authentication secret is given, this method returns the general timeline.
If an authentication secret is given and it is valid, this method returns the user timeline.
If an authentication secret is given but it is invalid, this method returns an error.
Parameters
----------
auth_secret: str
Either the authentication secret of the logged-in user or an empty string.
max_cnt_tweets: int
The maximum number of tweets included in the timeline. If it is set to -1,
then all the available tweets will be included.
Returns
-------
bool
True if the timeline is successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the timeline is successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
if auth_secret == '':
# An empty authentication secret implies getting the general timeline.
timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY
else:
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the user timeline.
timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = self._get_tweets(timeline_key, max_cnt_tweets)
return (True, result)
    def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
        """Get the tweets posted by one user.

        Parameters
        ----------
        auth_secret: str
            The authentication secret of the logged-in user.
        username:
            The name of the user who post the tweets and may not be the logged-in user.
        max_cnt_tweets: int
            The maximum number of tweets included in the return. If it is set to -1,
            then all the tweets posted by the user will be included.

        Returns
        -------
        bool
            True if the tweets are successfully retrieved, False otherwise.
        result
            A dict containing a list of tweets with the key TWEETS_KEY if
            the tweets are successfully retrieved, a dict containing
            the error string with the key ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        - ERROR_NOT_LOGGED_IN
        - ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
        """
        result = {pytwis_constants.ERROR_KEY: None}
        # Check if the user is logged in.
        # Only `loggedin` is needed here: the tweets belong to `username`,
        # who is not necessarily the caller.
        loggedin, _ = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)
        # Get the userid from the username.
        userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
        if userid is None:
            result[pytwis_constants.ERROR_KEY] = \
                pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
            return (False, result)
        # Get the tweets posted by the user.
        user_tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
        result[pytwis_constants.TWEETS_KEY] = \
            self._get_tweets(user_tweets_key, max_cnt_tweets)
        return (True, result)
|
renweizhukov/pytwis | pytwis/pytwis.py | Pytwis._check_password | python | def _check_password(password):
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error) | Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise. | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L152-L186 | null | class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
def __init__(self, hostname='127.0.0.1', port=6379, socket='', db=0, password=''):
"""Initialize the class Pytiws.
Parameters
----------
hostname : str, optional
The Redis server hostname which is usually an IP address (default: 127.0.0.1).
port : int, optional
The Redis server port number (default: 6379).
socket: str, optional
The Redis server socket which will override hostname and port if it is given.
db : int, optional
The selected Redis database index (default: 0).
password : str, optional
The Redis server password (default: '').
Raises
------
ValueError
If failed to connect to the Redis server with either ResponseError or RedisTimeoutError.
"""
if socket:
self._rc = redis.StrictRedis(
unix_socket_path=socket,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
else:
self._rc = redis.StrictRedis(
host=hostname,
port=port,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
# Test the connection by ping.
try:
if self._rc.ping():
if socket:
print('Ping {} returned True'.format(socket))
else:
print('Ping {}:{} returned True'.format(hostname, port))
except (ResponseError, RedisTimeoutError) as excep:
raise ValueError(str(excep))
    def _is_loggedin(self, auth_secret):
        """Check if a user is logged-in by verifying the input authentication secret.

        Parameters
        ----------
        auth_secret: str
            The authentication secret of a logged-in user.

        Returns
        -------
        bool
            True if the authentication secret is valid, False otherwise.
        userid: str
            The user ID associated with the authentication secret if the authentication secret
            valid, None otherwise.
        """
        # Get the userid from the authentication secret.
        userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
        if userid is None:
            return (False, None)
        # Compare the input authentication secret with the stored one.
        # The secret is kept in two places (the auths hash and the user
        # profile hash); both copies must agree for the login to be valid.
        userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
        stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
        if auth_secret == stored_auth_secret:
            return (True, userid)
        # TODO: Resolve the inconsistency of the two authentication secrets.
        return (False, None)
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def register(self, username, password):
"""Register a new user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the new user is successfully registered, False otherwise.
result
An empty dict if the new user is successfully registered, a dict
containing the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_ALREADY_EXISTS.format(username)
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check the username.
if not Pytwis._check_username(username):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
return (False, result)
# Check the password.
if not Pytwis._check_password(password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Update the username-to-userid mapping.
with self._rc.pipeline() as pipe:
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# multiple clients are registering with the same username.
pipe.watch(pytwis_constants.USERS_KEY)
username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
if username_exists:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)
# Set the username-to-userid pair in USERS_HASH_KEY.
pipe.multi()
pipe.hset(pytwis_constants.USERS_KEY, username, userid)
pipe.execute()
break
except WatchError:
continue
# Generate the authentication secret.
auth_secret = secrets.token_hex()
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
# Generate the password hash.
# The format of the password hash looks like "method$salt$hash".
password_hash = generate_password_hash(password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
pipe.multi()
# Update the authentication_secret-to-userid mapping.
pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
# Create the user profile.
pipe.hmset(userid_profile_key,
{pytwis_constants.USERNAME_KEY: username,
pytwis_constants.PASSWORD_HASH_KEY: password_hash,
pytwis_constants.AUTH_KEY: auth_secret})
pipe.execute()
return (True, result)
def change_password(self, auth_secret, old_password, new_password):
"""Change the user password.
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
if old_password == new_password:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
return (False, result)
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Check if the old password matches.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if not check_password_hash(stored_password_hash, old_password):
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
return (False, result)
# Check the password.
if not Pytwis._check_password(new_password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Generate the new password hash.
# The format of the new password hash looks like "method$salt$hash".
new_password_hash = generate_password_hash(new_password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
# Replace the old password hash by the new one and the old authentication secret
# by the new one.
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY,
new_password_hash)
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.AUTH_KEY] = new_auth_secret
return (True, result)
def login(self, username, password):
"""Log into a user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the login is successful, False otherwise.
result
A dict containing the authentication secret with the key AUTH_KEY
if the login is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
- ERROR_INCORRECT_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Get the user-id based on the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Compare the input password hash with the stored one. If it matches,
# return the authentication secret.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if check_password_hash(stored_password_hash, password):
result[pytwis_constants.AUTH_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
return (True, result)
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
return (False, result)
def logout(self, auth_secret):
"""Log out of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
None if the logout is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Replace the old authentication secret by the new one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.USERNAME_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.AUTH_KEY] = ''
return (True, result)
def get_user_profile(self, auth_secret):
"""Get the profile (i.e., username, password, etc.) of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
A dict containing the following keys:
- USERNAME_KEY
- PASSWORD_HASH_KEY
- AUTH_KEY
if the user profile is obtained successfully; otherwise a dict
containing the error string with the key ERROR_KEY.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
result = self._rc.hgetall(userid_profile_key)
return (True, result)
def post_tweet(self, auth_secret, tweet):
"""Post a tweet.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
tweet: str
The tweet that will be posted.
Returns
-------
bool
True if the tweet is successfully posted, False otherwise.
result
None if the tweet is successfully posted, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)
post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
followers = self._rc.zrange(follower_zset_key, 0, -1)
unix_time = int(time.time())
with self._rc.pipeline() as pipe:
pipe.multi()
# Store the tweet with its user ID and UNIX timestamp.
pipe.hmset(post_id_key,
{pytwis_constants.TWEET_USERID_KEY: userid,
pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,
pytwis_constants.TWEET_BODY_KEY: tweet})
# Add the tweet to the user timeline.
pipe.lpush(post_id_timeline_key, post_id)
# Add the tweet to the tweet list posted by the user.
pipe.lpush(post_id_user_key, post_id)
# Write fanout the tweet to all the followers' timelines.
for follower in followers:
post_id_follower_key = \
pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)
pipe.lpush(post_id_follower_key, post_id)
# Add the tweet to the general timeline and left trim the general timeline
# to only retain the latest GENERAL_TIMELINE_LIST_MAX_TWEET_CNT tweets.
pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)
pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
0,
pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)
pipe.execute()
return (True, result)
def follow(self, auth_secret, followee_username):
"""Follow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the follow is successful, False otherwise.
result
None if the follow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
- ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
elif followee_userid == userid:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
return (False, result)
break
except WatchError:
continue
# Update the two zset 'followers:[followee_username]' and 'following:[username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
unix_time = int(time.time())
pipe.multi()
pipe.zadd(follower_zset_key, unix_time, userid)
pipe.zadd(following_zset_key, unix_time, followee_userid)
pipe.execute()
return (True, result)
def unfollow(self, auth_secret, followee_username):
"""Unfollow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the unfollow is successful, False otherwise.
result
None if the unfollow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
break
except WatchError:
continue
# Remove followee_userid from the zset 'following:[username]' and remove userid
# from the zset 'followers:[followee_username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
pipe.multi()
pipe.zrem(follower_zset_key, userid)
pipe.zrem(following_zset_key, followee_userid)
pipe.execute()
return (True, result)
def get_followers(self, auth_secret):
"""Get the follower list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the follower list is successfully obtained, False otherwise.
result
A dict containing the follower list with the key FOLLOWER_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
if follower_userids is None or not follower_userids:
result[pytwis_constants.FOLLOWER_LIST_KEY] = []
return (True, result)
# Get the list of followers' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for follower_userid in follower_userids:
follower_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()
return (True, result)
def get_following(self, auth_secret):
"""Get the following list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the following list is successfully obtained, False otherwise.
result
A dict containing the following list with the key FOLLOWING_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
following_userids = self._rc.zrange(following_zset_key, 0, -1)
if following_userids is None or not following_userids:
result[pytwis_constants.FOLLOWING_LIST_KEY] = []
return (True, result)
# Get the list of followings' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for following_userid in following_userids:
following_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()
return (True, result)
def _get_tweets(self, tweets_key, max_cnt_tweets):
"""Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.
Parameters
----------
tweets_key: str
The key of the Redis list which stores the tweets.
max_cnt_tweets: int
The maximum number of tweets included in the returned list. If it is set to -1,
then all the available tweets will be included.
Returns
-------
tweets
A list of tweets
"""
tweets = []
if max_cnt_tweets == 0:
return tweets
elif max_cnt_tweets == -1:
# Return all the tweets in the timeline.
last_tweet_index = -1
else:
# Return at most max_cnt_tweets tweets.
last_tweet_index = max_cnt_tweets - 1
# Get the post IDs of the tweets.
post_ids = self._rc.lrange(tweets_key, 0, last_tweet_index)
if not post_ids:
return tweets
with self._rc.pipeline() as pipe:
# Get the tweets with their user IDs and UNIX timestamps.
pipe.multi()
for post_id in post_ids:
post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
pipe.hgetall(post_id_key)
tweets = pipe.execute()
# Get the userid-to-username mappings for all the user IDs associated with the tweets.
userid_set = {tweet[pytwis_constants.TWEET_USERID_KEY] for tweet in tweets}
userid_list = []
pipe.multi()
for userid in userid_set:
userid_list.append(userid)
userid_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
pipe.hget(userid_key, pytwis_constants.USERNAME_KEY)
username_list = pipe.execute()
userid_to_username = {userid: username for userid, username in\
zip(userid_list, username_list)}
# Add the username for the user ID of each tweet.
for tweet in tweets:
tweet[pytwis_constants.USERNAME_KEY] = \
userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]
return tweets
def get_timeline(self, auth_secret, max_cnt_tweets):
"""Get the general or user timeline.
If an empty authentication secret is given, this method returns the general timeline.
If an authentication secret is given and it is valid, this method returns the user timeline.
If an authentication secret is given but it is invalid, this method returns an error.
Parameters
----------
auth_secret: str
Either the authentication secret of the logged-in user or an empty string.
max_cnt_tweets: int
The maximum number of tweets included in the timeline. If it is set to -1,
then all the available tweets will be included.
Returns
-------
bool
True if the timeline is successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the timeline is successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
if auth_secret == '':
# An empty authentication secret implies getting the general timeline.
timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY
else:
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the user timeline.
timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = self._get_tweets(timeline_key, max_cnt_tweets)
return (True, result)
def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
"""Get the tweets posted by one user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
username:
The name of the user who post the tweets and may not be the logged-in user.
max_cnt_tweets: int
The maximum number of tweets included in the return. If it is set to -1,
then all the tweets posted by the user will be included.
Returns
-------
bool
True if the tweets are successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the tweets are successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, _ = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the userid from the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Get the tweets posted by the user.
user_tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = \
self._get_tweets(user_tweets_key, max_cnt_tweets)
return (True, result)
|
renweizhukov/pytwis | pytwis/pytwis.py | Pytwis.register | python | def register(self, username, password):
result = {pytwis_constants.ERROR_KEY: None}
# Check the username.
if not Pytwis._check_username(username):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
return (False, result)
# Check the password.
if not Pytwis._check_password(password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Update the username-to-userid mapping.
with self._rc.pipeline() as pipe:
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# multiple clients are registering with the same username.
pipe.watch(pytwis_constants.USERS_KEY)
username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
if username_exists:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)
# Set the username-to-userid pair in USERS_HASH_KEY.
pipe.multi()
pipe.hset(pytwis_constants.USERS_KEY, username, userid)
pipe.execute()
break
except WatchError:
continue
# Generate the authentication secret.
auth_secret = secrets.token_hex()
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
# Generate the password hash.
# The format of the password hash looks like "method$salt$hash".
password_hash = generate_password_hash(password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
pipe.multi()
# Update the authentication_secret-to-userid mapping.
pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
# Create the user profile.
pipe.hmset(userid_profile_key,
{pytwis_constants.USERNAME_KEY: username,
pytwis_constants.PASSWORD_HASH_KEY: password_hash,
pytwis_constants.AUTH_KEY: auth_secret})
pipe.execute()
return (True, result) | Register a new user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the new user is successfully registered, False otherwise.
result
An empty dict if the new user is successfully registered, a dict
containing the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_ALREADY_EXISTS.format(username)
- ERROR_WEAK_PASSWORD | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L188-L271 | [
"def _check_username(username):\n \"\"\"Check if a username is valid.\n A username is considered valid if:\n 3 characters length or more\n each character can only be letter (either uppercase or lowercase), digit, '_', or '-'\n the first character is a letter\n\n Parameters\n ----------\n username: str\n\n Returns\n -------\n bool\n True if the username is valid, False otherwise.\n \"\"\"\n return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None\n"
] | class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
def __init__(self, hostname='127.0.0.1', port=6379, socket='', db=0, password=''):
    """Initialize the class Pytwis.

    Parameters
    ----------
    hostname : str, optional
        The Redis server hostname which is usually an IP address (default: 127.0.0.1).
    port : int, optional
        The Redis server port number (default: 6379).
    socket : str, optional
        The Redis server unix socket; overrides hostname and port if given.
    db : int, optional
        The selected Redis database index (default: 0).
    password : str, optional
        The Redis server password (default: '').

    Raises
    ------
    ValueError
        If the connection to the Redis server fails with either
        ResponseError or RedisTimeoutError.
    """
    # Options shared by both the unix-socket and the TCP connection styles.
    common_opts = {
        'db': db,
        'password': password,
        # Decode the response bytes into strings.
        'decode_responses': True,
        'socket_connect_timeout': pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT,
    }
    if socket:
        self._rc = redis.StrictRedis(unix_socket_path=socket, **common_opts)
    else:
        self._rc = redis.StrictRedis(host=hostname, port=port, **common_opts)

    # Verify the connection by pinging the server.
    try:
        if self._rc.ping():
            if socket:
                print('Ping {} returned True'.format(socket))
            else:
                print('Ping {}:{} returned True'.format(hostname, port))
    except (ResponseError, RedisTimeoutError) as excep:
        raise ValueError(str(excep))
def _is_loggedin(self, auth_secret):
    """Verify an authentication secret and resolve it to a user ID.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of a logged-in user.

    Returns
    -------
    bool
        True if the authentication secret is valid, False otherwise.
    userid: str
        The user ID associated with the authentication secret if it is
        valid, None otherwise.
    """
    # Map the authentication secret back to a user ID.
    userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
    if userid is None:
        return (False, None)

    # Cross-check against the secret stored in the user profile.
    profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
    if self._rc.hget(profile_key, pytwis_constants.AUTH_KEY) == auth_secret:
        return (True, userid)

    # TODO: Resolve the inconsistency of the two authentication secrets.
    return (False, None)
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error)
def change_password(self, auth_secret, old_password, new_password):
    """Change the user password.

    Parameters
    ----------
    auth_secret: str
        The authentication secret which will be used for user authentication.
    old_password: str
        The old password before the change.
    new_password: str
        The new password after the change.

    Returns
    -------
    bool
        True if the password is successfully changed, False otherwise.
    result
        A dict containing the new authentication secret with the key AUTH_KEY
        if the password is successfully changed, a dict containing the error
        string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NEW_PASSWORD_NO_CHANGE
    -  ERROR_NOT_LOGGED_IN
    -  ERROR_INCORRECT_OLD_PASSWORD
    -  ERROR_WEAK_PASSWORD
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Reject a no-op change up front.
    if old_password == new_password:
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
        return (False, result)

    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Check if the old password matches the stored hash.
    userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
    stored_password_hash = self._rc.hget(userid_profile_key,
                                         pytwis_constants.PASSWORD_HASH_KEY)
    if not check_password_hash(stored_password_hash, old_password):
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
        return (False, result)

    # Check the strength of the new password.
    if not Pytwis._check_password(new_password):
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
        return (False, result)

    # Generate a new authentication secret; the old one is deleted below,
    # so a password change also invalidates the caller's previous secret.
    new_auth_secret = secrets.token_hex()

    # Generate the new password hash.
    # The format of the new password hash looks like "method$salt$hash".
    new_password_hash = generate_password_hash(new_password,
                                               method=\
                                               pytwis_constants.PASSWORD_HASH_METHOD)

    # Replace the old password hash and the old authentication secret by the
    # new ones in one MULTI/EXEC transaction so the profile and the
    # auths mapping stay consistent.
    with self._rc.pipeline() as pipe:
        pipe.multi()
        pipe.hset(userid_profile_key,
                  pytwis_constants.PASSWORD_HASH_KEY,
                  new_password_hash)
        pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
        pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
        pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
        pipe.execute()

    result[pytwis_constants.AUTH_KEY] = new_auth_secret
    return (True, result)
def login(self, username, password):
    """Log into a user.

    Parameters
    ----------
    username: str
        The username.
    password: str
        The password.

    Returns
    -------
    bool
        True if the login is successful, False otherwise.
    result
        A dict containing the authentication secret with the key AUTH_KEY
        if the login is successful, a dict containing the error string
        with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
    -  ERROR_INCORRECT_PASSWORD
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Resolve the username to its user ID.
    userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
    if userid is None:
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
        return (False, result)

    # Verify the supplied password against the stored hash.
    profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
    stored_hash = self._rc.hget(profile_key,
                                pytwis_constants.PASSWORD_HASH_KEY)
    if not check_password_hash(stored_hash, password):
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
        return (False, result)

    # On success hand back the stored authentication secret.
    result[pytwis_constants.AUTH_KEY] = \
        self._rc.hget(profile_key, pytwis_constants.AUTH_KEY)
    return (True, result)
def logout(self, auth_secret):
    """Log out of a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the logout is successful, False otherwise.
    result
        A dict containing the username (USERNAME_KEY) and an emptied
        authentication secret (AUTH_KEY) if the logout is successful,
        a dict containing the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Generate a new authentication secret; rotating it (and deleting the
    # old mapping below) is what actually invalidates the caller's session.
    new_auth_secret = secrets.token_hex()

    # Replace the old authentication secret by the new one in one
    # MULTI/EXEC transaction.
    userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
    with self._rc.pipeline() as pipe:
        pipe.multi()
        pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
        pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
        pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
        pipe.execute()

    # Hand back the username and an empty secret for the caller to store.
    result[pytwis_constants.USERNAME_KEY] = \
        self._rc.hget(userid_profile_key, pytwis_constants.USERNAME_KEY)
    result[pytwis_constants.AUTH_KEY] = ''
    return (True, result)
def get_user_profile(self, auth_secret):
    """Get the profile (i.e., username, password, etc.) of a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the user profile is successfully obtained, False otherwise.
    result
        A dict containing the following keys:

        -  USERNAME_KEY
        -  PASSWORD_HASH_KEY
        -  AUTH_KEY

        if the user profile is obtained successfully; otherwise a dict
        containing the error string with the key ERROR_KEY.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Return the raw profile Hash. Note that this includes the stored
    # password hash and the current authentication secret, so callers
    # must not expose the dict to other users.
    userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
    result = self._rc.hgetall(userid_profile_key)
    return (True, result)
def post_tweet(self, auth_secret, tweet):
    """Post a tweet.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    tweet: str
        The tweet that will be posted.

    Returns
    -------
    bool
        True if the tweet is successfully posted, False otherwise.
    result
        None if the tweet is successfully posted, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Get the next tweet (post) ID. If the key doesn't exist, it will be
    # created and initialized as 0, and then incremented by 1.
    post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)
    post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)

    post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
    post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)

    # Snapshot the follower list before the transaction below.
    # NOTE(review): a follower added between this read and the EXEC will
    # miss this tweet in their timeline — presumably an accepted race;
    # confirm.
    follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
    followers = self._rc.zrange(follower_zset_key, 0, -1)

    unix_time = int(time.time())
    with self._rc.pipeline() as pipe:
        pipe.multi()
        # Store the tweet with its user ID and UNIX timestamp.
        pipe.hmset(post_id_key,
                   {pytwis_constants.TWEET_USERID_KEY: userid,
                    pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,
                    pytwis_constants.TWEET_BODY_KEY: tweet})
        # Add the tweet to the user timeline.
        pipe.lpush(post_id_timeline_key, post_id)
        # Add the tweet to the tweet list posted by the user.
        pipe.lpush(post_id_user_key, post_id)
        # Write fanout the tweet to all the followers' timelines.
        for follower in followers:
            post_id_follower_key = \
                pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)
            pipe.lpush(post_id_follower_key, post_id)
        # Add the tweet to the general timeline and left trim the general
        # timeline to only retain the latest
        # GENERAL_TIMELINE_MAX_TWEET_CNT tweets.
        pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)
        pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
                   0,
                   pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)
        pipe.execute()

    return (True, result)
def follow(self, auth_secret, followee_username):
    """Follow a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    followee_username: str
        The username of the followee.

    Returns
    -------
    bool
        True if the follow is successful, False otherwise.
    result
        None if the follow is successful, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    -  ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
    -  ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    with self._rc.pipeline() as pipe:
        # Check if the followee exists.
        while True:
            try:
                # Put a watch on the Hash 'users': username -> user-id, in
                # case that other clients are modifying the Hash 'users'.
                pipe.watch(pytwis_constants.USERS_KEY)
                followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                if followee_userid is None:
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                        format(followee_username)
                    return (False, result)
                elif followee_userid == userid:
                    # Following yourself is disallowed.
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
                    return (False, result)

                break
            except WatchError:
                continue

        # Update the two zsets 'followers:[followee_username]' and
        # 'following:[username]'; the score is the follow timestamp.
        # NOTE(review): zadd(key, score, member) is the redis-py 2.x calling
        # convention; redis-py >= 3.0 expects zadd(key, {member: score}) —
        # confirm the pinned redis-py version before upgrading.
        follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
        following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
        unix_time = int(time.time())
        pipe.multi()
        pipe.zadd(follower_zset_key, unix_time, userid)
        pipe.zadd(following_zset_key, unix_time, followee_userid)
        pipe.execute()

    return (True, result)
def unfollow(self, auth_secret, followee_username):
    """Unfollow a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    followee_username: str
        The username of the followee.

    Returns
    -------
    bool
        True if the unfollow is successful, False otherwise.
    result
        None if the unfollow is successful, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    -  ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    with self._rc.pipeline() as pipe:
        # Check if the followee exists.
        while True:
            try:
                # Put a watch on the Hash 'users': username -> user-id, in
                # case that other clients are modifying the Hash 'users'.
                pipe.watch(pytwis_constants.USERS_KEY)
                followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                if followee_userid is None:
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                        format(followee_username)
                    return (False, result)

                break
            except WatchError:
                continue

        # Remove followee_userid from the zset 'following:[username]' and
        # remove userid from the zset 'followers:[followee_username]'.
        follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
        following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
        pipe.multi()
        pipe.zrem(follower_zset_key, userid)
        pipe.zrem(following_zset_key, followee_userid)
        pipe.execute()

    return (True, result)
def get_followers(self, auth_secret):
    """Get the follower list of a logged-in user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the follower list is successfully obtained, False otherwise.
    result
        A dict containing the follower list with the key FOLLOWER_LIST_KEY
        if the follower list is successfully obtained, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Reject callers who are not logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Fetch the followers' user IDs from the sorted set.
    follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
    follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
    if not follower_userids:
        result[pytwis_constants.FOLLOWER_LIST_KEY] = []
        return (True, result)

    # Translate every follower user ID into a username in one round trip.
    with self._rc.pipeline() as pipe:
        pipe.multi()
        for follower_userid in follower_userids:
            profile_key = \
                pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
            pipe.hget(profile_key, pytwis_constants.USERNAME_KEY)
        result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()

    return (True, result)
def get_following(self, auth_secret):
    """Get the following list of a logged-in user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the following list is successfully obtained, False otherwise.
    result
        A dict containing the following list with the key FOLLOWING_LIST_KEY
        if the following list is successfully obtained, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Reject callers who are not logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Fetch the followees' user IDs from the sorted set.
    following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
    following_userids = self._rc.zrange(following_zset_key, 0, -1)
    if not following_userids:
        result[pytwis_constants.FOLLOWING_LIST_KEY] = []
        return (True, result)

    # Translate every followee user ID into a username in one round trip.
    with self._rc.pipeline() as pipe:
        pipe.multi()
        for following_userid in following_userids:
            profile_key = \
                pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
            pipe.hget(profile_key, pytwis_constants.USERNAME_KEY)
        result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()

    return (True, result)
def _get_tweets(self, tweets_key, max_cnt_tweets):
    """Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.

    Parameters
    ----------
    tweets_key: str
        The key of the Redis list which stores the tweets.
    max_cnt_tweets: int
        The maximum number of tweets included in the returned list. If it
        is set to -1, then all the available tweets will be included.

    Returns
    -------
    tweets
        A list of tweets; each tweet dict also carries the author's
        username under USERNAME_KEY.
    """
    tweets = []
    if max_cnt_tweets == 0:
        return tweets
    elif max_cnt_tweets == -1:
        # Return all the tweets in the timeline.
        last_tweet_index = -1
    else:
        # Return at most max_cnt_tweets tweets.
        last_tweet_index = max_cnt_tweets - 1

    # Get the post IDs of the tweets.
    post_ids = self._rc.lrange(tweets_key, 0, last_tweet_index)

    if not post_ids:
        return tweets

    with self._rc.pipeline() as pipe:
        # Get the tweets with their user IDs and UNIX timestamps.
        pipe.multi()
        for post_id in post_ids:
            post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
            pipe.hgetall(post_id_key)
        tweets = pipe.execute()

        # Get the userid-to-username mappings for all the user IDs
        # associated with the tweets. A set de-duplicates the user IDs;
        # userid_list records the order in which they are queued so the
        # pipeline results can be zipped back to them.
        userid_set = {tweet[pytwis_constants.TWEET_USERID_KEY] for tweet in tweets}
        userid_list = []
        pipe.multi()
        for userid in userid_set:
            userid_list.append(userid)
            userid_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
            pipe.hget(userid_key, pytwis_constants.USERNAME_KEY)
        username_list = pipe.execute()
        userid_to_username = {userid: username for userid, username in\
                              zip(userid_list, username_list)}

        # Add the username for the user ID of each tweet.
        for tweet in tweets:
            tweet[pytwis_constants.USERNAME_KEY] = \
                userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]

    return tweets
def get_timeline(self, auth_secret, max_cnt_tweets):
    """Get the general or user timeline.

    If an empty authentication secret is given, this method returns the
    general timeline. If an authentication secret is given and it is valid,
    this method returns the user timeline. If an authentication secret is
    given but it is invalid, this method returns an error.

    Parameters
    ----------
    auth_secret: str
        Either the authentication secret of the logged-in user or an empty
        string.
    max_cnt_tweets: int
        The maximum number of tweets included in the timeline. If it is set
        to -1, then all the available tweets will be included.

    Returns
    -------
    bool
        True if the timeline is successfully retrieved, False otherwise.
    result
        A dict containing a list of tweets with the key TWEETS_KEY if
        the timeline is successfully retrieved, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    if auth_secret == '':
        # An empty authentication secret selects the general timeline.
        timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY
    else:
        # Otherwise the secret must belong to a logged-in user, whose
        # personal timeline is returned.
        loggedin, userid = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)
        timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)

    result[pytwis_constants.TWEETS_KEY] = self._get_tweets(timeline_key, max_cnt_tweets)
    return (True, result)
def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
    """Get the tweets posted by one user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    username:
        The name of the user who posted the tweets; this may not be the
        logged-in user.
    max_cnt_tweets: int
        The maximum number of tweets included in the return. If it is set
        to -1, then all the tweets posted by the user will be included.

    Returns
    -------
    bool
        True if the tweets are successfully retrieved, False otherwise.
    result
        A dict containing a list of tweets with the key TWEETS_KEY if
        the tweets are successfully retrieved, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    -  ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Only logged-in users may read another user's tweets.
    loggedin, _ = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Resolve the target username to a user ID.
    userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
    if userid is None:
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
        return (False, result)

    # Pull the tweets posted by that user.
    user_tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
    result[pytwis_constants.TWEETS_KEY] = \
        self._get_tweets(user_tweets_key, max_cnt_tweets)
    return (True, result)
|
def change_password(self, auth_secret, old_password, new_password):
    """Change the user password.

    Parameters
    ----------
    auth_secret: str
        The authentication secret which will be used for user authentication.
    old_password: str
        The old password before the change.
    new_password: str
        The new password after the change.

    Returns
    -------
    bool
        True if the password is successfully changed, False otherwise.
    result
        A dict containing the new authentication secret with the key AUTH_KEY
        if the password is successfully changed, a dict containing the error
        string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NEW_PASSWORD_NO_CHANGE
    -  ERROR_NOT_LOGGED_IN
    -  ERROR_INCORRECT_OLD_PASSWORD
    -  ERROR_WEAK_PASSWORD
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Reject a no-op change up front.
    if old_password == new_password:
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
        return (False, result)

    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Check if the old password matches the stored hash.
    userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
    stored_password_hash = self._rc.hget(userid_profile_key,
                                         pytwis_constants.PASSWORD_HASH_KEY)
    if not check_password_hash(stored_password_hash, old_password):
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
        return (False, result)

    # Check the strength of the new password.
    if not Pytwis._check_password(new_password):
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
        return (False, result)

    # Generate a new authentication secret; the old one is deleted below,
    # so a password change also invalidates the caller's previous secret.
    new_auth_secret = secrets.token_hex()

    # Generate the new password hash.
    # The format of the new password hash looks like "method$salt$hash".
    new_password_hash = generate_password_hash(new_password,
                                               method=\
                                               pytwis_constants.PASSWORD_HASH_METHOD)

    # Replace the old password hash and the old authentication secret by the
    # new ones in one MULTI/EXEC transaction so the profile and the
    # auths mapping stay consistent.
    with self._rc.pipeline() as pipe:
        pipe.multi()
        pipe.hset(userid_profile_key,
                  pytwis_constants.PASSWORD_HASH_KEY,
                  new_password_hash)
        pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
        pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
        pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
        pipe.execute()

    result[pytwis_constants.AUTH_KEY] = new_auth_secret
    return (True, result)
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L273-L352 | [
"def _is_loggedin(self, auth_secret):\n \"\"\"Check if a user is logged-in by verifying the input authentication secret.\n\n Parameters\n ----------\n auth_secret: str\n The authentication secret of a logged-in user.\n\n Returns\n -------\n bool\n True if the authentication secret is valid, False otherwise.\n userid: str\n The user ID associated with the authentication secret if the authentication secret\n valid, None otherwise.\n \"\"\"\n # Get the userid from the authentication secret.\n userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)\n if userid is None:\n return (False, None)\n\n # Compare the input authentication secret with the stored one.\n userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)\n stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)\n if auth_secret == stored_auth_secret:\n return (True, userid)\n\n # TODO: Resolve the inconsistency of the two authentication secrets.\n return (False, None)\n",
"def _check_password(password):\n \"\"\"Check the strength of a password.\n A password is considered strong if\n 8 characters length or more\n 1 digit or more\n 1 uppercase letter or more\n 1 lowercase letter or more\n 1 symbol (excluding whitespace characters) or more\n\n Parameters\n ----------\n password: str\n\n Returns\n -------\n bool\n True if the password is strong enough, False otherwise.\n \"\"\"\n # Check the length.\n length_error = len(password) < 8\n\n # Search for digits.\n digit_error = re.search(r'\\d', password) is None\n\n # Search for uppercase letters.\n uppercase_error = re.search(r'[A-Z]', password) is None\n\n # Search for lowercase letters.\n lowercase_error = re.search(r'[a-z]', password) is None\n\n # Search for symbols (excluding whitespace characters).\n symbol_error = re.search(r'[^A-Za-z\\d\\s]', password) is None\n\n return not (length_error or digit_error or uppercase_error or\\\n lowercase_error or symbol_error)\n"
]
class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
def __init__(self, hostname='127.0.0.1', port=6379, socket='', db=0, password=''):
    """Initialize the class Pytwis.

    Parameters
    ----------
    hostname : str, optional
        The Redis server hostname which is usually an IP address (default: 127.0.0.1).
    port : int, optional
        The Redis server port number (default: 6379).
    socket : str, optional
        The Redis server unix socket; overrides hostname and port if given.
    db : int, optional
        The selected Redis database index (default: 0).
    password : str, optional
        The Redis server password (default: '').

    Raises
    ------
    ValueError
        If the connection to the Redis server fails with either
        ResponseError or RedisTimeoutError.
    """
    # Options shared by both the unix-socket and the TCP connection styles.
    common_opts = {
        'db': db,
        'password': password,
        # Decode the response bytes into strings.
        'decode_responses': True,
        'socket_connect_timeout': pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT,
    }
    if socket:
        self._rc = redis.StrictRedis(unix_socket_path=socket, **common_opts)
    else:
        self._rc = redis.StrictRedis(host=hostname, port=port, **common_opts)

    # Verify the connection by pinging the server.
    try:
        if self._rc.ping():
            if socket:
                print('Ping {} returned True'.format(socket))
            else:
                print('Ping {}:{} returned True'.format(hostname, port))
    except (ResponseError, RedisTimeoutError) as excep:
        raise ValueError(str(excep))
def _is_loggedin(self, auth_secret):
    """Verify an authentication secret and resolve it to a user ID.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of a logged-in user.

    Returns
    -------
    bool
        True if the authentication secret is valid, False otherwise.
    userid: str
        The user ID associated with the authentication secret if it is
        valid, None otherwise.
    """
    # Map the authentication secret back to a user ID.
    userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
    if userid is None:
        return (False, None)

    # Cross-check against the secret stored in the user profile.
    profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
    if self._rc.hget(profile_key, pytwis_constants.AUTH_KEY) == auth_secret:
        return (True, userid)

    # TODO: Resolve the inconsistency of the two authentication secrets.
    return (False, None)
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error)
    def register(self, username, password):
        """Register a new user.

        Parameters
        ----------
        username: str
            The username.
        password: str
            The password.

        Returns
        -------
        bool
            True if the new user is successfully registered, False otherwise.
        result
            A dict with ERROR_KEY mapped to None if the new user is
            successfully registered, a dict containing the error string
            with the key ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        -  ERROR_INVALID_USERNAME
        -  ERROR_USERNAME_ALREADY_EXISTS.format(username)
        -  ERROR_WEAK_PASSWORD
        """
        result = {pytwis_constants.ERROR_KEY: None}
        # Check the username.
        if not Pytwis._check_username(username):
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
            return (False, result)
        # Check the password.
        if not Pytwis._check_password(password):
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
            return (False, result)
        # Update the username-to-userid mapping.
        with self._rc.pipeline() as pipe:
            while True:
                try:
                    # Put a watch on the Hash 'users': username -> user-id, in case that
                    # multiple clients are registering with the same username.
                    pipe.watch(pytwis_constants.USERS_KEY)
                    username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
                    if username_exists:
                        result[pytwis_constants.ERROR_KEY] = \
                            pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
                        return (False, result)
                    # Get the next user-id. If the key "next_user_id" doesn't exist,
                    # it will be created and initialized as 0, and then incremented by 1.
                    userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)
                    # Set the username-to-userid pair in USERS_HASH_KEY.
                    pipe.multi()
                    pipe.hset(pytwis_constants.USERS_KEY, username, userid)
                    pipe.execute()
                    break
                except WatchError:
                    # The watched Hash 'users' was modified by another client
                    # between WATCH and EXEC; retry the whole check-and-set.
                    continue
            # Generate the authentication secret.
            auth_secret = secrets.token_hex()
            userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
            # Generate the password hash.
            # The format of the password hash looks like "method$salt$hash".
            password_hash = generate_password_hash(password,
                                                   method=\
                                                   pytwis_constants.PASSWORD_HASH_METHOD)
            pipe.multi()
            # Update the authentication_secret-to-userid mapping.
            pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
            # Create the user profile.
            pipe.hmset(userid_profile_key,
                       {pytwis_constants.USERNAME_KEY: username,
                        pytwis_constants.PASSWORD_HASH_KEY: password_hash,
                        pytwis_constants.AUTH_KEY: auth_secret})
            pipe.execute()
        return (True, result)
def login(self, username, password):
"""Log into a user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the login is successful, False otherwise.
result
A dict containing the authentication secret with the key AUTH_KEY
if the login is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
- ERROR_INCORRECT_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Get the user-id based on the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Compare the input password hash with the stored one. If it matches,
# return the authentication secret.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if check_password_hash(stored_password_hash, password):
result[pytwis_constants.AUTH_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
return (True, result)
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
return (False, result)
def logout(self, auth_secret):
"""Log out of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
None if the logout is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Replace the old authentication secret by the new one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.USERNAME_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.AUTH_KEY] = ''
return (True, result)
def get_user_profile(self, auth_secret):
"""Get the profile (i.e., username, password, etc.) of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
A dict containing the following keys:
- USERNAME_KEY
- PASSWORD_HASH_KEY
- AUTH_KEY
if the user profile is obtained successfully; otherwise a dict
containing the error string with the key ERROR_KEY.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
result = self._rc.hgetall(userid_profile_key)
return (True, result)
    def post_tweet(self, auth_secret, tweet):
        """Post a tweet.

        Parameters
        ----------
        auth_secret: str
            The authentication secret of the logged-in user.
        tweet: str
            The tweet that will be posted.

        Returns
        -------
        bool
            True if the tweet is successfully posted, False otherwise.
        result
            A dict with ERROR_KEY mapped to None if the tweet is successfully
            posted, a dict containing the error string with the key ERROR_KEY
            otherwise.

        Note
        ----
        Possible error strings are listed as below:

        -  ERROR_NOT_LOGGED_IN
        """
        result = {pytwis_constants.ERROR_KEY: None}
        # Check if the user is logged in.
        loggedin, userid = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)
        # Get the next tweet ID. If the key "next_tweet_id" doesn't exist,
        # it will be created and initialized as 0, and then incremented by 1.
        post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)
        post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
        post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
        post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
        follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
        # Read the follower list BEFORE opening the transaction: the fan-out
        # loop below needs the member values, and reads issued inside MULTI
        # are only queued, not executed.
        followers = self._rc.zrange(follower_zset_key, 0, -1)
        unix_time = int(time.time())
        with self._rc.pipeline() as pipe:
            pipe.multi()
            # Store the tweet with its user ID and UNIX timestamp.
            pipe.hmset(post_id_key,
                       {pytwis_constants.TWEET_USERID_KEY: userid,
                        pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,
                        pytwis_constants.TWEET_BODY_KEY: tweet})
            # Add the tweet to the user timeline.
            pipe.lpush(post_id_timeline_key, post_id)
            # Add the tweet to the tweet list posted by the user.
            pipe.lpush(post_id_user_key, post_id)
            # Write-fanout the tweet to all the followers' timelines.
            for follower in followers:
                post_id_follower_key = \
                    pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)
                pipe.lpush(post_id_follower_key, post_id)
            # Add the tweet to the general timeline and left trim the general timeline
            # to only retain the latest GENERAL_TIMELINE_MAX_TWEET_CNT tweets.
            pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)
            pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
                       0,
                       pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)
            pipe.execute()
        return (True, result)
    def follow(self, auth_secret, followee_username):
        """Follow a user.

        Parameters
        ----------
        auth_secret: str
            The authentication secret of the logged-in user.
        followee_username: str
            The username of the followee.

        Returns
        -------
        bool
            True if the follow is successful, False otherwise.
        result
            A dict with ERROR_KEY mapped to None if the follow is successful,
            a dict containing the error string with the key ERROR_KEY
            otherwise.

        Note
        ----
        Possible error strings are listed as below:

        -  ERROR_NOT_LOGGED_IN
        -  ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
        -  ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
        """
        result = {pytwis_constants.ERROR_KEY: None}
        # Check if the user is logged in.
        loggedin, userid = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)
        with self._rc.pipeline() as pipe:
            # Check if the followee exists.
            while True:
                try:
                    # Put a watch on the Hash 'users': username -> user-id, in case that
                    # other clients are modifying the Hash 'users'.
                    pipe.watch(pytwis_constants.USERS_KEY)
                    followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                    if followee_userid is None:
                        result[pytwis_constants.ERROR_KEY] = \
                            pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                            format(followee_username)
                        return (False, result)
                    elif followee_userid == userid:
                        # A user may not follow themselves.
                        result[pytwis_constants.ERROR_KEY] = \
                            pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
                        return (False, result)
                    break
                except WatchError:
                    # The watched Hash 'users' changed between WATCH and the
                    # read; retry the existence check.
                    continue
            # Update the two zset 'followers:[followee_username]' and 'following:[username]'.
            follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
            following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
            # The UNIX timestamp serves as the sorted-set score.
            unix_time = int(time.time())
            pipe.multi()
            # NOTE(review): zadd(key, score, member) is the redis-py 2.x calling
            # convention; redis-py 3.x expects a mapping instead — confirm the
            # pinned redis package version before upgrading.
            pipe.zadd(follower_zset_key, unix_time, userid)
            pipe.zadd(following_zset_key, unix_time, followee_userid)
            pipe.execute()
        return (True, result)
    def unfollow(self, auth_secret, followee_username):
        """Unfollow a user.

        Parameters
        ----------
        auth_secret: str
            The authentication secret of the logged-in user.
        followee_username: str
            The username of the followee.

        Returns
        -------
        bool
            True if the unfollow is successful, False otherwise.
        result
            A dict with ERROR_KEY mapped to None if the unfollow is
            successful, a dict containing the error string with the key
            ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        -  ERROR_NOT_LOGGED_IN
        -  ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
        """
        result = {pytwis_constants.ERROR_KEY: None}
        # Check if the user is logged in.
        loggedin, userid = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)
        with self._rc.pipeline() as pipe:
            # Check if the followee exists.
            while True:
                try:
                    # Put a watch on the Hash 'users': username -> user-id, in case that
                    # other clients are modifying the Hash 'users'.
                    pipe.watch(pytwis_constants.USERS_KEY)
                    followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                    if followee_userid is None:
                        result[pytwis_constants.ERROR_KEY] = \
                            pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                            format(followee_username)
                        return (False, result)
                    break
                except WatchError:
                    # The watched Hash 'users' changed between WATCH and the
                    # read; retry the existence check.
                    continue
            # Remove followee_userid from the zset 'following:[username]' and remove userid
            # from the zset 'followers:[followee_username]'.
            follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
            following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
            pipe.multi()
            pipe.zrem(follower_zset_key, userid)
            pipe.zrem(following_zset_key, followee_userid)
            pipe.execute()
        return (True, result)
def get_followers(self, auth_secret):
"""Get the follower list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the follower list is successfully obtained, False otherwise.
result
A dict containing the follower list with the key FOLLOWER_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
if follower_userids is None or not follower_userids:
result[pytwis_constants.FOLLOWER_LIST_KEY] = []
return (True, result)
# Get the list of followers' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for follower_userid in follower_userids:
follower_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()
return (True, result)
def get_following(self, auth_secret):
"""Get the following list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the following list is successfully obtained, False otherwise.
result
A dict containing the following list with the key FOLLOWING_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
following_userids = self._rc.zrange(following_zset_key, 0, -1)
if following_userids is None or not following_userids:
result[pytwis_constants.FOLLOWING_LIST_KEY] = []
return (True, result)
# Get the list of followings' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for following_userid in following_userids:
following_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()
return (True, result)
def _get_tweets(self, tweets_key, max_cnt_tweets):
"""Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.
Parameters
----------
tweets_key: str
The key of the Redis list which stores the tweets.
max_cnt_tweets: int
The maximum number of tweets included in the returned list. If it is set to -1,
then all the available tweets will be included.
Returns
-------
tweets
A list of tweets
"""
tweets = []
if max_cnt_tweets == 0:
return tweets
elif max_cnt_tweets == -1:
# Return all the tweets in the timeline.
last_tweet_index = -1
else:
# Return at most max_cnt_tweets tweets.
last_tweet_index = max_cnt_tweets - 1
# Get the post IDs of the tweets.
post_ids = self._rc.lrange(tweets_key, 0, last_tweet_index)
if not post_ids:
return tweets
with self._rc.pipeline() as pipe:
# Get the tweets with their user IDs and UNIX timestamps.
pipe.multi()
for post_id in post_ids:
post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
pipe.hgetall(post_id_key)
tweets = pipe.execute()
# Get the userid-to-username mappings for all the user IDs associated with the tweets.
userid_set = {tweet[pytwis_constants.TWEET_USERID_KEY] for tweet in tweets}
userid_list = []
pipe.multi()
for userid in userid_set:
userid_list.append(userid)
userid_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
pipe.hget(userid_key, pytwis_constants.USERNAME_KEY)
username_list = pipe.execute()
userid_to_username = {userid: username for userid, username in\
zip(userid_list, username_list)}
# Add the username for the user ID of each tweet.
for tweet in tweets:
tweet[pytwis_constants.USERNAME_KEY] = \
userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]
return tweets
def get_timeline(self, auth_secret, max_cnt_tweets):
"""Get the general or user timeline.
If an empty authentication secret is given, this method returns the general timeline.
If an authentication secret is given and it is valid, this method returns the user timeline.
If an authentication secret is given but it is invalid, this method returns an error.
Parameters
----------
auth_secret: str
Either the authentication secret of the logged-in user or an empty string.
max_cnt_tweets: int
The maximum number of tweets included in the timeline. If it is set to -1,
then all the available tweets will be included.
Returns
-------
bool
True if the timeline is successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the timeline is successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
if auth_secret == '':
# An empty authentication secret implies getting the general timeline.
timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY
else:
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the user timeline.
timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = self._get_tweets(timeline_key, max_cnt_tweets)
return (True, result)
def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
"""Get the tweets posted by one user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
username:
The name of the user who post the tweets and may not be the logged-in user.
max_cnt_tweets: int
The maximum number of tweets included in the return. If it is set to -1,
then all the tweets posted by the user will be included.
Returns
-------
bool
True if the tweets are successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the tweets are successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, _ = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the userid from the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Get the tweets posted by the user.
user_tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = \
self._get_tweets(user_tweets_key, max_cnt_tweets)
return (True, result)
|
renweizhukov/pytwis | pytwis/pytwis.py | Pytwis.login | python | def login(self, username, password):
result = {pytwis_constants.ERROR_KEY: None}
# Get the user-id based on the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Compare the input password hash with the stored one. If it matches,
# return the authentication secret.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if check_password_hash(stored_password_hash, password):
result[pytwis_constants.AUTH_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
return (True, result)
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
return (False, result) | Log into a user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the login is successful, False otherwise.
result
A dict containing the authentication secret with the key AUTH_KEY
if the login is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
- ERROR_INCORRECT_PASSWORD | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L354-L400 | null | class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
def __init__(self, hostname='127.0.0.1', port=6379, socket='', db=0, password=''):
"""Initialize the class Pytiws.
Parameters
----------
hostname : str, optional
The Redis server hostname which is usually an IP address (default: 127.0.0.1).
port : int, optional
The Redis server port number (default: 6379).
socket: str, optional
The Redis server socket which will override hostname and port if it is given.
db : int, optional
The selected Redis database index (default: 0).
password : str, optional
The Redis server password (default: '').
Raises
------
ValueError
If failed to connect to the Redis server with either ResponseError or RedisTimeoutError.
"""
if socket:
self._rc = redis.StrictRedis(
unix_socket_path=socket,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
else:
self._rc = redis.StrictRedis(
host=hostname,
port=port,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
# Test the connection by ping.
try:
if self._rc.ping():
if socket:
print('Ping {} returned True'.format(socket))
else:
print('Ping {}:{} returned True'.format(hostname, port))
except (ResponseError, RedisTimeoutError) as excep:
raise ValueError(str(excep))
def _is_loggedin(self, auth_secret):
"""Check if a user is logged-in by verifying the input authentication secret.
Parameters
----------
auth_secret: str
The authentication secret of a logged-in user.
Returns
-------
bool
True if the authentication secret is valid, False otherwise.
userid: str
The user ID associated with the authentication secret if the authentication secret
valid, None otherwise.
"""
# Get the userid from the authentication secret.
userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
if userid is None:
return (False, None)
# Compare the input authentication secret with the stored one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
if auth_secret == stored_auth_secret:
return (True, userid)
# TODO: Resolve the inconsistency of the two authentication secrets.
return (False, None)
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error)
def register(self, username, password):
"""Register a new user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the new user is successfully registered, False otherwise.
result
An empty dict if the new user is successfully registered, a dict
containing the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_ALREADY_EXISTS.format(username)
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check the username.
if not Pytwis._check_username(username):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
return (False, result)
# Check the password.
if not Pytwis._check_password(password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Update the username-to-userid mapping.
with self._rc.pipeline() as pipe:
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# multiple clients are registering with the same username.
pipe.watch(pytwis_constants.USERS_KEY)
username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
if username_exists:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)
# Set the username-to-userid pair in USERS_HASH_KEY.
pipe.multi()
pipe.hset(pytwis_constants.USERS_KEY, username, userid)
pipe.execute()
break
except WatchError:
continue
# Generate the authentication secret.
auth_secret = secrets.token_hex()
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
# Generate the password hash.
# The format of the password hash looks like "method$salt$hash".
password_hash = generate_password_hash(password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
pipe.multi()
# Update the authentication_secret-to-userid mapping.
pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
# Create the user profile.
pipe.hmset(userid_profile_key,
{pytwis_constants.USERNAME_KEY: username,
pytwis_constants.PASSWORD_HASH_KEY: password_hash,
pytwis_constants.AUTH_KEY: auth_secret})
pipe.execute()
return (True, result)
def change_password(self, auth_secret, old_password, new_password):
"""Change the user password.
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
if old_password == new_password:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
return (False, result)
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Check if the old password matches.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if not check_password_hash(stored_password_hash, old_password):
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
return (False, result)
# Check the password.
if not Pytwis._check_password(new_password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Generate the new password hash.
# The format of the new password hash looks like "method$salt$hash".
new_password_hash = generate_password_hash(new_password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
# Replace the old password hash by the new one and the old authentication secret
# by the new one.
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY,
new_password_hash)
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.AUTH_KEY] = new_auth_secret
return (True, result)
def logout(self, auth_secret):
"""Log out of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
None if the logout is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Replace the old authentication secret by the new one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.USERNAME_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.AUTH_KEY] = ''
return (True, result)
def get_user_profile(self, auth_secret):
"""Get the profile (i.e., username, password, etc.) of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
A dict containing the following keys:
- USERNAME_KEY
- PASSWORD_HASH_KEY
- AUTH_KEY
if the user profile is obtained successfully; otherwise a dict
containing the error string with the key ERROR_KEY.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
result = self._rc.hgetall(userid_profile_key)
return (True, result)
def post_tweet(self, auth_secret, tweet):
    """Post a tweet.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    tweet: str
        The tweet that will be posted.

    Returns
    -------
    bool
        True if the tweet is successfully posted, False otherwise.
    result
        None if the tweet is successfully posted, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}
    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    # Get the next tweet-id. If the key "next_tweet_id" doesn't exist,
    # it will be created and initialized as 0, and then incremented by 1.
    post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)
    post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
    post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
    post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
    follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
    # Snapshot the follower set now; the fanout below will not see
    # followers added after this read.
    followers = self._rc.zrange(follower_zset_key, 0, -1)
    unix_time = int(time.time())
    with self._rc.pipeline() as pipe:
        pipe.multi()
        # Store the tweet with its user ID and UNIX timestamp.
        # NOTE(review): hmset is deprecated in redis-py 3.x in favor of
        # hset(mapping=...); presumably this targets an older client -- confirm.
        pipe.hmset(post_id_key,
                   {pytwis_constants.TWEET_USERID_KEY: userid,
                    pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,
                    pytwis_constants.TWEET_BODY_KEY: tweet})
        # Add the tweet to the user timeline.
        pipe.lpush(post_id_timeline_key, post_id)
        # Add the tweet to the tweet list posted by the user.
        pipe.lpush(post_id_user_key, post_id)
        # Write fanout the tweet to all the followers' timelines.
        for follower in followers:
            post_id_follower_key = \
                pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)
            pipe.lpush(post_id_follower_key, post_id)
        # Add the tweet to the general timeline and left trim the general timeline
        # to only retain the latest GENERAL_TIMELINE_MAX_TWEET_CNT tweets.
        pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)
        pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
                   0,
                   pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)
        # All the queued commands execute atomically in one MULTI/EXEC round.
        pipe.execute()
    return (True, result)
def follow(self, auth_secret, followee_username):
    """Follow a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    followee_username: str
        The username of the followee.

    Returns
    -------
    bool
        True if the follow is successful, False otherwise.
    result
        None if the follow is successful, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    - ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
    - ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
    """
    result = {pytwis_constants.ERROR_KEY: None}
    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    with self._rc.pipeline() as pipe:
        # Check if the followee exists.
        while True:
            try:
                # Put a watch on the Hash 'users': username -> user-id, in case that
                # other clients are modifying the Hash 'users'.
                pipe.watch(pytwis_constants.USERS_KEY)
                # After watch(), the pipeline runs in immediate mode, so this
                # hget() returns a real value instead of queueing the command.
                followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                if followee_userid is None:
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                        format(followee_username)
                    return (False, result)
                elif followee_userid == userid:
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
                    return (False, result)
                break
            except WatchError:
                # The watched 'users' hash changed under us; retry the check.
                continue
        # Update the two zset 'followers:[followee_username]' and 'following:[username]'.
        follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
        following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
        unix_time = int(time.time())
        pipe.multi()
        # NOTE(review): zadd(key, score, member) is the redis-py <3.0 calling
        # convention; 3.0+ expects a mapping -- confirm the pinned client version.
        pipe.zadd(follower_zset_key, unix_time, userid)
        pipe.zadd(following_zset_key, unix_time, followee_userid)
        pipe.execute()
    return (True, result)
def unfollow(self, auth_secret, followee_username):
    """Unfollow a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    followee_username: str
        The username of the followee.

    Returns
    -------
    bool
        True if the unfollow is successful, False otherwise.
    result
        None if the unfollow is successful, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    - ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
    """
    result = {pytwis_constants.ERROR_KEY: None}
    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    with self._rc.pipeline() as pipe:
        # Check if the followee exists.
        while True:
            try:
                # Put a watch on the Hash 'users': username -> user-id, in case that
                # other clients are modifying the Hash 'users'.
                pipe.watch(pytwis_constants.USERS_KEY)
                # Immediate-mode read while the watch is active.
                followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                if followee_userid is None:
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                        format(followee_username)
                    return (False, result)
                break
            except WatchError:
                # The watched 'users' hash changed under us; retry the check.
                continue
        # Remove followee_userid from the zset 'following:[username]' and remove userid
        # from the zset 'followers:[followee_username]'.
        follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
        following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
        pipe.multi()
        pipe.zrem(follower_zset_key, userid)
        pipe.zrem(following_zset_key, followee_userid)
        pipe.execute()
    return (True, result)
def get_followers(self, auth_secret):
    """Get the follower list of a logged-in user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the follower list is successfully obtained, False otherwise.
    result
        A dict containing the follower list with the key FOLLOWER_LIST_KEY
        if the follower list is successfully obtained, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}
    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    # Get the list of followers' userids.
    follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
    follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
    # zrange returns a (possibly empty) list; `not x` also covers None, so
    # the previous `is None or not x` double test was redundant.
    if not follower_userids:
        result[pytwis_constants.FOLLOWER_LIST_KEY] = []
        return (True, result)
    # Get the list of followers' usernames from their userids,
    # batched in one MULTI/EXEC round trip.
    with self._rc.pipeline() as pipe:
        pipe.multi()
        for follower_userid in follower_userids:
            follower_userid_profile_key = \
                pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
            pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
        result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()
    return (True, result)
def get_following(self, auth_secret):
    """Get the following list of a logged-in user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the following list is successfully obtained, False otherwise.
    result
        A dict containing the following list with the key FOLLOWING_LIST_KEY
        if the following list is successfully obtained, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}
    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    # Get the list of followings' userids.
    following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
    following_userids = self._rc.zrange(following_zset_key, 0, -1)
    # zrange returns a (possibly empty) list; `not x` also covers None, so
    # the previous `is None or not x` double test was redundant.
    if not following_userids:
        result[pytwis_constants.FOLLOWING_LIST_KEY] = []
        return (True, result)
    # Get the list of followings' usernames from their userids,
    # batched in one MULTI/EXEC round trip.
    with self._rc.pipeline() as pipe:
        pipe.multi()
        for following_userid in following_userids:
            following_userid_profile_key = \
                pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
            pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY)
        result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()
    return (True, result)
def _get_tweets(self, tweets_key, max_cnt_tweets):
    """Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.

    Parameters
    ----------
    tweets_key: str
        The key of the Redis list which stores the tweets.
    max_cnt_tweets: int
        The maximum number of tweets included in the returned list. If it is set to -1,
        then all the available tweets will be included.

    Returns
    -------
    tweets
        A list of tweet dicts (hash fields plus an added USERNAME_KEY).
    """
    tweets = []
    if max_cnt_tweets == 0:
        return tweets
    elif max_cnt_tweets == -1:
        # Return all the tweets in the timeline (Redis -1 == last element).
        last_tweet_index = -1
    else:
        # Return at most max_cnt_tweets tweets (lrange end index is inclusive).
        last_tweet_index = max_cnt_tweets - 1
    # Get the post IDs of the tweets.
    post_ids = self._rc.lrange(tweets_key, 0, last_tweet_index)
    if not post_ids:
        return tweets
    with self._rc.pipeline() as pipe:
        # Get the tweets with their user IDs and UNIX timestamps.
        pipe.multi()
        for post_id in post_ids:
            post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
            pipe.hgetall(post_id_key)
        tweets = pipe.execute()
        # Get the userid-to-username mappings for all the user IDs associated
        # with the tweets. The same pipeline object is reused for a second
        # MULTI/EXEC round after the first execute().
        userid_set = {tweet[pytwis_constants.TWEET_USERID_KEY] for tweet in tweets}
        userid_list = []
        pipe.multi()
        for userid in userid_set:
            # Track the order of queued hgets so replies can be zipped back.
            userid_list.append(userid)
            userid_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
            pipe.hget(userid_key, pytwis_constants.USERNAME_KEY)
        username_list = pipe.execute()
        userid_to_username = {userid: username for userid, username in
                              zip(userid_list, username_list)}
        # Add the username for the user ID of each tweet.
        for tweet in tweets:
            tweet[pytwis_constants.USERNAME_KEY] = \
                userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]
    return tweets
def get_timeline(self, auth_secret, max_cnt_tweets):
    """Get the general or user timeline.

    An empty authentication secret selects the general timeline; a valid
    secret selects that user's timeline; an invalid secret is an error.

    Parameters
    ----------
    auth_secret: str
        Either the authentication secret of the logged-in user or an empty string.
    max_cnt_tweets: int
        The maximum number of tweets included in the timeline. If it is set to -1,
        then all the available tweets will be included.

    Returns
    -------
    bool
        True if the timeline is successfully retrieved, False otherwise.
    result
        A dict containing a list of tweets with the key TWEETS_KEY if
        the timeline is successfully retrieved, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    """
    outcome = {pytwis_constants.ERROR_KEY: None}

    if auth_secret == '':
        # No secret given: serve the site-wide (general) timeline.
        timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY
    else:
        # A secret was given, so it must belong to a logged-in user.
        authenticated, user_id = self._is_loggedin(auth_secret)
        if not authenticated:
            outcome[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, outcome)
        timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(user_id)

    outcome[pytwis_constants.TWEETS_KEY] = \
        self._get_tweets(timeline_key, max_cnt_tweets)
    return (True, outcome)
def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
    """Get the tweets posted by one user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    username:
        The name of the user who posted the tweets; may differ from the
        logged-in user.
    max_cnt_tweets: int
        The maximum number of tweets included in the return. If it is set to -1,
        then all the tweets posted by the user will be included.

    Returns
    -------
    bool
        True if the tweets are successfully retrieved, False otherwise.
    result
        A dict containing a list of tweets with the key TWEETS_KEY if
        the tweets are successfully retrieved, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    - ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
    """
    outcome = {pytwis_constants.ERROR_KEY: None}

    # Only logged-in users may read another user's tweet list.
    authenticated, _ = self._is_loggedin(auth_secret)
    if not authenticated:
        outcome[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, outcome)

    # Resolve the target username to a user ID.
    target_userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
    if target_userid is None:
        outcome[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
        return (False, outcome)

    # Pull the tweets posted by that user.
    tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(target_userid)
    outcome[pytwis_constants.TWEETS_KEY] = \
        self._get_tweets(tweets_key, max_cnt_tweets)
    return (True, outcome)
|
renweizhukov/pytwis | pytwis/pytwis.py | Pytwis.logout | python | def logout(self, auth_secret):
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Replace the old authentication secret by the new one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.USERNAME_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.AUTH_KEY] = ''
return (True, result) | Log out of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
None if the logout is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L402-L447 | [
"def _is_loggedin(self, auth_secret):\n \"\"\"Check if a user is logged-in by verifying the input authentication secret.\n\n Parameters\n ----------\n auth_secret: str\n The authentication secret of a logged-in user.\n\n Returns\n -------\n bool\n True if the authentication secret is valid, False otherwise.\n userid: str\n The user ID associated with the authentication secret if the authentication secret\n valid, None otherwise.\n \"\"\"\n # Get the userid from the authentication secret.\n userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)\n if userid is None:\n return (False, None)\n\n # Compare the input authentication secret with the stored one.\n userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)\n stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)\n if auth_secret == stored_auth_secret:\n return (True, userid)\n\n # TODO: Resolve the inconsistency of the two authentication secrets.\n return (False, None)\n"
] | class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
def __init__(self, hostname='127.0.0.1', port=6379, socket='', db=0, password=''):
    """Initialize the class Pytiws.

    Parameters
    ----------
    hostname : str, optional
        The Redis server hostname which is usually an IP address (default: 127.0.0.1).
    port : int, optional
        The Redis server port number (default: 6379).
    socket: str, optional
        The Redis server socket which will override hostname and port if it is given.
    db : int, optional
        The selected Redis database index (default: 0).
    password : str, optional
        The Redis server password (default: '').

    Raises
    ------
    ValueError
        If failed to connect to the Redis server with either ResponseError or RedisTimeoutError.
    """
    # Options shared by both connection flavors (TCP and unix socket).
    common_kwargs = {
        'db': db,
        'password': password,
        'decode_responses': True,  # Decode the response bytes into strings.
        'socket_connect_timeout': pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT,
    }
    if socket:
        # A unix socket path overrides hostname and port.
        self._rc = redis.StrictRedis(unix_socket_path=socket, **common_kwargs)
        endpoint = socket
    else:
        self._rc = redis.StrictRedis(host=hostname, port=port, **common_kwargs)
        endpoint = '{}:{}'.format(hostname, port)

    # Test the connection by ping; surface connection problems as ValueError.
    try:
        if self._rc.ping():
            print('Ping {} returned True'.format(endpoint))
    except (ResponseError, RedisTimeoutError) as excep:
        raise ValueError(str(excep))
def _is_loggedin(self, auth_secret):
    """Check if a user is logged-in by verifying the input authentication secret.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of a logged-in user.

    Returns
    -------
    bool
        True if the authentication secret is valid, False otherwise.
    userid: str
        The user ID associated with the authentication secret if it is
        valid, None otherwise.
    """
    # Map the secret back to a user ID via the global auths hash.
    userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
    if userid is None:
        return (False, None)

    # Cross-check against the secret stored in the user's profile hash.
    profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
    if self._rc.hget(profile_key, pytwis_constants.AUTH_KEY) == auth_secret:
        return (True, userid)

    # TODO: Resolve the inconsistency of the two authentication secrets.
    return (False, None)
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error)
def register(self, username, password):
    """Register a new user.

    Parameters
    ----------
    username: str
        The username.
    password: str
        The password.

    Returns
    -------
    bool
        True if the new user is successfully registered, False otherwise.
    result
        An empty dict if the new user is successfully registered, a dict
        containing the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_INVALID_USERNAME
    - ERROR_USERNAME_ALREADY_EXISTS.format(username)
    - ERROR_WEAK_PASSWORD
    """
    result = {pytwis_constants.ERROR_KEY: None}
    # Check the username.
    if not Pytwis._check_username(username):
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
        return (False, result)
    # Check the password.
    if not Pytwis._check_password(password):
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
        return (False, result)
    # Update the username-to-userid mapping.
    with self._rc.pipeline() as pipe:
        while True:
            try:
                # Put a watch on the Hash 'users': username -> user-id, in case that
                # multiple clients are registering with the same username.
                pipe.watch(pytwis_constants.USERS_KEY)
                username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
                if username_exists:
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
                    return (False, result)
                # Get the next user-id. If the key "next_user_id" doesn't exist,
                # it will be created and initialized as 0, and then incremented by 1.
                userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)
                # Set the username-to-userid pair in USERS_HASH_KEY.
                pipe.multi()
                pipe.hset(pytwis_constants.USERS_KEY, username, userid)
                pipe.execute()
                break
            except WatchError:
                # The watched 'users' hash changed; retry the existence check.
                continue
        # Generate the authentication secret.
        auth_secret = secrets.token_hex()
        userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
        # Generate the password hash.
        # The format of the password hash looks like "method$salt$hash".
        password_hash = generate_password_hash(password,
                                               method=pytwis_constants.PASSWORD_HASH_METHOD)
        pipe.multi()
        # Update the authentication_secret-to-userid mapping.
        pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
        # Create the user profile.
        pipe.hmset(userid_profile_key,
                   {pytwis_constants.USERNAME_KEY: username,
                    pytwis_constants.PASSWORD_HASH_KEY: password_hash,
                    pytwis_constants.AUTH_KEY: auth_secret})
        pipe.execute()
    return (True, result)
def change_password(self, auth_secret, old_password, new_password):
    """Change the user password.

    Parameters
    ----------
    auth_secret: str
        The authentication secret which will be used for user authentication.
    old_password: str
        The old password before the change.
    new_password: str
        The new password after the change.

    Returns
    -------
    bool
        True if the password is successfully changed, False otherwise.
    result
        A dict containing the new authentication secret with the key AUTH_KEY
        if the password is successfully changed, a dict containing the error
        string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NEW_PASSWORD_NO_CHANGE
    - ERROR_NOT_LOGGED_IN
    - ERROR_INCORRECT_OLD_PASSWORD
    - ERROR_WEAK_PASSWORD
    """
    result = {pytwis_constants.ERROR_KEY: None}
    if old_password == new_password:
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
        return (False, result)
    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    # Check if the old password matches.
    userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
    stored_password_hash = self._rc.hget(userid_profile_key,
                                         pytwis_constants.PASSWORD_HASH_KEY)
    if not check_password_hash(stored_password_hash, old_password):
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
        return (False, result)
    # Check the password.
    if not Pytwis._check_password(new_password):
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
        return (False, result)
    # Generate the new authentication secret.
    new_auth_secret = secrets.token_hex()
    # Generate the new password hash.
    # The format of the new password hash looks like "method$salt$hash".
    new_password_hash = generate_password_hash(new_password,
                                               method=pytwis_constants.PASSWORD_HASH_METHOD)
    # Replace the old password hash by the new one and the old authentication secret
    # by the new one.
    # NOTE(review): rotating the secret invalidates the caller's old session
    # token; callers must switch to the returned AUTH_KEY.
    with self._rc.pipeline() as pipe:
        pipe.multi()
        pipe.hset(userid_profile_key,
                  pytwis_constants.PASSWORD_HASH_KEY,
                  new_password_hash)
        pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
        pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
        pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
        pipe.execute()
    result[pytwis_constants.AUTH_KEY] = new_auth_secret
    return (True, result)
def login(self, username, password):
    """Log into a user.

    Parameters
    ----------
    username: str
        The username.
    password: str
        The password.

    Returns
    -------
    bool
        True if the login is successful, False otherwise.
    result
        A dict containing the authentication secret with the key AUTH_KEY
        if the login is successful, a dict containing the error string
        with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
    - ERROR_INCORRECT_PASSWORD
    """
    outcome = {pytwis_constants.ERROR_KEY: None}

    # Resolve the username to a user ID.
    userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
    if userid is None:
        outcome[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
        return (False, outcome)

    # Verify the supplied password against the stored hash; on failure,
    # report an incorrect password.
    profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
    stored_hash = self._rc.hget(profile_key, pytwis_constants.PASSWORD_HASH_KEY)
    if not check_password_hash(stored_hash, password):
        outcome[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
        return (False, outcome)

    # Hand the stored authentication secret back to the caller.
    outcome[pytwis_constants.AUTH_KEY] = \
        self._rc.hget(profile_key, pytwis_constants.AUTH_KEY)
    return (True, outcome)
def get_user_profile(self, auth_secret):
    """Fetch the profile hash (username, password hash, etc.) of a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the profile is obtained successfully, False otherwise.
    result
        On success, a dict with the keys USERNAME_KEY, PASSWORD_HASH_KEY,
        and AUTH_KEY; on failure, a dict carrying the error string under
        the key ERROR_KEY.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    """
    # Authenticate first; bail out with an error dict if the secret is stale.
    authenticated, user_id = self._is_loggedin(auth_secret)
    if not authenticated:
        return (False,
                {pytwis_constants.ERROR_KEY: pytwis_constants.ERROR_NOT_LOGGED_IN})

    # The whole profile lives in one Redis hash keyed by the user ID.
    profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(user_id)
    return (True, self._rc.hgetall(profile_key))
def post_tweet(self, auth_secret, tweet):
    """Post a tweet.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    tweet: str
        The tweet that will be posted.

    Returns
    -------
    bool
        True if the tweet is successfully posted, False otherwise.
    result
        None if the tweet is successfully posted, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}
    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    # Get the next tweet-id. If the key "next_tweet_id" doesn't exist,
    # it will be created and initialized as 0, and then incremented by 1.
    post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)
    post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
    post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
    post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
    follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
    # Snapshot the follower set now; the fanout below will not see
    # followers added after this read.
    followers = self._rc.zrange(follower_zset_key, 0, -1)
    unix_time = int(time.time())
    with self._rc.pipeline() as pipe:
        pipe.multi()
        # Store the tweet with its user ID and UNIX timestamp.
        # NOTE(review): hmset is deprecated in redis-py 3.x in favor of
        # hset(mapping=...); presumably this targets an older client -- confirm.
        pipe.hmset(post_id_key,
                   {pytwis_constants.TWEET_USERID_KEY: userid,
                    pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,
                    pytwis_constants.TWEET_BODY_KEY: tweet})
        # Add the tweet to the user timeline.
        pipe.lpush(post_id_timeline_key, post_id)
        # Add the tweet to the tweet list posted by the user.
        pipe.lpush(post_id_user_key, post_id)
        # Write fanout the tweet to all the followers' timelines.
        for follower in followers:
            post_id_follower_key = \
                pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)
            pipe.lpush(post_id_follower_key, post_id)
        # Add the tweet to the general timeline and left trim the general timeline
        # to only retain the latest GENERAL_TIMELINE_MAX_TWEET_CNT tweets.
        pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)
        pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
                   0,
                   pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)
        # All the queued commands execute atomically in one MULTI/EXEC round.
        pipe.execute()
    return (True, result)
def follow(self, auth_secret, followee_username):
    """Follow a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    followee_username: str
        The username of the followee.

    Returns
    -------
    bool
        True if the follow is successful, False otherwise.
    result
        None if the follow is successful, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    - ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
    - ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
    """
    result = {pytwis_constants.ERROR_KEY: None}
    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    with self._rc.pipeline() as pipe:
        # Check if the followee exists.
        while True:
            try:
                # Put a watch on the Hash 'users': username -> user-id, in case that
                # other clients are modifying the Hash 'users'.
                pipe.watch(pytwis_constants.USERS_KEY)
                # After watch(), the pipeline runs in immediate mode, so this
                # hget() returns a real value instead of queueing the command.
                followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                if followee_userid is None:
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                        format(followee_username)
                    return (False, result)
                elif followee_userid == userid:
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
                    return (False, result)
                break
            except WatchError:
                # The watched 'users' hash changed under us; retry the check.
                continue
        # Update the two zset 'followers:[followee_username]' and 'following:[username]'.
        follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
        following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
        unix_time = int(time.time())
        pipe.multi()
        # NOTE(review): zadd(key, score, member) is the redis-py <3.0 calling
        # convention; 3.0+ expects a mapping -- confirm the pinned client version.
        pipe.zadd(follower_zset_key, unix_time, userid)
        pipe.zadd(following_zset_key, unix_time, followee_userid)
        pipe.execute()
    return (True, result)
def unfollow(self, auth_secret, followee_username):
    """Unfollow a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    followee_username: str
        The username of the followee.

    Returns
    -------
    bool
        True if the unfollow is successful, False otherwise.
    result
        None if the unfollow is successful, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    - ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
    """
    result = {pytwis_constants.ERROR_KEY: None}
    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    with self._rc.pipeline() as pipe:
        # Check if the followee exists.
        while True:
            try:
                # Put a watch on the Hash 'users': username -> user-id, in case that
                # other clients are modifying the Hash 'users'.
                pipe.watch(pytwis_constants.USERS_KEY)
                # Immediate-mode read while the watch is active.
                followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                if followee_userid is None:
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                        format(followee_username)
                    return (False, result)
                break
            except WatchError:
                # The watched 'users' hash changed under us; retry the check.
                continue
        # Remove followee_userid from the zset 'following:[username]' and remove userid
        # from the zset 'followers:[followee_username]'.
        follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
        following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
        pipe.multi()
        pipe.zrem(follower_zset_key, userid)
        pipe.zrem(following_zset_key, followee_userid)
        pipe.execute()
    return (True, result)
def get_followers(self, auth_secret):
        """Get the follower list of a logged-in user.

        Parameters
        ----------
        auth_secret: str
            The authentication secret of the logged-in user.

        Returns
        -------
        bool
            True if the follower list is successfully obtained, False otherwise.
        result
            A dict containing the follower list with the key FOLLOWER_LIST_KEY
            if the follower list is successfully obtained, a dict containing
            the error string with the key ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        - ERROR_NOT_LOGGED_IN
        """
        result = {pytwis_constants.ERROR_KEY: None}
        # Check if the user is logged in.
        loggedin, userid = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)
        # Get the userids of all followers from the zset 'followers:[userid]'.
        follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
        follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
        # zrange() returns a (possibly empty) list, never None, so a single
        # falsy test covers the "no followers" case.
        if not follower_userids:
            result[pytwis_constants.FOLLOWER_LIST_KEY] = []
            return (True, result)
        # Resolve every follower userid to a username in one pipelined
        # round trip; execute() returns the usernames in request order.
        with self._rc.pipeline() as pipe:
            pipe.multi()
            for follower_userid in follower_userids:
                follower_userid_profile_key = \
                    pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
                pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
            result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()
        return (True, result)
def get_following(self, auth_secret):
        """Get the following list of a logged-in user.

        Parameters
        ----------
        auth_secret: str
            The authentication secret of the logged-in user.

        Returns
        -------
        bool
            True if the following list is successfully obtained, False otherwise.
        result
            A dict containing the following list with the key FOLLOWING_LIST_KEY
            if the following list is successfully obtained, a dict containing
            the error string with the key ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        - ERROR_NOT_LOGGED_IN
        """
        result = {pytwis_constants.ERROR_KEY: None}
        # Check if the user is logged in.
        loggedin, userid = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)
        # Get the userids of all followees from the zset 'following:[userid]'.
        following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
        following_userids = self._rc.zrange(following_zset_key, 0, -1)
        # zrange() returns a (possibly empty) list, never None, so a single
        # falsy test covers the "not following anyone" case.
        if not following_userids:
            result[pytwis_constants.FOLLOWING_LIST_KEY] = []
            return (True, result)
        # Resolve every followee userid to a username in one pipelined
        # round trip; execute() returns the usernames in request order.
        with self._rc.pipeline() as pipe:
            pipe.multi()
            for following_userid in following_userids:
                following_userid_profile_key = \
                    pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
                pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY)
            result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()
        return (True, result)
def _get_tweets(self, tweets_key, max_cnt_tweets):
        """Fetch at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.

        Parameters
        ----------
        tweets_key: str
            The key of the Redis list which stores the tweets.
        max_cnt_tweets: int
            The maximum number of tweets included in the returned list.
            A value of -1 means "return every available tweet".

        Returns
        -------
        tweets
            A list of tweet dicts, each augmented with the author's username.
        """
        if max_cnt_tweets == 0:
            return []
        # lrange() treats an end index of -1 as "through the last element",
        # so both the unbounded and the capped case reduce to one index.
        end_index = -1 if max_cnt_tweets == -1 else max_cnt_tweets - 1
        # Fetch the post IDs of the requested tweets.
        post_ids = self._rc.lrange(tweets_key, 0, end_index)
        if not post_ids:
            return []
        with self._rc.pipeline() as pipe:
            # Round trip 1: pull every tweet hash (user ID, UNIX time, body).
            pipe.multi()
            for pid in post_ids:
                pipe.hgetall(pytwis_constants.TWEET_KEY_FORMAT.format(pid))
            tweets = pipe.execute()
            # Round trip 2: map each distinct author ID to its username.
            author_ids = list({t[pytwis_constants.TWEET_USERID_KEY] for t in tweets})
            pipe.multi()
            for author_id in author_ids:
                author_profile_key = \
                    pytwis_constants.USER_PROFILE_KEY_FORMAT.format(author_id)
                pipe.hget(author_profile_key, pytwis_constants.USERNAME_KEY)
            # execute() yields usernames in request order, so zip is safe.
            id_to_name = dict(zip(author_ids, pipe.execute()))
        # Attach the resolved username to every tweet.
        for tweet in tweets:
            tweet[pytwis_constants.USERNAME_KEY] = \
                id_to_name[tweet[pytwis_constants.TWEET_USERID_KEY]]
        return tweets
def get_timeline(self, auth_secret, max_cnt_tweets):
        """Get the general or user timeline.

        An empty authentication secret selects the general timeline.
        A valid authentication secret selects that user's timeline.
        A non-empty but invalid authentication secret yields an error.

        Parameters
        ----------
        auth_secret: str
            Either the authentication secret of the logged-in user or an empty string.
        max_cnt_tweets: int
            The maximum number of tweets included in the timeline. If it is set to -1,
            then all the available tweets will be included.

        Returns
        -------
        bool
            True if the timeline is successfully retrieved, False otherwise.
        result
            A dict containing a list of tweets with the key TWEETS_KEY if
            the timeline is successfully retrieved, a dict containing
            the error string with the key ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        - ERROR_NOT_LOGGED_IN
        """
        result = {pytwis_constants.ERROR_KEY: None}
        if auth_secret != '':
            # A non-empty secret must belong to a logged-in user.
            loggedin, userid = self._is_loggedin(auth_secret)
            if not loggedin:
                result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
                return (False, result)
            timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
        else:
            # An empty secret selects the general (public) timeline.
            timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY
        result[pytwis_constants.TWEETS_KEY] = self._get_tweets(timeline_key, max_cnt_tweets)
        return (True, result)
def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
        """Get the tweets posted by one user.

        Parameters
        ----------
        auth_secret: str
            The authentication secret of the logged-in user.
        username:
            The name of the user who posted the tweets; this may be a user
            other than the logged-in one.
        max_cnt_tweets: int
            The maximum number of tweets included in the return. If it is set to -1,
            then all the tweets posted by the user will be included.

        Returns
        -------
        bool
            True if the tweets are successfully retrieved, False otherwise.
        result
            A dict containing a list of tweets with the key TWEETS_KEY if
            the tweets are successfully retrieved, a dict containing
            the error string with the key ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        - ERROR_NOT_LOGGED_IN
        - ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
        """
        result = {pytwis_constants.ERROR_KEY: None}
        # Only a logged-in user may read another user's tweets.
        loggedin, _ = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)
        # Resolve the target username to its user ID.
        target_userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
        if target_userid is None:
            result[pytwis_constants.ERROR_KEY] = \
                pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
            return (False, result)
        # Fetch the tweets from the target user's own tweet list.
        target_tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(target_userid)
        result[pytwis_constants.TWEETS_KEY] = \
            self._get_tweets(target_tweets_key, max_cnt_tweets)
        return (True, result)
|
renweizhukov/pytwis | pytwis/pytwis.py | Pytwis.get_user_profile | python | def get_user_profile(self, auth_secret):
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
result = self._rc.hgetall(userid_profile_key)
return (True, result) | Get the profile (i.e., username, password, etc.) of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
A dict containing the following keys:
- USERNAME_KEY
- PASSWORD_HASH_KEY
- AUTH_KEY
if the user profile is obtained successfully; otherwise a dict
containing the error string with the key ERROR_KEY.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L449-L489 | [
"def _is_loggedin(self, auth_secret):\n \"\"\"Check if a user is logged-in by verifying the input authentication secret.\n\n Parameters\n ----------\n auth_secret: str\n The authentication secret of a logged-in user.\n\n Returns\n -------\n bool\n True if the authentication secret is valid, False otherwise.\n userid: str\n The user ID associated with the authentication secret if the authentication secret\n valid, None otherwise.\n \"\"\"\n # Get the userid from the authentication secret.\n userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)\n if userid is None:\n return (False, None)\n\n # Compare the input authentication secret with the stored one.\n userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)\n stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)\n if auth_secret == stored_auth_secret:\n return (True, userid)\n\n # TODO: Resolve the inconsistency of the two authentication secrets.\n return (False, None)\n"
] | class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
def __init__(self, hostname='127.0.0.1', port=6379, socket='', db=0, password=''):
"""Initialize the class Pytiws.
Parameters
----------
hostname : str, optional
The Redis server hostname which is usually an IP address (default: 127.0.0.1).
port : int, optional
The Redis server port number (default: 6379).
socket: str, optional
The Redis server socket which will override hostname and port if it is given.
db : int, optional
The selected Redis database index (default: 0).
password : str, optional
The Redis server password (default: '').
Raises
------
ValueError
If failed to connect to the Redis server with either ResponseError or RedisTimeoutError.
"""
if socket:
self._rc = redis.StrictRedis(
unix_socket_path=socket,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
else:
self._rc = redis.StrictRedis(
host=hostname,
port=port,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
# Test the connection by ping.
try:
if self._rc.ping():
if socket:
print('Ping {} returned True'.format(socket))
else:
print('Ping {}:{} returned True'.format(hostname, port))
except (ResponseError, RedisTimeoutError) as excep:
raise ValueError(str(excep))
def _is_loggedin(self, auth_secret):
"""Check if a user is logged-in by verifying the input authentication secret.
Parameters
----------
auth_secret: str
The authentication secret of a logged-in user.
Returns
-------
bool
True if the authentication secret is valid, False otherwise.
userid: str
The user ID associated with the authentication secret if the authentication secret
valid, None otherwise.
"""
# Get the userid from the authentication secret.
userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
if userid is None:
return (False, None)
# Compare the input authentication secret with the stored one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
if auth_secret == stored_auth_secret:
return (True, userid)
# TODO: Resolve the inconsistency of the two authentication secrets.
return (False, None)
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error)
def register(self, username, password):
"""Register a new user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the new user is successfully registered, False otherwise.
result
An empty dict if the new user is successfully registered, a dict
containing the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_ALREADY_EXISTS.format(username)
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check the username.
if not Pytwis._check_username(username):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
return (False, result)
# Check the password.
if not Pytwis._check_password(password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Update the username-to-userid mapping.
with self._rc.pipeline() as pipe:
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# multiple clients are registering with the same username.
pipe.watch(pytwis_constants.USERS_KEY)
username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
if username_exists:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)
# Set the username-to-userid pair in USERS_HASH_KEY.
pipe.multi()
pipe.hset(pytwis_constants.USERS_KEY, username, userid)
pipe.execute()
break
except WatchError:
continue
# Generate the authentication secret.
auth_secret = secrets.token_hex()
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
# Generate the password hash.
# The format of the password hash looks like "method$salt$hash".
password_hash = generate_password_hash(password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
pipe.multi()
# Update the authentication_secret-to-userid mapping.
pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
# Create the user profile.
pipe.hmset(userid_profile_key,
{pytwis_constants.USERNAME_KEY: username,
pytwis_constants.PASSWORD_HASH_KEY: password_hash,
pytwis_constants.AUTH_KEY: auth_secret})
pipe.execute()
return (True, result)
def change_password(self, auth_secret, old_password, new_password):
"""Change the user password.
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
if old_password == new_password:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
return (False, result)
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Check if the old password matches.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if not check_password_hash(stored_password_hash, old_password):
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
return (False, result)
# Check the password.
if not Pytwis._check_password(new_password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Generate the new password hash.
# The format of the new password hash looks like "method$salt$hash".
new_password_hash = generate_password_hash(new_password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
# Replace the old password hash by the new one and the old authentication secret
# by the new one.
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY,
new_password_hash)
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.AUTH_KEY] = new_auth_secret
return (True, result)
def login(self, username, password):
"""Log into a user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the login is successful, False otherwise.
result
A dict containing the authentication secret with the key AUTH_KEY
if the login is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
- ERROR_INCORRECT_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Get the user-id based on the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Compare the input password hash with the stored one. If it matches,
# return the authentication secret.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if check_password_hash(stored_password_hash, password):
result[pytwis_constants.AUTH_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
return (True, result)
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
return (False, result)
def logout(self, auth_secret):
"""Log out of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
None if the logout is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Replace the old authentication secret by the new one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.USERNAME_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.AUTH_KEY] = ''
return (True, result)
def post_tweet(self, auth_secret, tweet):
"""Post a tweet.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
tweet: str
The tweet that will be posted.
Returns
-------
bool
True if the tweet is successfully posted, False otherwise.
result
None if the tweet is successfully posted, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)
post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
followers = self._rc.zrange(follower_zset_key, 0, -1)
unix_time = int(time.time())
with self._rc.pipeline() as pipe:
pipe.multi()
# Store the tweet with its user ID and UNIX timestamp.
pipe.hmset(post_id_key,
{pytwis_constants.TWEET_USERID_KEY: userid,
pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,
pytwis_constants.TWEET_BODY_KEY: tweet})
# Add the tweet to the user timeline.
pipe.lpush(post_id_timeline_key, post_id)
# Add the tweet to the tweet list posted by the user.
pipe.lpush(post_id_user_key, post_id)
# Write fanout the tweet to all the followers' timelines.
for follower in followers:
post_id_follower_key = \
pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)
pipe.lpush(post_id_follower_key, post_id)
# Add the tweet to the general timeline and left trim the general timeline
# to only retain the latest GENERAL_TIMELINE_LIST_MAX_TWEET_CNT tweets.
pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)
pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
0,
pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)
pipe.execute()
return (True, result)
def follow(self, auth_secret, followee_username):
"""Follow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the follow is successful, False otherwise.
result
None if the follow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
- ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
elif followee_userid == userid:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
return (False, result)
break
except WatchError:
continue
# Update the two zset 'followers:[followee_username]' and 'following:[username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
unix_time = int(time.time())
pipe.multi()
pipe.zadd(follower_zset_key, unix_time, userid)
pipe.zadd(following_zset_key, unix_time, followee_userid)
pipe.execute()
return (True, result)
def unfollow(self, auth_secret, followee_username):
"""Unfollow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the unfollow is successful, False otherwise.
result
None if the unfollow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
break
except WatchError:
continue
# Remove followee_userid from the zset 'following:[username]' and remove userid
# from the zset 'followers:[followee_username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
pipe.multi()
pipe.zrem(follower_zset_key, userid)
pipe.zrem(following_zset_key, followee_userid)
pipe.execute()
return (True, result)
def get_followers(self, auth_secret):
"""Get the follower list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the follower list is successfully obtained, False otherwise.
result
A dict containing the follower list with the key FOLLOWER_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
if follower_userids is None or not follower_userids:
result[pytwis_constants.FOLLOWER_LIST_KEY] = []
return (True, result)
# Get the list of followers' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for follower_userid in follower_userids:
follower_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()
return (True, result)
def get_following(self, auth_secret):
"""Get the following list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the following list is successfully obtained, False otherwise.
result
A dict containing the following list with the key FOLLOWING_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
following_userids = self._rc.zrange(following_zset_key, 0, -1)
if following_userids is None or not following_userids:
result[pytwis_constants.FOLLOWING_LIST_KEY] = []
return (True, result)
# Get the list of followings' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for following_userid in following_userids:
following_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()
return (True, result)
def _get_tweets(self, tweets_key, max_cnt_tweets):
"""Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.
Parameters
----------
tweets_key: str
The key of the Redis list which stores the tweets.
max_cnt_tweets: int
The maximum number of tweets included in the returned list. If it is set to -1,
then all the available tweets will be included.
Returns
-------
tweets
A list of tweets
"""
tweets = []
if max_cnt_tweets == 0:
return tweets
elif max_cnt_tweets == -1:
# Return all the tweets in the timeline.
last_tweet_index = -1
else:
# Return at most max_cnt_tweets tweets.
last_tweet_index = max_cnt_tweets - 1
# Get the post IDs of the tweets.
post_ids = self._rc.lrange(tweets_key, 0, last_tweet_index)
if not post_ids:
return tweets
with self._rc.pipeline() as pipe:
# Get the tweets with their user IDs and UNIX timestamps.
pipe.multi()
for post_id in post_ids:
post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
pipe.hgetall(post_id_key)
tweets = pipe.execute()
# Get the userid-to-username mappings for all the user IDs associated with the tweets.
userid_set = {tweet[pytwis_constants.TWEET_USERID_KEY] for tweet in tweets}
userid_list = []
pipe.multi()
for userid in userid_set:
userid_list.append(userid)
userid_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
pipe.hget(userid_key, pytwis_constants.USERNAME_KEY)
username_list = pipe.execute()
userid_to_username = {userid: username for userid, username in\
zip(userid_list, username_list)}
# Add the username for the user ID of each tweet.
for tweet in tweets:
tweet[pytwis_constants.USERNAME_KEY] = \
userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]
return tweets
def get_timeline(self, auth_secret, max_cnt_tweets):
"""Get the general or user timeline.
If an empty authentication secret is given, this method returns the general timeline.
If an authentication secret is given and it is valid, this method returns the user timeline.
If an authentication secret is given but it is invalid, this method returns an error.
Parameters
----------
auth_secret: str
Either the authentication secret of the logged-in user or an empty string.
max_cnt_tweets: int
The maximum number of tweets included in the timeline. If it is set to -1,
then all the available tweets will be included.
Returns
-------
bool
True if the timeline is successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the timeline is successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
if auth_secret == '':
# An empty authentication secret implies getting the general timeline.
timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY
else:
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the user timeline.
timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = self._get_tweets(timeline_key, max_cnt_tweets)
return (True, result)
def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
"""Get the tweets posted by one user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
username:
The name of the user who post the tweets and may not be the logged-in user.
max_cnt_tweets: int
The maximum number of tweets included in the return. If it is set to -1,
then all the tweets posted by the user will be included.
Returns
-------
bool
True if the tweets are successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the tweets are successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, _ = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the userid from the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Get the tweets posted by the user.
user_tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = \
self._get_tweets(user_tweets_key, max_cnt_tweets)
return (True, result)
|
renweizhukov/pytwis | pytwis/pytwis.py | Pytwis.post_tweet | python | def post_tweet(self, auth_secret, tweet):
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)
post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
followers = self._rc.zrange(follower_zset_key, 0, -1)
unix_time = int(time.time())
with self._rc.pipeline() as pipe:
pipe.multi()
# Store the tweet with its user ID and UNIX timestamp.
pipe.hmset(post_id_key,
{pytwis_constants.TWEET_USERID_KEY: userid,
pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,
pytwis_constants.TWEET_BODY_KEY: tweet})
# Add the tweet to the user timeline.
pipe.lpush(post_id_timeline_key, post_id)
# Add the tweet to the tweet list posted by the user.
pipe.lpush(post_id_user_key, post_id)
# Write fanout the tweet to all the followers' timelines.
for follower in followers:
post_id_follower_key = \
pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)
pipe.lpush(post_id_follower_key, post_id)
# Add the tweet to the general timeline and left trim the general timeline
# to only retain the latest GENERAL_TIMELINE_LIST_MAX_TWEET_CNT tweets.
pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)
pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
0,
pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)
pipe.execute()
return (True, result) | Post a tweet.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
tweet: str
The tweet that will be posted.
Returns
-------
bool
True if the tweet is successfully posted, False otherwise.
result
None if the tweet is successfully posted, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L491-L564 | [
"def _is_loggedin(self, auth_secret):\n \"\"\"Check if a user is logged-in by verifying the input authentication secret.\n\n Parameters\n ----------\n auth_secret: str\n The authentication secret of a logged-in user.\n\n Returns\n -------\n bool\n True if the authentication secret is valid, False otherwise.\n userid: str\n The user ID associated with the authentication secret if the authentication secret\n valid, None otherwise.\n \"\"\"\n # Get the userid from the authentication secret.\n userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)\n if userid is None:\n return (False, None)\n\n # Compare the input authentication secret with the stored one.\n userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)\n stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)\n if auth_secret == stored_auth_secret:\n return (True, userid)\n\n # TODO: Resolve the inconsistency of the two authentication secrets.\n return (False, None)\n"
] | class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
def __init__(self, hostname='127.0.0.1', port=6379, socket='', db=0, password=''):
"""Initialize the class Pytiws.
Parameters
----------
hostname : str, optional
The Redis server hostname which is usually an IP address (default: 127.0.0.1).
port : int, optional
The Redis server port number (default: 6379).
socket: str, optional
The Redis server socket which will override hostname and port if it is given.
db : int, optional
The selected Redis database index (default: 0).
password : str, optional
The Redis server password (default: '').
Raises
------
ValueError
If failed to connect to the Redis server with either ResponseError or RedisTimeoutError.
"""
if socket:
self._rc = redis.StrictRedis(
unix_socket_path=socket,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
else:
self._rc = redis.StrictRedis(
host=hostname,
port=port,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
# Test the connection by ping.
try:
if self._rc.ping():
if socket:
print('Ping {} returned True'.format(socket))
else:
print('Ping {}:{} returned True'.format(hostname, port))
except (ResponseError, RedisTimeoutError) as excep:
raise ValueError(str(excep))
def _is_loggedin(self, auth_secret):
"""Check if a user is logged-in by verifying the input authentication secret.
Parameters
----------
auth_secret: str
The authentication secret of a logged-in user.
Returns
-------
bool
True if the authentication secret is valid, False otherwise.
userid: str
The user ID associated with the authentication secret if the authentication secret
valid, None otherwise.
"""
# Get the userid from the authentication secret.
userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
if userid is None:
return (False, None)
# Compare the input authentication secret with the stored one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
if auth_secret == stored_auth_secret:
return (True, userid)
# TODO: Resolve the inconsistency of the two authentication secrets.
return (False, None)
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error)
def register(self, username, password):
"""Register a new user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the new user is successfully registered, False otherwise.
result
An empty dict if the new user is successfully registered, a dict
containing the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_ALREADY_EXISTS.format(username)
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check the username.
if not Pytwis._check_username(username):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
return (False, result)
# Check the password.
if not Pytwis._check_password(password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Update the username-to-userid mapping.
with self._rc.pipeline() as pipe:
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# multiple clients are registering with the same username.
pipe.watch(pytwis_constants.USERS_KEY)
username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
if username_exists:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)
# Set the username-to-userid pair in USERS_HASH_KEY.
pipe.multi()
pipe.hset(pytwis_constants.USERS_KEY, username, userid)
pipe.execute()
break
except WatchError:
continue
# Generate the authentication secret.
auth_secret = secrets.token_hex()
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
# Generate the password hash.
# The format of the password hash looks like "method$salt$hash".
password_hash = generate_password_hash(password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
pipe.multi()
# Update the authentication_secret-to-userid mapping.
pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
# Create the user profile.
pipe.hmset(userid_profile_key,
{pytwis_constants.USERNAME_KEY: username,
pytwis_constants.PASSWORD_HASH_KEY: password_hash,
pytwis_constants.AUTH_KEY: auth_secret})
pipe.execute()
return (True, result)
def change_password(self, auth_secret, old_password, new_password):
"""Change the user password.
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
if old_password == new_password:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
return (False, result)
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Check if the old password matches.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if not check_password_hash(stored_password_hash, old_password):
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
return (False, result)
# Check the password.
if not Pytwis._check_password(new_password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Generate the new password hash.
# The format of the new password hash looks like "method$salt$hash".
new_password_hash = generate_password_hash(new_password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
# Replace the old password hash by the new one and the old authentication secret
# by the new one.
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY,
new_password_hash)
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.AUTH_KEY] = new_auth_secret
return (True, result)
def login(self, username, password):
"""Log into a user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the login is successful, False otherwise.
result
A dict containing the authentication secret with the key AUTH_KEY
if the login is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
- ERROR_INCORRECT_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Get the user-id based on the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Compare the input password hash with the stored one. If it matches,
# return the authentication secret.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if check_password_hash(stored_password_hash, password):
result[pytwis_constants.AUTH_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
return (True, result)
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
return (False, result)
def logout(self, auth_secret):
"""Log out of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
None if the logout is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Replace the old authentication secret by the new one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.USERNAME_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.AUTH_KEY] = ''
return (True, result)
def get_user_profile(self, auth_secret):
"""Get the profile (i.e., username, password, etc.) of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
A dict containing the following keys:
- USERNAME_KEY
- PASSWORD_HASH_KEY
- AUTH_KEY
if the user profile is obtained successfully; otherwise a dict
containing the error string with the key ERROR_KEY.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
result = self._rc.hgetall(userid_profile_key)
return (True, result)
def follow(self, auth_secret, followee_username):
"""Follow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the follow is successful, False otherwise.
result
None if the follow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
- ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
elif followee_userid == userid:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
return (False, result)
break
except WatchError:
continue
# Update the two zset 'followers:[followee_username]' and 'following:[username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
unix_time = int(time.time())
pipe.multi()
pipe.zadd(follower_zset_key, unix_time, userid)
pipe.zadd(following_zset_key, unix_time, followee_userid)
pipe.execute()
return (True, result)
def unfollow(self, auth_secret, followee_username):
"""Unfollow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the unfollow is successful, False otherwise.
result
None if the unfollow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
break
except WatchError:
continue
# Remove followee_userid from the zset 'following:[username]' and remove userid
# from the zset 'followers:[followee_username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
pipe.multi()
pipe.zrem(follower_zset_key, userid)
pipe.zrem(following_zset_key, followee_userid)
pipe.execute()
return (True, result)
def get_followers(self, auth_secret):
"""Get the follower list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the follower list is successfully obtained, False otherwise.
result
A dict containing the follower list with the key FOLLOWER_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
if follower_userids is None or not follower_userids:
result[pytwis_constants.FOLLOWER_LIST_KEY] = []
return (True, result)
# Get the list of followers' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for follower_userid in follower_userids:
follower_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()
return (True, result)
def get_following(self, auth_secret):
"""Get the following list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the following list is successfully obtained, False otherwise.
result
A dict containing the following list with the key FOLLOWING_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
following_userids = self._rc.zrange(following_zset_key, 0, -1)
if following_userids is None or not following_userids:
result[pytwis_constants.FOLLOWING_LIST_KEY] = []
return (True, result)
# Get the list of followings' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for following_userid in following_userids:
following_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()
return (True, result)
def _get_tweets(self, tweets_key, max_cnt_tweets):
"""Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.
Parameters
----------
tweets_key: str
The key of the Redis list which stores the tweets.
max_cnt_tweets: int
The maximum number of tweets included in the returned list. If it is set to -1,
then all the available tweets will be included.
Returns
-------
tweets
A list of tweets
"""
tweets = []
if max_cnt_tweets == 0:
return tweets
elif max_cnt_tweets == -1:
# Return all the tweets in the timeline.
last_tweet_index = -1
else:
# Return at most max_cnt_tweets tweets.
last_tweet_index = max_cnt_tweets - 1
# Get the post IDs of the tweets.
post_ids = self._rc.lrange(tweets_key, 0, last_tweet_index)
if not post_ids:
return tweets
with self._rc.pipeline() as pipe:
# Get the tweets with their user IDs and UNIX timestamps.
pipe.multi()
for post_id in post_ids:
post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
pipe.hgetall(post_id_key)
tweets = pipe.execute()
# Get the userid-to-username mappings for all the user IDs associated with the tweets.
userid_set = {tweet[pytwis_constants.TWEET_USERID_KEY] for tweet in tweets}
userid_list = []
pipe.multi()
for userid in userid_set:
userid_list.append(userid)
userid_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
pipe.hget(userid_key, pytwis_constants.USERNAME_KEY)
username_list = pipe.execute()
userid_to_username = {userid: username for userid, username in\
zip(userid_list, username_list)}
# Add the username for the user ID of each tweet.
for tweet in tweets:
tweet[pytwis_constants.USERNAME_KEY] = \
userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]
return tweets
def get_timeline(self, auth_secret, max_cnt_tweets):
"""Get the general or user timeline.
If an empty authentication secret is given, this method returns the general timeline.
If an authentication secret is given and it is valid, this method returns the user timeline.
If an authentication secret is given but it is invalid, this method returns an error.
Parameters
----------
auth_secret: str
Either the authentication secret of the logged-in user or an empty string.
max_cnt_tweets: int
The maximum number of tweets included in the timeline. If it is set to -1,
then all the available tweets will be included.
Returns
-------
bool
True if the timeline is successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the timeline is successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
if auth_secret == '':
# An empty authentication secret implies getting the general timeline.
timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY
else:
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the user timeline.
timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = self._get_tweets(timeline_key, max_cnt_tweets)
return (True, result)
def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
"""Get the tweets posted by one user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
username:
The name of the user who post the tweets and may not be the logged-in user.
max_cnt_tweets: int
The maximum number of tweets included in the return. If it is set to -1,
then all the tweets posted by the user will be included.
Returns
-------
bool
True if the tweets are successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the tweets are successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, _ = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the userid from the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Get the tweets posted by the user.
user_tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = \
self._get_tweets(user_tweets_key, max_cnt_tweets)
return (True, result)
|
renweizhukov/pytwis | pytwis/pytwis.py | Pytwis.follow | python | def follow(self, auth_secret, followee_username):
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
elif followee_userid == userid:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
return (False, result)
break
except WatchError:
continue
# Update the two zset 'followers:[followee_username]' and 'following:[username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
unix_time = int(time.time())
pipe.multi()
pipe.zadd(follower_zset_key, unix_time, userid)
pipe.zadd(following_zset_key, unix_time, followee_userid)
pipe.execute()
return (True, result) | Follow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the follow is successful, False otherwise.
result
None if the follow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
- ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username) | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L566-L631 | [
"def _is_loggedin(self, auth_secret):\n \"\"\"Check if a user is logged-in by verifying the input authentication secret.\n\n Parameters\n ----------\n auth_secret: str\n The authentication secret of a logged-in user.\n\n Returns\n -------\n bool\n True if the authentication secret is valid, False otherwise.\n userid: str\n The user ID associated with the authentication secret if the authentication secret\n valid, None otherwise.\n \"\"\"\n # Get the userid from the authentication secret.\n userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)\n if userid is None:\n return (False, None)\n\n # Compare the input authentication secret with the stored one.\n userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)\n stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)\n if auth_secret == stored_auth_secret:\n return (True, userid)\n\n # TODO: Resolve the inconsistency of the two authentication secrets.\n return (False, None)\n"
] | class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
    def __init__(self, hostname: str = '127.0.0.1', port: int = 6379, socket: str = '', db: int = 0, password: str = ''):
        """Initialize the class Pytwis.

        Parameters
        ----------
        hostname : str, optional
            The Redis server hostname which is usually an IP address (default: 127.0.0.1).
        port : int, optional
            The Redis server port number (default: 6379).
        socket: str, optional
            The Redis server socket which will override hostname and port if it is given.
        db : int, optional
            The selected Redis database index (default: 0).
        password : str, optional
            The Redis server password (default: '').

        Raises
        ------
        ValueError
            If failed to connect to the Redis server with either ResponseError or RedisTimeoutError.
        """
        # A Unix-domain socket path, when given, takes precedence over TCP host/port.
        if socket:
            self._rc = redis.StrictRedis(
                unix_socket_path=socket,
                db=db,
                password=password,
                decode_responses=True, # Decode the response bytes into strings.
                socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
        else:
            self._rc = redis.StrictRedis(
                host=hostname,
                port=port,
                db=db,
                password=password,
                decode_responses=True, # Decode the response bytes into strings.
                socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
        # Test the connection by ping. Connection problems are re-raised as
        # ValueError so callers need not depend on redis exception types.
        try:
            if self._rc.ping():
                if socket:
                    print('Ping {} returned True'.format(socket))
                else:
                    print('Ping {}:{} returned True'.format(hostname, port))
        except (ResponseError, RedisTimeoutError) as excep:
            raise ValueError(str(excep))
    def _is_loggedin(self, auth_secret: str):
        """Check if a user is logged-in by verifying the input authentication secret.

        Parameters
        ----------
        auth_secret: str
            The authentication secret of a logged-in user.

        Returns
        -------
        bool
            True if the authentication secret is valid, False otherwise.
        userid: str
            The user ID associated with the authentication secret if the
            authentication secret is valid, None otherwise.
        """
        # Get the userid from the authentication secret.
        userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
        if userid is None:
            return (False, None)
        # Compare the input authentication secret with the one stored in the user
        # profile: both the global auths hash and the profile must agree for the
        # secret to be considered valid.
        userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
        stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
        if auth_secret == stored_auth_secret:
            return (True, userid)
        # TODO: Resolve the inconsistency of the two authentication secrets.
        return (False, None)
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error)
    def register(self, username: str, password: str):
        """Register a new user.

        Parameters
        ----------
        username: str
            The username.
        password: str
            The password.

        Returns
        -------
        bool
            True if the new user is successfully registered, False otherwise.
        result
            An empty dict if the new user is successfully registered, a dict
            containing the error string with the key ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        -  ERROR_INVALID_USERNAME
        -  ERROR_WEAK_PASSWORD
        -  ERROR_USERNAME_ALREADY_EXISTS.format(username)
        """
        result = {pytwis_constants.ERROR_KEY: None}
        # Check the username.
        if not Pytwis._check_username(username):
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
            return (False, result)
        # Check the password.
        if not Pytwis._check_password(password):
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
            return (False, result)
        # Update the username-to-userid mapping.
        with self._rc.pipeline() as pipe:
            # Optimistic-locking loop: retry the whole check-and-set whenever
            # another client touches the 'users' hash between WATCH and EXEC.
            while True:
                try:
                    # Put a watch on the Hash 'users': username -> user-id, in case that
                    # multiple clients are registering with the same username.
                    pipe.watch(pytwis_constants.USERS_KEY)
                    username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
                    if username_exists:
                        result[pytwis_constants.ERROR_KEY] = \
                            pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
                        return (False, result)
                    # Get the next user-id. If the key "next_user_id" doesn't exist,
                    # it will be created and initialized as 0, and then incremented by 1.
                    userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)
                    # Set the username-to-userid pair in USERS_HASH_KEY.
                    pipe.multi()
                    pipe.hset(pytwis_constants.USERS_KEY, username, userid)
                    pipe.execute()
                    break
                except WatchError:
                    continue
            # Generate the authentication secret.
            auth_secret = secrets.token_hex()
            userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
            # Generate the password hash.
            # The format of the password hash looks like "method$salt$hash".
            password_hash = generate_password_hash(password,
                                                   method=\
                                                   pytwis_constants.PASSWORD_HASH_METHOD)
            pipe.multi()
            # Update the authentication_secret-to-userid mapping.
            pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
            # Create the user profile.
            pipe.hmset(userid_profile_key,
                       {pytwis_constants.USERNAME_KEY: username,
                        pytwis_constants.PASSWORD_HASH_KEY: password_hash,
                        pytwis_constants.AUTH_KEY: auth_secret})
            pipe.execute()
        return (True, result)
    def change_password(self, auth_secret: str, old_password: str, new_password: str):
        """Change the user password.

        On success the authentication secret is rotated as well, so the caller
        must switch to the returned secret.

        Parameters
        ----------
        auth_secret: str
            The authentication secret which will be used for user authentication.
        old_password: str
            The old password before the change.
        new_password: str
            The new password after the change.

        Returns
        -------
        bool
            True if the password is successfully changed, False otherwise.
        result
            A dict containing the new authentication secret with the key AUTH_KEY
            if the password is successfully changed, a dict containing the error
            string with the key ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        -  ERROR_NEW_PASSWORD_NO_CHANGE
        -  ERROR_NOT_LOGGED_IN
        -  ERROR_INCORRECT_OLD_PASSWORD
        -  ERROR_WEAK_PASSWORD
        """
        result = {pytwis_constants.ERROR_KEY: None}
        if old_password == new_password:
            result[pytwis_constants.ERROR_KEY] = \
                pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
            return (False, result)
        # Check if the user is logged in.
        loggedin, userid = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)
        # Check if the old password matches.
        userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
        stored_password_hash = self._rc.hget(userid_profile_key,
                                             pytwis_constants.PASSWORD_HASH_KEY)
        if not check_password_hash(stored_password_hash, old_password):
            result[pytwis_constants.ERROR_KEY] = \
                pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
            return (False, result)
        # Check the strength of the new password.
        if not Pytwis._check_password(new_password):
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
            return (False, result)
        # Generate the new authentication secret.
        new_auth_secret = secrets.token_hex()
        # Generate the new password hash.
        # The format of the new password hash looks like "method$salt$hash".
        new_password_hash = generate_password_hash(new_password,
                                                   method=\
                                                   pytwis_constants.PASSWORD_HASH_METHOD)
        # Replace the old password hash by the new one and the old authentication secret
        # by the new one, atomically via MULTI/EXEC.
        with self._rc.pipeline() as pipe:
            pipe.multi()
            pipe.hset(userid_profile_key,
                      pytwis_constants.PASSWORD_HASH_KEY,
                      new_password_hash)
            pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
            pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
            pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
            pipe.execute()
        result[pytwis_constants.AUTH_KEY] = new_auth_secret
        return (True, result)
def login(self, username, password):
"""Log into a user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the login is successful, False otherwise.
result
A dict containing the authentication secret with the key AUTH_KEY
if the login is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
- ERROR_INCORRECT_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Get the user-id based on the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Compare the input password hash with the stored one. If it matches,
# return the authentication secret.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if check_password_hash(stored_password_hash, password):
result[pytwis_constants.AUTH_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
return (True, result)
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
return (False, result)
def logout(self, auth_secret):
"""Log out of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
None if the logout is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Replace the old authentication secret by the new one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.USERNAME_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.AUTH_KEY] = ''
return (True, result)
def get_user_profile(self, auth_secret):
"""Get the profile (i.e., username, password, etc.) of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
A dict containing the following keys:
- USERNAME_KEY
- PASSWORD_HASH_KEY
- AUTH_KEY
if the user profile is obtained successfully; otherwise a dict
containing the error string with the key ERROR_KEY.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
result = self._rc.hgetall(userid_profile_key)
return (True, result)
    def post_tweet(self, auth_secret: str, tweet: str):
        """Post a tweet.

        The tweet is fanned out at write time: it is pushed onto the poster's
        own timeline, the poster's tweet list, every follower's timeline, and
        the capped general timeline.

        Parameters
        ----------
        auth_secret: str
            The authentication secret of the logged-in user.
        tweet: str
            The tweet that will be posted.

        Returns
        -------
        bool
            True if the tweet is successfully posted, False otherwise.
        result
            None if the tweet is successfully posted, a dict containing
            the error string with the key ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        -  ERROR_NOT_LOGGED_IN
        """
        result = {pytwis_constants.ERROR_KEY: None}
        # Check if the user is logged in.
        loggedin, userid = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)
        # Get the next tweet-id. If the key "next_tweet_id" doesn't exist,
        # it will be created and initialized as 0, and then incremented by 1.
        post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)
        post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
        post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
        post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
        follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
        # Snapshot the follower list before the transaction begins.
        followers = self._rc.zrange(follower_zset_key, 0, -1)
        unix_time = int(time.time())
        with self._rc.pipeline() as pipe:
            pipe.multi()
            # Store the tweet with its user ID and UNIX timestamp.
            pipe.hmset(post_id_key,
                       {pytwis_constants.TWEET_USERID_KEY: userid,
                        pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,
                        pytwis_constants.TWEET_BODY_KEY: tweet})
            # Add the tweet to the user timeline.
            pipe.lpush(post_id_timeline_key, post_id)
            # Add the tweet to the tweet list posted by the user.
            pipe.lpush(post_id_user_key, post_id)
            # Write fanout the tweet to all the followers' timelines.
            for follower in followers:
                post_id_follower_key = \
                    pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)
                pipe.lpush(post_id_follower_key, post_id)
            # Add the tweet to the general timeline and left trim the general timeline
            # to only retain the latest GENERAL_TIMELINE_LIST_MAX_TWEET_CNT tweets.
            pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)
            pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
                       0,
                       pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)
            pipe.execute()
        return (True, result)
    def unfollow(self, auth_secret: str, followee_username: str):
        """Unfollow a user.

        Parameters
        ----------
        auth_secret: str
            The authentication secret of the logged-in user.
        followee_username: str
            The username of the followee.

        Returns
        -------
        bool
            True if the unfollow is successful, False otherwise.
        result
            None if the unfollow is successful, a dict containing
            the error string with the key ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        -  ERROR_NOT_LOGGED_IN
        -  ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
        """
        result = {pytwis_constants.ERROR_KEY: None}
        # Check if the user is logged in.
        loggedin, userid = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)
        with self._rc.pipeline() as pipe:
            # Check if the followee exists; retry on WatchError until the
            # lookup completes without concurrent modification.
            while True:
                try:
                    # Put a watch on the Hash 'users': username -> user-id, in case that
                    # other clients are modifying the Hash 'users'.
                    pipe.watch(pytwis_constants.USERS_KEY)
                    followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                    if followee_userid is None:
                        result[pytwis_constants.ERROR_KEY] = \
                            pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                            format(followee_username)
                        return (False, result)
                    break
                except WatchError:
                    continue
            # Remove followee_userid from the zset 'following:[username]' and remove userid
            # from the zset 'followers:[followee_username]'.
            follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
            following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
            pipe.multi()
            pipe.zrem(follower_zset_key, userid)
            pipe.zrem(following_zset_key, followee_userid)
            pipe.execute()
        return (True, result)
def get_followers(self, auth_secret):
"""Get the follower list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the follower list is successfully obtained, False otherwise.
result
A dict containing the follower list with the key FOLLOWER_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
if follower_userids is None or not follower_userids:
result[pytwis_constants.FOLLOWER_LIST_KEY] = []
return (True, result)
# Get the list of followers' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for follower_userid in follower_userids:
follower_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()
return (True, result)
def get_following(self, auth_secret):
"""Get the following list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the following list is successfully obtained, False otherwise.
result
A dict containing the following list with the key FOLLOWING_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
following_userids = self._rc.zrange(following_zset_key, 0, -1)
if following_userids is None or not following_userids:
result[pytwis_constants.FOLLOWING_LIST_KEY] = []
return (True, result)
# Get the list of followings' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for following_userid in following_userids:
following_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()
return (True, result)
    def _get_tweets(self, tweets_key: str, max_cnt_tweets: int):
        """Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.

        Parameters
        ----------
        tweets_key: str
            The key of the Redis list which stores the tweets.
        max_cnt_tweets: int
            The maximum number of tweets included in the returned list. If it is set to -1,
            then all the available tweets will be included.

        Returns
        -------
        tweets
            A list of tweets, each a dict augmented with the poster's username
            under USERNAME_KEY.
        """
        tweets = []
        if max_cnt_tweets == 0:
            return tweets
        elif max_cnt_tweets == -1:
            # Return all the tweets in the timeline.
            last_tweet_index = -1
        else:
            # Return at most max_cnt_tweets tweets.
            last_tweet_index = max_cnt_tweets - 1
        # Get the post IDs of the tweets.
        post_ids = self._rc.lrange(tweets_key, 0, last_tweet_index)
        if not post_ids:
            return tweets
        # The same pipeline is reused for two MULTI/EXEC phases below;
        # the statement order is significant.
        with self._rc.pipeline() as pipe:
            # Get the tweets with their user IDs and UNIX timestamps.
            pipe.multi()
            for post_id in post_ids:
                post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
                pipe.hgetall(post_id_key)
            tweets = pipe.execute()
            # Get the userid-to-username mappings for all the user IDs associated with the tweets.
            userid_set = {tweet[pytwis_constants.TWEET_USERID_KEY] for tweet in tweets}
            userid_list = []
            pipe.multi()
            for userid in userid_set:
                userid_list.append(userid)
                userid_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
                pipe.hget(userid_key, pytwis_constants.USERNAME_KEY)
            username_list = pipe.execute()
            userid_to_username = {userid: username for userid, username in\
                                  zip(userid_list, username_list)}
            # Add the username for the user ID of each tweet.
            for tweet in tweets:
                tweet[pytwis_constants.USERNAME_KEY] = \
                    userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]
        return tweets
def get_timeline(self, auth_secret, max_cnt_tweets):
"""Get the general or user timeline.
If an empty authentication secret is given, this method returns the general timeline.
If an authentication secret is given and it is valid, this method returns the user timeline.
If an authentication secret is given but it is invalid, this method returns an error.
Parameters
----------
auth_secret: str
Either the authentication secret of the logged-in user or an empty string.
max_cnt_tweets: int
The maximum number of tweets included in the timeline. If it is set to -1,
then all the available tweets will be included.
Returns
-------
bool
True if the timeline is successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the timeline is successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
if auth_secret == '':
# An empty authentication secret implies getting the general timeline.
timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY
else:
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the user timeline.
timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = self._get_tweets(timeline_key, max_cnt_tweets)
return (True, result)
def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
"""Get the tweets posted by one user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
username:
The name of the user who post the tweets and may not be the logged-in user.
max_cnt_tweets: int
The maximum number of tweets included in the return. If it is set to -1,
then all the tweets posted by the user will be included.
Returns
-------
bool
True if the tweets are successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the tweets are successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, _ = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the userid from the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Get the tweets posted by the user.
user_tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = \
self._get_tweets(user_tweets_key, max_cnt_tweets)
return (True, result)
|
renweizhukov/pytwis | pytwis/pytwis.py | Pytwis.unfollow | python | def unfollow(self, auth_secret, followee_username):
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
break
except WatchError:
continue
# Remove followee_userid from the zset 'following:[username]' and remove userid
# from the zset 'followers:[followee_username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
pipe.multi()
pipe.zrem(follower_zset_key, userid)
pipe.zrem(following_zset_key, followee_userid)
pipe.execute()
return (True, result) | Unfollow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the unfollow is successful, False otherwise.
result
None if the unfollow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username) | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L633-L693 | [
"def _is_loggedin(self, auth_secret):\n \"\"\"Check if a user is logged-in by verifying the input authentication secret.\n\n Parameters\n ----------\n auth_secret: str\n The authentication secret of a logged-in user.\n\n Returns\n -------\n bool\n True if the authentication secret is valid, False otherwise.\n userid: str\n The user ID associated with the authentication secret if the authentication secret\n valid, None otherwise.\n \"\"\"\n # Get the userid from the authentication secret.\n userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)\n if userid is None:\n return (False, None)\n\n # Compare the input authentication secret with the stored one.\n userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)\n stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)\n if auth_secret == stored_auth_secret:\n return (True, userid)\n\n # TODO: Resolve the inconsistency of the two authentication secrets.\n return (False, None)\n"
] | class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
def __init__(self, hostname='127.0.0.1', port=6379, socket='', db=0, password=''):
"""Initialize the class Pytiws.
Parameters
----------
hostname : str, optional
The Redis server hostname which is usually an IP address (default: 127.0.0.1).
port : int, optional
The Redis server port number (default: 6379).
socket: str, optional
The Redis server socket which will override hostname and port if it is given.
db : int, optional
The selected Redis database index (default: 0).
password : str, optional
The Redis server password (default: '').
Raises
------
ValueError
If failed to connect to the Redis server with either ResponseError or RedisTimeoutError.
"""
if socket:
self._rc = redis.StrictRedis(
unix_socket_path=socket,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
else:
self._rc = redis.StrictRedis(
host=hostname,
port=port,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
# Test the connection by ping.
try:
if self._rc.ping():
if socket:
print('Ping {} returned True'.format(socket))
else:
print('Ping {}:{} returned True'.format(hostname, port))
except (ResponseError, RedisTimeoutError) as excep:
raise ValueError(str(excep))
def _is_loggedin(self, auth_secret):
"""Check if a user is logged-in by verifying the input authentication secret.
Parameters
----------
auth_secret: str
The authentication secret of a logged-in user.
Returns
-------
bool
True if the authentication secret is valid, False otherwise.
userid: str
The user ID associated with the authentication secret if the authentication secret
valid, None otherwise.
"""
# Get the userid from the authentication secret.
userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
if userid is None:
return (False, None)
# Compare the input authentication secret with the stored one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
if auth_secret == stored_auth_secret:
return (True, userid)
# TODO: Resolve the inconsistency of the two authentication secrets.
return (False, None)
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error)
def register(self, username, password):
"""Register a new user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the new user is successfully registered, False otherwise.
result
An empty dict if the new user is successfully registered, a dict
containing the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_ALREADY_EXISTS.format(username)
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check the username.
if not Pytwis._check_username(username):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
return (False, result)
# Check the password.
if not Pytwis._check_password(password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Update the username-to-userid mapping.
with self._rc.pipeline() as pipe:
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# multiple clients are registering with the same username.
pipe.watch(pytwis_constants.USERS_KEY)
username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
if username_exists:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)
# Set the username-to-userid pair in USERS_HASH_KEY.
pipe.multi()
pipe.hset(pytwis_constants.USERS_KEY, username, userid)
pipe.execute()
break
except WatchError:
continue
# Generate the authentication secret.
auth_secret = secrets.token_hex()
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
# Generate the password hash.
# The format of the password hash looks like "method$salt$hash".
password_hash = generate_password_hash(password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
pipe.multi()
# Update the authentication_secret-to-userid mapping.
pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
# Create the user profile.
pipe.hmset(userid_profile_key,
{pytwis_constants.USERNAME_KEY: username,
pytwis_constants.PASSWORD_HASH_KEY: password_hash,
pytwis_constants.AUTH_KEY: auth_secret})
pipe.execute()
return (True, result)
def change_password(self, auth_secret, old_password, new_password):
    """Change the password of a logged-in user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret used to authenticate the user.
    old_password: str
        The current password.
    new_password: str
        The replacement password.

    Returns
    -------
    bool
        True if the password is successfully changed, False otherwise.
    result
        A dict containing the new authentication secret under AUTH_KEY on
        success; otherwise a dict with the error string under ERROR_KEY.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NEW_PASSWORD_NO_CHANGE
    - ERROR_NOT_LOGGED_IN
    - ERROR_INCORRECT_OLD_PASSWORD
    - ERROR_WEAK_PASSWORD
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # The new password must actually differ from the old one.
    if old_password == new_password:
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
        return (False, result)

    # Reject callers who are not logged in.
    logged_in, user_id = self._is_loggedin(auth_secret)
    if not logged_in:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Verify the old password against the stored hash.
    profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(user_id)
    current_hash = self._rc.hget(profile_key,
                                 pytwis_constants.PASSWORD_HASH_KEY)
    if not check_password_hash(current_hash, old_password):
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
        return (False, result)

    # Enforce the password-strength policy on the new password.
    if not Pytwis._check_password(new_password):
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
        return (False, result)

    # Rotate the authentication secret along with the password so the old
    # secret can no longer be used. The stored hash has the form
    # "method$salt$hash".
    rotated_secret = secrets.token_hex()
    rotated_hash = generate_password_hash(
        new_password, method=pytwis_constants.PASSWORD_HASH_METHOD)

    # Apply all key updates atomically in one MULTI/EXEC transaction.
    with self._rc.pipeline() as pipe:
        pipe.multi()
        pipe.hset(profile_key,
                  pytwis_constants.PASSWORD_HASH_KEY,
                  rotated_hash)
        pipe.hset(profile_key, pytwis_constants.AUTH_KEY, rotated_secret)
        pipe.hset(pytwis_constants.AUTHS_KEY, rotated_secret, user_id)
        pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
        pipe.execute()

    result[pytwis_constants.AUTH_KEY] = rotated_secret
    return (True, result)
def login(self, username, password):
    """Log into a user account.

    Parameters
    ----------
    username: str
        The username.
    password: str
        The password.

    Returns
    -------
    bool
        True if the login is successful, False otherwise.
    result
        A dict containing the authentication secret under AUTH_KEY on
        success; otherwise a dict with the error string under ERROR_KEY.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
    - ERROR_INCORRECT_PASSWORD
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Resolve the username to a user ID.
    user_id = self._rc.hget(pytwis_constants.USERS_KEY, username)
    if user_id is None:
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
        return (False, result)

    # Verify the given password against the stored hash; reject on mismatch
    # (guard clause instead of the success-first branch).
    profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(user_id)
    stored_hash = self._rc.hget(profile_key,
                                pytwis_constants.PASSWORD_HASH_KEY)
    if not check_password_hash(stored_hash, password):
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
        return (False, result)

    # Password verified: hand back the stored authentication secret.
    result[pytwis_constants.AUTH_KEY] = \
        self._rc.hget(profile_key, pytwis_constants.AUTH_KEY)
    return (True, result)
def logout(self, auth_secret):
    """Log out of a user account.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the logout is successful, False otherwise.
    result
        On success, a dict with the username under USERNAME_KEY and an
        empty string under AUTH_KEY; otherwise a dict with the error
        string under ERROR_KEY.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Reject callers who are not logged in.
    logged_in, user_id = self._is_loggedin(auth_secret)
    if not logged_in:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Invalidate the session by rotating the authentication secret: install
    # a fresh secret and drop the old one, atomically.
    rotated_secret = secrets.token_hex()
    profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(user_id)
    with self._rc.pipeline() as pipe:
        pipe.multi()
        pipe.hset(profile_key, pytwis_constants.AUTH_KEY, rotated_secret)
        pipe.hset(pytwis_constants.AUTHS_KEY, rotated_secret, user_id)
        pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
        pipe.execute()

    # Report the username of the account that was logged out, and clear the
    # secret returned to the caller.
    result[pytwis_constants.USERNAME_KEY] = \
        self._rc.hget(profile_key, pytwis_constants.USERNAME_KEY)
    result[pytwis_constants.AUTH_KEY] = ''
    return (True, result)
def get_user_profile(self, auth_secret):
    """Get the profile (username, password hash, auth secret) of a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the profile is obtained successfully, False otherwise.
    result
        On success, a dict with the keys USERNAME_KEY, PASSWORD_HASH_KEY
        and AUTH_KEY; otherwise a dict with the error string under
        ERROR_KEY.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Reject callers who are not logged in.
    logged_in, user_id = self._is_loggedin(auth_secret)
    if not logged_in:
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # The whole profile hash is the success payload.
    profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(user_id)
    return (True, self._rc.hgetall(profile_key))
def post_tweet(self, auth_secret, tweet):
    """Post a tweet on behalf of the logged-in user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    tweet: str
        The tweet body that will be posted.

    Returns
    -------
    bool
        True if the tweet is successfully posted, False otherwise.
    result
        A dict whose ERROR_KEY entry is None on success, or holds the
        error string on failure.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}
    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    # Get the next tweet-id. If the key "next_tweet_id" doesn't exist,
    # it will be created and initialized as 0, and then incremented by 1.
    post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)
    # Keys for the tweet hash, the author's timeline, the author's own tweet
    # list, and the author's follower zset.
    post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
    post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
    post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
    follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
    # NOTE(review): the follower list is read before the MULTI below, so a
    # follower added between this read and execute() misses this tweet's
    # fanout -- confirm this best-effort behavior is acceptable.
    followers = self._rc.zrange(follower_zset_key, 0, -1)
    unix_time = int(time.time())
    with self._rc.pipeline() as pipe:
        pipe.multi()
        # Store the tweet with its user ID and UNIX timestamp.
        pipe.hmset(post_id_key,
                   {pytwis_constants.TWEET_USERID_KEY: userid,
                    pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,
                    pytwis_constants.TWEET_BODY_KEY: tweet})
        # Add the tweet to the user timeline.
        pipe.lpush(post_id_timeline_key, post_id)
        # Add the tweet to the tweet list posted by the user.
        pipe.lpush(post_id_user_key, post_id)
        # Write-fanout the tweet id to every follower's timeline.
        for follower in followers:
            post_id_follower_key = \
                pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)
            pipe.lpush(post_id_follower_key, post_id)
        # Add the tweet to the general timeline and left-trim it so it only
        # retains the latest GENERAL_TIMELINE_MAX_TWEET_CNT tweets.
        pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)
        pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
                   0,
                   pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)
        pipe.execute()
    return (True, result)
def follow(self, auth_secret, followee_username):
    """Follow a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    followee_username: str
        The username of the followee.

    Returns
    -------
    bool
        True if the follow is successful, False otherwise.
    result
        A dict whose ERROR_KEY entry is None on success, or holds the
        error string on failure.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    - ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
    - ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
    """
    result = {pytwis_constants.ERROR_KEY: None}
    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    with self._rc.pipeline() as pipe:
        # Check if the followee exists, retrying on a concurrent change.
        while True:
            try:
                # Put a watch on the Hash 'users': username -> user-id, in case that
                # other clients are modifying the Hash 'users'.
                pipe.watch(pytwis_constants.USERS_KEY)
                followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                if followee_userid is None:
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                        format(followee_username)
                    return (False, result)
                elif followee_userid == userid:
                    # A user cannot follow themselves.
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
                    return (False, result)
                break
            except WatchError:
                continue
        # Update the two zsets 'followers:[followee_userid]' and 'following:[userid]'.
        # NOTE(review): this MULTI/EXEC runs outside the try/except above, so a
        # WatchError raised by execute() (if USERS_KEY changes after the watched
        # read) would propagate to the caller instead of retrying -- confirm
        # this is intended.
        follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
        following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
        unix_time = int(time.time())
        pipe.multi()
        # NOTE(review): zadd(key, score, member) is the legacy redis-py 2.x
        # positional signature; redis-py >= 3.0 expects zadd(key, {member: score}).
        # Verify against the pinned redis-py version.
        pipe.zadd(follower_zset_key, unix_time, userid)
        pipe.zadd(following_zset_key, unix_time, followee_userid)
        pipe.execute()
    return (True, result)
def get_followers(self, auth_secret):
    """Get the follower list of a logged-in user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the follower list is successfully obtained, False otherwise.
    result
        A dict containing the follower usernames under FOLLOWER_LIST_KEY
        on success; otherwise a dict with the error string under ERROR_KEY.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Get the list of followers' userids.
    follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
    follower_userids = self._rc.zrange(follower_zset_key, 0, -1)

    # `zrange` returns a (possibly empty) list, so a plain truthiness test
    # covers both the empty and the None case (the original checked
    # `is None or not ...`, which is redundant).
    if not follower_userids:
        result[pytwis_constants.FOLLOWER_LIST_KEY] = []
        return (True, result)

    # Resolve every follower userid to its username in one batched
    # pipeline round trip.
    with self._rc.pipeline() as pipe:
        pipe.multi()
        for follower_userid in follower_userids:
            follower_userid_profile_key = \
                pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
            pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
        result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()

    return (True, result)
def get_following(self, auth_secret):
    """Get the following list of a logged-in user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the following list is successfully obtained, False otherwise.
    result
        A dict containing the followee usernames under FOLLOWING_LIST_KEY
        on success; otherwise a dict with the error string under ERROR_KEY.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Get the list of followees' userids.
    following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
    following_userids = self._rc.zrange(following_zset_key, 0, -1)

    # `zrange` returns a (possibly empty) list, so a plain truthiness test
    # covers both the empty and the None case (the original checked
    # `is None or not ...`, which is redundant).
    if not following_userids:
        result[pytwis_constants.FOLLOWING_LIST_KEY] = []
        return (True, result)

    # Resolve every followee userid to its username in one batched
    # pipeline round trip.
    with self._rc.pipeline() as pipe:
        pipe.multi()
        for following_userid in following_userids:
            following_userid_profile_key = \
                pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
            pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY)
        result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()

    return (True, result)
def _get_tweets(self, tweets_key, max_cnt_tweets):
    """Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.

    Parameters
    ----------
    tweets_key: str
        The key of the Redis list which stores the tweet IDs.
    max_cnt_tweets: int
        The maximum number of tweets included in the returned list. If it is set to -1,
        then all the available tweets will be included.

    Returns
    -------
    tweets
        A list of tweet dicts; each dict carries the stored tweet hash
        fields plus a USERNAME_KEY entry resolved from the tweet's user ID.
    """
    tweets = []
    if max_cnt_tweets == 0:
        return tweets
    elif max_cnt_tweets == -1:
        # Return all the tweets in the timeline (-1 is "end of list" for LRANGE).
        last_tweet_index = -1
    else:
        # Return at most max_cnt_tweets tweets.
        last_tweet_index = max_cnt_tweets - 1
    # Get the post IDs of the tweets.
    post_ids = self._rc.lrange(tweets_key, 0, last_tweet_index)
    if not post_ids:
        return tweets
    with self._rc.pipeline() as pipe:
        # Phase 1: fetch every tweet hash in one batched round trip.
        pipe.multi()
        for post_id in post_ids:
            post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
            pipe.hgetall(post_id_key)
        tweets = pipe.execute()
        # Phase 2: resolve the distinct user IDs behind the tweets to
        # usernames, again batched. `userid_list` records the iteration
        # order of the set so it can be zipped with the pipeline results.
        userid_set = {tweet[pytwis_constants.TWEET_USERID_KEY] for tweet in tweets}
        userid_list = []
        pipe.multi()
        for userid in userid_set:
            userid_list.append(userid)
            userid_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
            pipe.hget(userid_key, pytwis_constants.USERNAME_KEY)
        username_list = pipe.execute()
        userid_to_username = {userid: username for userid, username in\
                              zip(userid_list, username_list)}
        # Attach the resolved username to each tweet dict (mutates in place).
        for tweet in tweets:
            tweet[pytwis_constants.USERNAME_KEY] = \
                userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]
    return tweets
def get_timeline(self, auth_secret, max_cnt_tweets):
    """Get the general or user timeline.

    An empty authentication secret selects the general timeline; a
    non-empty one selects the user timeline of the authenticated user,
    or yields an error if the secret is invalid.

    Parameters
    ----------
    auth_secret: str
        Either the authentication secret of the logged-in user or an empty string.
    max_cnt_tweets: int
        The maximum number of tweets included in the timeline. If it is set to -1,
        then all the available tweets will be included.

    Returns
    -------
    bool
        True if the timeline is successfully retrieved, False otherwise.
    result
        A dict containing a list of tweets under TWEETS_KEY on success;
        otherwise a dict with the error string under ERROR_KEY.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    if auth_secret != '':
        # A non-empty secret means the caller wants their own timeline,
        # which requires a valid login.
        logged_in, user_id = self._is_loggedin(auth_secret)
        if not logged_in:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)
        timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(user_id)
    else:
        # An empty secret means the public (general) timeline.
        timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY

    result[pytwis_constants.TWEETS_KEY] = self._get_tweets(timeline_key, max_cnt_tweets)
    return (True, result)
def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
    """Get the tweets posted by one user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user (the viewer).
    username:
        The name of the user who posted the tweets; may differ from the
        logged-in user.
    max_cnt_tweets: int
        The maximum number of tweets included in the return. If it is set to -1,
        then all the tweets posted by the user will be included.

    Returns
    -------
    bool
        True if the tweets are successfully retrieved, False otherwise.
    result
        A dict containing a list of tweets under TWEETS_KEY on success;
        otherwise a dict with the error string under ERROR_KEY.

    Note
    ----
    Possible error strings are listed as below:
    - ERROR_NOT_LOGGED_IN
    - ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Only logged-in viewers may read another user's tweets.
    viewer_logged_in, _ = self._is_loggedin(auth_secret)
    if not viewer_logged_in:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Resolve the author's username to a user ID.
    author_userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
    if author_userid is None:
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
        return (False, result)

    # Fetch the author's tweet list.
    author_tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(author_userid)
    result[pytwis_constants.TWEETS_KEY] = \
        self._get_tweets(author_tweets_key, max_cnt_tweets)
    return (True, result)
|
renweizhukov/pytwis | pytwis/pytwis.py | Pytwis.get_followers | python | def get_followers(self, auth_secret):
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
if follower_userids is None or not follower_userids:
result[pytwis_constants.FOLLOWER_LIST_KEY] = []
return (True, result)
# Get the list of followers' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for follower_userid in follower_userids:
follower_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()
return (True, result) | Get the follower list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the follower list is successfully obtained, False otherwise.
result
A dict containing the follower list with the key FOLLOWER_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L695-L745 | [
"def _is_loggedin(self, auth_secret):\n \"\"\"Check if a user is logged-in by verifying the input authentication secret.\n\n Parameters\n ----------\n auth_secret: str\n The authentication secret of a logged-in user.\n\n Returns\n -------\n bool\n True if the authentication secret is valid, False otherwise.\n userid: str\n The user ID associated with the authentication secret if the authentication secret\n valid, None otherwise.\n \"\"\"\n # Get the userid from the authentication secret.\n userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)\n if userid is None:\n return (False, None)\n\n # Compare the input authentication secret with the stored one.\n userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)\n stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)\n if auth_secret == stored_auth_secret:\n return (True, userid)\n\n # TODO: Resolve the inconsistency of the two authentication secrets.\n return (False, None)\n"
] | class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
def __init__(self, hostname='127.0.0.1', port=6379, socket='', db=0, password=''):
"""Initialize the class Pytiws.
Parameters
----------
hostname : str, optional
The Redis server hostname which is usually an IP address (default: 127.0.0.1).
port : int, optional
The Redis server port number (default: 6379).
socket: str, optional
The Redis server socket which will override hostname and port if it is given.
db : int, optional
The selected Redis database index (default: 0).
password : str, optional
The Redis server password (default: '').
Raises
------
ValueError
If failed to connect to the Redis server with either ResponseError or RedisTimeoutError.
"""
if socket:
self._rc = redis.StrictRedis(
unix_socket_path=socket,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
else:
self._rc = redis.StrictRedis(
host=hostname,
port=port,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
# Test the connection by ping.
try:
if self._rc.ping():
if socket:
print('Ping {} returned True'.format(socket))
else:
print('Ping {}:{} returned True'.format(hostname, port))
except (ResponseError, RedisTimeoutError) as excep:
raise ValueError(str(excep))
def _is_loggedin(self, auth_secret):
"""Check if a user is logged-in by verifying the input authentication secret.
Parameters
----------
auth_secret: str
The authentication secret of a logged-in user.
Returns
-------
bool
True if the authentication secret is valid, False otherwise.
userid: str
The user ID associated with the authentication secret if the authentication secret
valid, None otherwise.
"""
# Get the userid from the authentication secret.
userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
if userid is None:
return (False, None)
# Compare the input authentication secret with the stored one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
if auth_secret == stored_auth_secret:
return (True, userid)
# TODO: Resolve the inconsistency of the two authentication secrets.
return (False, None)
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error)
def register(self, username, password):
"""Register a new user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the new user is successfully registered, False otherwise.
result
An empty dict if the new user is successfully registered, a dict
containing the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_ALREADY_EXISTS.format(username)
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check the username.
if not Pytwis._check_username(username):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
return (False, result)
# Check the password.
if not Pytwis._check_password(password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Update the username-to-userid mapping.
with self._rc.pipeline() as pipe:
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# multiple clients are registering with the same username.
pipe.watch(pytwis_constants.USERS_KEY)
username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
if username_exists:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)
# Set the username-to-userid pair in USERS_HASH_KEY.
pipe.multi()
pipe.hset(pytwis_constants.USERS_KEY, username, userid)
pipe.execute()
break
except WatchError:
continue
# Generate the authentication secret.
auth_secret = secrets.token_hex()
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
# Generate the password hash.
# The format of the password hash looks like "method$salt$hash".
password_hash = generate_password_hash(password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
pipe.multi()
# Update the authentication_secret-to-userid mapping.
pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
# Create the user profile.
pipe.hmset(userid_profile_key,
{pytwis_constants.USERNAME_KEY: username,
pytwis_constants.PASSWORD_HASH_KEY: password_hash,
pytwis_constants.AUTH_KEY: auth_secret})
pipe.execute()
return (True, result)
def change_password(self, auth_secret, old_password, new_password):
"""Change the user password.
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
if old_password == new_password:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
return (False, result)
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Check if the old password matches.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if not check_password_hash(stored_password_hash, old_password):
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
return (False, result)
# Check the password.
if not Pytwis._check_password(new_password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Generate the new password hash.
# The format of the new password hash looks like "method$salt$hash".
new_password_hash = generate_password_hash(new_password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
# Replace the old password hash by the new one and the old authentication secret
# by the new one.
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY,
new_password_hash)
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.AUTH_KEY] = new_auth_secret
return (True, result)
def login(self, username, password):
"""Log into a user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the login is successful, False otherwise.
result
A dict containing the authentication secret with the key AUTH_KEY
if the login is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
- ERROR_INCORRECT_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Get the user-id based on the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Compare the input password hash with the stored one. If it matches,
# return the authentication secret.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if check_password_hash(stored_password_hash, password):
result[pytwis_constants.AUTH_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
return (True, result)
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
return (False, result)
def logout(self, auth_secret):
"""Log out of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
None if the logout is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Replace the old authentication secret by the new one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.USERNAME_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.AUTH_KEY] = ''
return (True, result)
def get_user_profile(self, auth_secret):
    """Get the profile (i.e., username, password hash, etc.) of a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the user profile is successfully obtained, False otherwise.
    result
        A dict containing the following keys:

        - USERNAME_KEY
        - PASSWORD_HASH_KEY
        - AUTH_KEY

        if the user profile is obtained successfully; otherwise a dict
        containing the error string with the key ERROR_KEY.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN

    NOTE(review): the success path returns the raw profile hash, which
    exposes the stored password hash and authentication secret to the
    caller — confirm this is intended.
    """
    result = {pytwis_constants.ERROR_KEY: None}
    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    # Fetch the whole profile hash. This rebinds `result`, so the
    # ERROR_KEY entry is dropped on the success path.
    userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
    result = self._rc.hgetall(userid_profile_key)
    return (True, result)
def post_tweet(self, auth_secret, tweet):
    """Post a tweet.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    tweet: str
        The tweet that will be posted.

    Returns
    -------
    bool
        True if the tweet is successfully posted, False otherwise.
    result
        None if the tweet is successfully posted, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Reject the request unless the authentication secret is valid.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Allocate a unique tweet ID; INCR creates and zero-initializes the
    # counter on first use, then increments it by one.
    tweet_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)

    # Keys touched by this post.
    tweet_key = pytwis_constants.TWEET_KEY_FORMAT.format(tweet_id)
    own_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
    own_tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)

    # Snapshot the current follower set for the write fanout below.
    follower_ids = self._rc.zrange(
        pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid), 0, -1)
    posted_at = int(time.time())

    with self._rc.pipeline() as pipe:
        pipe.multi()
        # Persist the tweet body together with its author and timestamp.
        pipe.hmset(tweet_key,
                   {pytwis_constants.TWEET_USERID_KEY: userid,
                    pytwis_constants.TWEET_UNIXTIME_KEY: posted_at,
                    pytwis_constants.TWEET_BODY_KEY: tweet})
        # Prepend the tweet to the author's own timeline and tweet list.
        pipe.lpush(own_timeline_key, tweet_id)
        pipe.lpush(own_tweets_key, tweet_id)
        # Fan the tweet out to every follower's timeline.
        for follower_id in follower_ids:
            pipe.lpush(
                pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower_id),
                tweet_id)
        # Publish to the general timeline, capped at the newest
        # GENERAL_TIMELINE_MAX_TWEET_CNT entries.
        pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, tweet_id)
        pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
                   0,
                   pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)
        pipe.execute()

    return (True, result)
def follow(self, auth_secret, followee_username):
    """Follow a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    followee_username: str
        The username of the followee.

    Returns
    -------
    bool
        True if the follow is successful, False otherwise.
    result
        None if the follow is successful, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    -  ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
    -  ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
    """
    result = {pytwis_constants.ERROR_KEY: None}
    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    with self._rc.pipeline() as pipe:
        # Check if the followee exists.
        while True:
            try:
                # Put a watch on the Hash 'users': username -> user-id, in case that
                # other clients are modifying the Hash 'users'.
                pipe.watch(pytwis_constants.USERS_KEY)
                # After WATCH the pipeline runs commands immediately, so
                # this HGET returns a real value, not a buffered command.
                followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                if followee_userid is None:
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                        format(followee_username)
                    return (False, result)
                elif followee_userid == userid:
                    # Self-follow is rejected.
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
                    return (False, result)
                break
            except WatchError:
                # Another client touched 'users' between WATCH and the
                # read; retry the lookup.
                continue
        # Update the two zset 'followers:[followee_username]' and 'following:[username]'.
        # NOTE(review): execute() below is outside the retry loop, so a
        # WatchError raised at EXEC time (watched 'users' changed after
        # the break) would propagate to the caller — confirm intended.
        follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
        following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
        unix_time = int(time.time())
        pipe.multi()
        # NOTE(review): positional (score, member) ZADD is the redis-py
        # 2.x calling convention; redis-py 3.x requires a mapping.
        pipe.zadd(follower_zset_key, unix_time, userid)
        pipe.zadd(following_zset_key, unix_time, followee_userid)
        pipe.execute()
    return (True, result)
def unfollow(self, auth_secret, followee_username):
    """Unfollow a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    followee_username: str
        The username of the followee.

    Returns
    -------
    bool
        True if the unfollow is successful, False otherwise.
    result
        None if the unfollow is successful, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    -  ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
    """
    result = {pytwis_constants.ERROR_KEY: None}
    # Check if the user is logged in.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)
    with self._rc.pipeline() as pipe:
        # Check if the followee exists.
        while True:
            try:
                # Put a watch on the Hash 'users': username -> user-id, in case that
                # other clients are modifying the Hash 'users'.
                pipe.watch(pytwis_constants.USERS_KEY)
                # Immediate-mode read under WATCH (see `follow`).
                followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                if followee_userid is None:
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                        format(followee_username)
                    return (False, result)
                break
            except WatchError:
                # 'users' changed between WATCH and the read; retry.
                continue
        # Remove followee_userid from the zset 'following:[username]' and remove userid
        # from the zset 'followers:[followee_username]'.
        # ZREM is a no-op for members that are absent, so unfollowing a
        # user who was never followed still succeeds.
        follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
        following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
        pipe.multi()
        pipe.zrem(follower_zset_key, userid)
        pipe.zrem(following_zset_key, followee_userid)
        pipe.execute()
    return (True, result)
def get_following(self, auth_secret):
    """Get the following list of a logged-in user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the following list is successfully obtained, False otherwise.
    result
        A dict containing the following list with the key FOLLOWING_LIST_KEY
        if the follower list is successfully obtained, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Reject the request unless the authentication secret is valid.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Collect the userids of everyone this user follows.
    followee_ids = self._rc.zrange(
        pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid), 0, -1)
    if not followee_ids:
        result[pytwis_constants.FOLLOWING_LIST_KEY] = []
        return (True, result)

    # Translate every followee userid into a username in one round trip.
    with self._rc.pipeline() as pipe:
        pipe.multi()
        for followee_id in followee_ids:
            pipe.hget(
                pytwis_constants.USER_PROFILE_KEY_FORMAT.format(followee_id),
                pytwis_constants.USERNAME_KEY)
        result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()

    return (True, result)
def _get_tweets(self, tweets_key, max_cnt_tweets):
    """Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.

    Parameters
    ----------
    tweets_key: str
        The key of the Redis list which stores the tweets.
    max_cnt_tweets: int
        The maximum number of tweets included in the returned list. If it is
        set to -1, then all the available tweets will be included.

    Returns
    -------
    tweets
        A list of tweets, each a dict augmented with the author's
        username under USERNAME_KEY.
    """
    if max_cnt_tweets == 0:
        return []
    # LRANGE stop index: -1 selects the whole list; otherwise take the
    # first max_cnt_tweets entries.
    stop = -1 if max_cnt_tweets == -1 else max_cnt_tweets - 1

    tweet_ids = self._rc.lrange(tweets_key, 0, stop)
    if not tweet_ids:
        return []

    with self._rc.pipeline() as pipe:
        # Fetch every tweet hash (author id, timestamp, body) in one
        # transaction.
        pipe.multi()
        for tweet_id in tweet_ids:
            pipe.hgetall(pytwis_constants.TWEET_KEY_FORMAT.format(tweet_id))
        tweets = pipe.execute()

        # Resolve each distinct author id to a username via a second
        # pipelined transaction.
        author_ids = list({t[pytwis_constants.TWEET_USERID_KEY] for t in tweets})
        pipe.multi()
        for author_id in author_ids:
            pipe.hget(
                pytwis_constants.USER_PROFILE_KEY_FORMAT.format(author_id),
                pytwis_constants.USERNAME_KEY)
        id_to_name = dict(zip(author_ids, pipe.execute()))

    # Attach the author's username to every tweet.
    for t in tweets:
        t[pytwis_constants.USERNAME_KEY] = \
            id_to_name[t[pytwis_constants.TWEET_USERID_KEY]]
    return tweets
def get_timeline(self, auth_secret, max_cnt_tweets):
    """Get the general or user timeline.

    If an empty authentication secret is given, this method returns the
    general timeline. If an authentication secret is given and it is valid,
    this method returns the user timeline. If an authentication secret is
    given but it is invalid, this method returns an error.

    Parameters
    ----------
    auth_secret: str
        Either the authentication secret of the logged-in user or an empty
        string.
    max_cnt_tweets: int
        The maximum number of tweets included in the timeline. If it is set
        to -1, then all the available tweets will be included.

    Returns
    -------
    bool
        True if the timeline is successfully retrieved, False otherwise.
    result
        A dict containing a list of tweets with the key TWEETS_KEY if
        the timeline is successfully retrieved, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    if auth_secret != '':
        # A non-empty secret selects the caller's personal timeline,
        # provided the secret is valid.
        loggedin, userid = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)
        timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
    else:
        # An empty secret selects the public general timeline.
        timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY

    result[pytwis_constants.TWEETS_KEY] = self._get_tweets(timeline_key, max_cnt_tweets)
    return (True, result)
def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
    """Get the tweets posted by one user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    username:
        The name of the user who posted the tweets; this may be a user
        other than the logged-in one.
    max_cnt_tweets: int
        The maximum number of tweets included in the return. If it is set
        to -1, then all the tweets posted by the user will be included.

    Returns
    -------
    bool
        True if the tweets are successfully retrieved, False otherwise.
    result
        A dict containing a list of tweets with the key TWEETS_KEY if
        the tweets are successfully retrieved, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    -  ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Only logged-in users may read another user's tweets; the caller's
    # own userid is not needed afterwards.
    loggedin, _ = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Map the requested username to its userid.
    author_id = self._rc.hget(pytwis_constants.USERS_KEY, username)
    if author_id is None:
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
        return (False, result)

    # Pull the author's tweet list.
    result[pytwis_constants.TWEETS_KEY] = self._get_tweets(
        pytwis_constants.USER_TWEETS_KEY_FORMAT.format(author_id),
        max_cnt_tweets)
    return (True, result)
|
renweizhukov/pytwis | pytwis/pytwis.py | Pytwis.get_following | python | def get_following(self, auth_secret):
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
following_userids = self._rc.zrange(following_zset_key, 0, -1)
if following_userids is None or not following_userids:
result[pytwis_constants.FOLLOWING_LIST_KEY] = []
return (True, result)
# Get the list of followings' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for following_userid in following_userids:
following_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()
return (True, result) | Get the following list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the following list is successfully obtained, False otherwise.
result
A dict containing the following list with the key FOLLOWING_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L747-L797 | [
"def _is_loggedin(self, auth_secret):\n \"\"\"Check if a user is logged-in by verifying the input authentication secret.\n\n Parameters\n ----------\n auth_secret: str\n The authentication secret of a logged-in user.\n\n Returns\n -------\n bool\n True if the authentication secret is valid, False otherwise.\n userid: str\n The user ID associated with the authentication secret if the authentication secret\n valid, None otherwise.\n \"\"\"\n # Get the userid from the authentication secret.\n userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)\n if userid is None:\n return (False, None)\n\n # Compare the input authentication secret with the stored one.\n userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)\n stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)\n if auth_secret == stored_auth_secret:\n return (True, userid)\n\n # TODO: Resolve the inconsistency of the two authentication secrets.\n return (False, None)\n"
] | class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
def __init__(self, hostname='127.0.0.1', port=6379, socket='', db=0, password=''):
"""Initialize the class Pytiws.
Parameters
----------
hostname : str, optional
The Redis server hostname which is usually an IP address (default: 127.0.0.1).
port : int, optional
The Redis server port number (default: 6379).
socket: str, optional
The Redis server socket which will override hostname and port if it is given.
db : int, optional
The selected Redis database index (default: 0).
password : str, optional
The Redis server password (default: '').
Raises
------
ValueError
If failed to connect to the Redis server with either ResponseError or RedisTimeoutError.
"""
if socket:
self._rc = redis.StrictRedis(
unix_socket_path=socket,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
else:
self._rc = redis.StrictRedis(
host=hostname,
port=port,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
# Test the connection by ping.
try:
if self._rc.ping():
if socket:
print('Ping {} returned True'.format(socket))
else:
print('Ping {}:{} returned True'.format(hostname, port))
except (ResponseError, RedisTimeoutError) as excep:
raise ValueError(str(excep))
def _is_loggedin(self, auth_secret):
"""Check if a user is logged-in by verifying the input authentication secret.
Parameters
----------
auth_secret: str
The authentication secret of a logged-in user.
Returns
-------
bool
True if the authentication secret is valid, False otherwise.
userid: str
The user ID associated with the authentication secret if the authentication secret
valid, None otherwise.
"""
# Get the userid from the authentication secret.
userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
if userid is None:
return (False, None)
# Compare the input authentication secret with the stored one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
if auth_secret == stored_auth_secret:
return (True, userid)
# TODO: Resolve the inconsistency of the two authentication secrets.
return (False, None)
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error)
def register(self, username, password):
"""Register a new user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the new user is successfully registered, False otherwise.
result
An empty dict if the new user is successfully registered, a dict
containing the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_ALREADY_EXISTS.format(username)
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check the username.
if not Pytwis._check_username(username):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
return (False, result)
# Check the password.
if not Pytwis._check_password(password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Update the username-to-userid mapping.
with self._rc.pipeline() as pipe:
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# multiple clients are registering with the same username.
pipe.watch(pytwis_constants.USERS_KEY)
username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
if username_exists:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)
# Set the username-to-userid pair in USERS_HASH_KEY.
pipe.multi()
pipe.hset(pytwis_constants.USERS_KEY, username, userid)
pipe.execute()
break
except WatchError:
continue
# Generate the authentication secret.
auth_secret = secrets.token_hex()
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
# Generate the password hash.
# The format of the password hash looks like "method$salt$hash".
password_hash = generate_password_hash(password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
pipe.multi()
# Update the authentication_secret-to-userid mapping.
pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
# Create the user profile.
pipe.hmset(userid_profile_key,
{pytwis_constants.USERNAME_KEY: username,
pytwis_constants.PASSWORD_HASH_KEY: password_hash,
pytwis_constants.AUTH_KEY: auth_secret})
pipe.execute()
return (True, result)
def change_password(self, auth_secret, old_password, new_password):
"""Change the user password.
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
if old_password == new_password:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
return (False, result)
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Check if the old password matches.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if not check_password_hash(stored_password_hash, old_password):
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
return (False, result)
# Check the password.
if not Pytwis._check_password(new_password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Generate the new password hash.
# The format of the new password hash looks like "method$salt$hash".
new_password_hash = generate_password_hash(new_password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
# Replace the old password hash by the new one and the old authentication secret
# by the new one.
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY,
new_password_hash)
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.AUTH_KEY] = new_auth_secret
return (True, result)
def login(self, username, password):
"""Log into a user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the login is successful, False otherwise.
result
A dict containing the authentication secret with the key AUTH_KEY
if the login is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
- ERROR_INCORRECT_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Get the user-id based on the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Compare the input password hash with the stored one. If it matches,
# return the authentication secret.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if check_password_hash(stored_password_hash, password):
result[pytwis_constants.AUTH_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
return (True, result)
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
return (False, result)
def logout(self, auth_secret):
"""Log out of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
None if the logout is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Replace the old authentication secret by the new one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.USERNAME_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.AUTH_KEY] = ''
return (True, result)
def get_user_profile(self, auth_secret):
"""Get the profile (i.e., username, password, etc.) of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
A dict containing the following keys:
- USERNAME_KEY
- PASSWORD_HASH_KEY
- AUTH_KEY
if the user profile is obtained successfully; otherwise a dict
containing the error string with the key ERROR_KEY.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
result = self._rc.hgetall(userid_profile_key)
return (True, result)
def post_tweet(self, auth_secret, tweet):
"""Post a tweet.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
tweet: str
The tweet that will be posted.
Returns
-------
bool
True if the tweet is successfully posted, False otherwise.
result
None if the tweet is successfully posted, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)
post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
followers = self._rc.zrange(follower_zset_key, 0, -1)
unix_time = int(time.time())
with self._rc.pipeline() as pipe:
pipe.multi()
# Store the tweet with its user ID and UNIX timestamp.
pipe.hmset(post_id_key,
{pytwis_constants.TWEET_USERID_KEY: userid,
pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,
pytwis_constants.TWEET_BODY_KEY: tweet})
# Add the tweet to the user timeline.
pipe.lpush(post_id_timeline_key, post_id)
# Add the tweet to the tweet list posted by the user.
pipe.lpush(post_id_user_key, post_id)
# Write fanout the tweet to all the followers' timelines.
for follower in followers:
post_id_follower_key = \
pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)
pipe.lpush(post_id_follower_key, post_id)
# Add the tweet to the general timeline and left trim the general timeline
# to only retain the latest GENERAL_TIMELINE_LIST_MAX_TWEET_CNT tweets.
pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)
pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
0,
pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)
pipe.execute()
return (True, result)
def follow(self, auth_secret, followee_username):
"""Follow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the follow is successful, False otherwise.
result
None if the follow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
- ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
elif followee_userid == userid:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
return (False, result)
break
except WatchError:
continue
# Update the two zset 'followers:[followee_username]' and 'following:[username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
unix_time = int(time.time())
pipe.multi()
pipe.zadd(follower_zset_key, unix_time, userid)
pipe.zadd(following_zset_key, unix_time, followee_userid)
pipe.execute()
return (True, result)
def unfollow(self, auth_secret, followee_username):
    """Unfollow a user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    followee_username: str
        The username of the followee.

    Returns
    -------
    bool
        True if the unfollow is successful, False otherwise.
    result
        None if the unfollow is successful, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    -  ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Check if the user is logged in; reject the request otherwise.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    with self._rc.pipeline() as pipe:
        # Check if the followee exists via optimistic locking: WATCH the
        # Hash 'users' (username -> user-id) and retry the read on
        # WatchError caused by concurrent writers.
        while True:
            try:
                # Put a watch on the Hash 'users': username -> user-id, in case that
                # other clients are modifying the Hash 'users'.
                pipe.watch(pytwis_constants.USERS_KEY)
                followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                if followee_userid is None:
                    # Unknown followee username.
                    result[pytwis_constants.ERROR_KEY] = \
                        pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                        format(followee_username)
                    return (False, result)

                break
            except WatchError:
                continue

        # Remove followee_userid from the zset 'following:[userid]' and
        # remove userid from the zset 'followers:[followee_userid]'
        # atomically inside MULTI/EXEC. ZREM on an absent member is a
        # no-op, so unfollowing someone never followed succeeds silently.
        follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
        following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
        pipe.multi()
        pipe.zrem(follower_zset_key, userid)
        pipe.zrem(following_zset_key, followee_userid)
        pipe.execute()

    return (True, result)
def get_followers(self, auth_secret):
    """Get the follower list of a logged-in user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.

    Returns
    -------
    bool
        True if the follower list is successfully obtained, False otherwise.
    result
        A dict containing the follower list with the key FOLLOWER_LIST_KEY
        if the follower list is successfully obtained, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Check if the user is logged in; reject the request otherwise.
    loggedin, userid = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Get the list of followers' userids from the zset 'followers:[userid]'.
    follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
    follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
    # `not follower_userids` already covers both None and an empty list,
    # so the former explicit `is None` test was redundant.
    if not follower_userids:
        result[pytwis_constants.FOLLOWER_LIST_KEY] = []
        return (True, result)

    # Batch all the userid -> username lookups into one MULTI/EXEC round
    # trip; execute() returns the usernames in queueing order.
    with self._rc.pipeline() as pipe:
        pipe.multi()
        for follower_userid in follower_userids:
            follower_userid_profile_key = \
                pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
            pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
        result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()

    return (True, result)
def _get_tweets(self, tweets_key, max_cnt_tweets):
    """Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.

    Parameters
    ----------
    tweets_key: str
        The key of the Redis list which stores the tweets.
    max_cnt_tweets: int
        The maximum number of tweets included in the returned list. If it is set to -1,
        then all the available tweets will be included.

    Returns
    -------
    tweets
        A list of tweets, each a dict augmented with the poster's username
        under the key USERNAME_KEY.
    """
    if max_cnt_tweets == 0:
        return []

    # LRANGE stop index: -1 means "through the end of the list", i.e.
    # return every tweet; otherwise stop at max_cnt_tweets - 1.
    stop_index = -1 if max_cnt_tweets == -1 else max_cnt_tweets - 1

    # Fetch the post IDs of the requested tweets.
    post_ids = self._rc.lrange(tweets_key, 0, stop_index)
    if not post_ids:
        return []

    with self._rc.pipeline() as pipe:
        # Round trip 1: fetch every tweet hash (user ID, UNIX timestamp,
        # body) in a single MULTI/EXEC batch.
        pipe.multi()
        for post_id in post_ids:
            pipe.hgetall(pytwis_constants.TWEET_KEY_FORMAT.format(post_id))
        tweets = pipe.execute()

        # Round trip 2: resolve each distinct poster user ID to a username.
        # Deduplicate first so each profile is fetched only once; freezing
        # the set into a list pins the order that execute() will mirror.
        distinct_userids = list({tweet[pytwis_constants.TWEET_USERID_KEY]
                                 for tweet in tweets})
        pipe.multi()
        for poster_id in distinct_userids:
            pipe.hget(pytwis_constants.USER_PROFILE_KEY_FORMAT.format(poster_id),
                      pytwis_constants.USERNAME_KEY)
        usernames = pipe.execute()
        userid_to_username = dict(zip(distinct_userids, usernames))

        # Attach the resolved username to each tweet dict.
        for tweet in tweets:
            tweet[pytwis_constants.USERNAME_KEY] = \
                userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]

    return tweets
def get_timeline(self, auth_secret, max_cnt_tweets):
    """Get the general or user timeline.

    If an empty authentication secret is given, this method returns the general timeline.
    If an authentication secret is given and it is valid, this method returns the user timeline.
    If an authentication secret is given but it is invalid, this method returns an error.

    Parameters
    ----------
    auth_secret: str
        Either the authentication secret of the logged-in user or an empty string.
    max_cnt_tweets: int
        The maximum number of tweets included in the timeline. If it is set to -1,
        then all the available tweets will be included.

    Returns
    -------
    bool
        True if the timeline is successfully retrieved, False otherwise.
    result
        A dict containing a list of tweets with the key TWEETS_KEY if
        the timeline is successfully retrieved, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    """
    result = {pytwis_constants.ERROR_KEY: None}

    if auth_secret != '':
        # A non-empty secret selects the user timeline, which requires a
        # valid login.
        loggedin, userid = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)
        timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
    else:
        # An empty authentication secret implies getting the general timeline.
        timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY

    result[pytwis_constants.TWEETS_KEY] = self._get_tweets(timeline_key, max_cnt_tweets)
    return (True, result)
def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
    """Get the tweets posted by one user.

    Parameters
    ----------
    auth_secret: str
        The authentication secret of the logged-in user.
    username:
        The name of the user who post the tweets and may not be the logged-in user.
    max_cnt_tweets: int
        The maximum number of tweets included in the return. If it is set to -1,
        then all the tweets posted by the user will be included.

    Returns
    -------
    bool
        True if the tweets are successfully retrieved, False otherwise.
    result
        A dict containing a list of tweets with the key TWEETS_KEY if
        the tweets are successfully retrieved, a dict containing
        the error string with the key ERROR_KEY otherwise.

    Note
    ----
    Possible error strings are listed as below:

    -  ERROR_NOT_LOGGED_IN
    -  ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
    """
    result = {pytwis_constants.ERROR_KEY: None}

    # Guard 1: the caller must be logged in (the caller's own user ID is
    # not needed, only the validity of the secret).
    loggedin, _ = self._is_loggedin(auth_secret)
    if not loggedin:
        result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
        return (False, result)

    # Guard 2: the target username must resolve to a user ID.
    target_userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
    if target_userid is None:
        result[pytwis_constants.ERROR_KEY] = \
            pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
        return (False, result)

    # Fetch the tweets from that user's own tweet list.
    tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(target_userid)
    result[pytwis_constants.TWEETS_KEY] = self._get_tweets(tweets_key, max_cnt_tweets)
    return (True, result)
|
renweizhukov/pytwis | pytwis/pytwis.py | Pytwis._get_tweets | python | def _get_tweets(self, tweets_key, max_cnt_tweets):
tweets = []
if max_cnt_tweets == 0:
return tweets
elif max_cnt_tweets == -1:
# Return all the tweets in the timeline.
last_tweet_index = -1
else:
# Return at most max_cnt_tweets tweets.
last_tweet_index = max_cnt_tweets - 1
# Get the post IDs of the tweets.
post_ids = self._rc.lrange(tweets_key, 0, last_tweet_index)
if not post_ids:
return tweets
with self._rc.pipeline() as pipe:
# Get the tweets with their user IDs and UNIX timestamps.
pipe.multi()
for post_id in post_ids:
post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
pipe.hgetall(post_id_key)
tweets = pipe.execute()
# Get the userid-to-username mappings for all the user IDs associated with the tweets.
userid_set = {tweet[pytwis_constants.TWEET_USERID_KEY] for tweet in tweets}
userid_list = []
pipe.multi()
for userid in userid_set:
userid_list.append(userid)
userid_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
pipe.hget(userid_key, pytwis_constants.USERNAME_KEY)
username_list = pipe.execute()
userid_to_username = {userid: username for userid, username in\
zip(userid_list, username_list)}
# Add the username for the user ID of each tweet.
for tweet in tweets:
tweet[pytwis_constants.USERNAME_KEY] = \
userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]
return tweets | Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.
Parameters
----------
tweets_key: str
The key of the Redis list which stores the tweets.
max_cnt_tweets: int
The maximum number of tweets included in the returned list. If it is set to -1,
then all the available tweets will be included.
Returns
-------
tweets
A list of tweets | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L799-L857 | null | class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
def __init__(self, hostname='127.0.0.1', port=6379, socket='', db=0, password=''):
"""Initialize the class Pytiws.
Parameters
----------
hostname : str, optional
The Redis server hostname which is usually an IP address (default: 127.0.0.1).
port : int, optional
The Redis server port number (default: 6379).
socket: str, optional
The Redis server socket which will override hostname and port if it is given.
db : int, optional
The selected Redis database index (default: 0).
password : str, optional
The Redis server password (default: '').
Raises
------
ValueError
If failed to connect to the Redis server with either ResponseError or RedisTimeoutError.
"""
if socket:
self._rc = redis.StrictRedis(
unix_socket_path=socket,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
else:
self._rc = redis.StrictRedis(
host=hostname,
port=port,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
# Test the connection by ping.
try:
if self._rc.ping():
if socket:
print('Ping {} returned True'.format(socket))
else:
print('Ping {}:{} returned True'.format(hostname, port))
except (ResponseError, RedisTimeoutError) as excep:
raise ValueError(str(excep))
def _is_loggedin(self, auth_secret):
"""Check if a user is logged-in by verifying the input authentication secret.
Parameters
----------
auth_secret: str
The authentication secret of a logged-in user.
Returns
-------
bool
True if the authentication secret is valid, False otherwise.
userid: str
The user ID associated with the authentication secret if the authentication secret
valid, None otherwise.
"""
# Get the userid from the authentication secret.
userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
if userid is None:
return (False, None)
# Compare the input authentication secret with the stored one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
if auth_secret == stored_auth_secret:
return (True, userid)
# TODO: Resolve the inconsistency of the two authentication secrets.
return (False, None)
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error)
def register(self, username, password):
"""Register a new user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the new user is successfully registered, False otherwise.
result
An empty dict if the new user is successfully registered, a dict
containing the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_ALREADY_EXISTS.format(username)
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check the username.
if not Pytwis._check_username(username):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
return (False, result)
# Check the password.
if not Pytwis._check_password(password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Update the username-to-userid mapping.
with self._rc.pipeline() as pipe:
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# multiple clients are registering with the same username.
pipe.watch(pytwis_constants.USERS_KEY)
username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
if username_exists:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)
# Set the username-to-userid pair in USERS_HASH_KEY.
pipe.multi()
pipe.hset(pytwis_constants.USERS_KEY, username, userid)
pipe.execute()
break
except WatchError:
continue
# Generate the authentication secret.
auth_secret = secrets.token_hex()
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
# Generate the password hash.
# The format of the password hash looks like "method$salt$hash".
password_hash = generate_password_hash(password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
pipe.multi()
# Update the authentication_secret-to-userid mapping.
pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
# Create the user profile.
pipe.hmset(userid_profile_key,
{pytwis_constants.USERNAME_KEY: username,
pytwis_constants.PASSWORD_HASH_KEY: password_hash,
pytwis_constants.AUTH_KEY: auth_secret})
pipe.execute()
return (True, result)
def change_password(self, auth_secret, old_password, new_password):
"""Change the user password.
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
if old_password == new_password:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
return (False, result)
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Check if the old password matches.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if not check_password_hash(stored_password_hash, old_password):
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
return (False, result)
# Check the password.
if not Pytwis._check_password(new_password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Generate the new password hash.
# The format of the new password hash looks like "method$salt$hash".
new_password_hash = generate_password_hash(new_password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
# Replace the old password hash by the new one and the old authentication secret
# by the new one.
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY,
new_password_hash)
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.AUTH_KEY] = new_auth_secret
return (True, result)
def login(self, username, password):
"""Log into a user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the login is successful, False otherwise.
result
A dict containing the authentication secret with the key AUTH_KEY
if the login is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
- ERROR_INCORRECT_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Get the user-id based on the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Compare the input password hash with the stored one. If it matches,
# return the authentication secret.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if check_password_hash(stored_password_hash, password):
result[pytwis_constants.AUTH_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
return (True, result)
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
return (False, result)
def logout(self, auth_secret):
"""Log out of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
None if the logout is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Replace the old authentication secret by the new one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.USERNAME_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.AUTH_KEY] = ''
return (True, result)
def get_user_profile(self, auth_secret):
"""Get the profile (i.e., username, password, etc.) of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
A dict containing the following keys:
- USERNAME_KEY
- PASSWORD_HASH_KEY
- AUTH_KEY
if the user profile is obtained successfully; otherwise a dict
containing the error string with the key ERROR_KEY.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
result = self._rc.hgetall(userid_profile_key)
return (True, result)
def post_tweet(self, auth_secret, tweet):
"""Post a tweet.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
tweet: str
The tweet that will be posted.
Returns
-------
bool
True if the tweet is successfully posted, False otherwise.
result
None if the tweet is successfully posted, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)
post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
followers = self._rc.zrange(follower_zset_key, 0, -1)
unix_time = int(time.time())
with self._rc.pipeline() as pipe:
pipe.multi()
# Store the tweet with its user ID and UNIX timestamp.
pipe.hmset(post_id_key,
{pytwis_constants.TWEET_USERID_KEY: userid,
pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,
pytwis_constants.TWEET_BODY_KEY: tweet})
# Add the tweet to the user timeline.
pipe.lpush(post_id_timeline_key, post_id)
# Add the tweet to the tweet list posted by the user.
pipe.lpush(post_id_user_key, post_id)
# Write fanout the tweet to all the followers' timelines.
for follower in followers:
post_id_follower_key = \
pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)
pipe.lpush(post_id_follower_key, post_id)
# Add the tweet to the general timeline and left trim the general timeline
# to only retain the latest GENERAL_TIMELINE_LIST_MAX_TWEET_CNT tweets.
pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)
pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
0,
pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)
pipe.execute()
return (True, result)
def follow(self, auth_secret, followee_username):
"""Follow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the follow is successful, False otherwise.
result
None if the follow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
- ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
elif followee_userid == userid:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
return (False, result)
break
except WatchError:
continue
# Update the two zset 'followers:[followee_username]' and 'following:[username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
unix_time = int(time.time())
pipe.multi()
pipe.zadd(follower_zset_key, unix_time, userid)
pipe.zadd(following_zset_key, unix_time, followee_userid)
pipe.execute()
return (True, result)
def unfollow(self, auth_secret, followee_username):
"""Unfollow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the unfollow is successful, False otherwise.
result
None if the unfollow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
break
except WatchError:
continue
# Remove followee_userid from the zset 'following:[username]' and remove userid
# from the zset 'followers:[followee_username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
pipe.multi()
pipe.zrem(follower_zset_key, userid)
pipe.zrem(following_zset_key, followee_userid)
pipe.execute()
return (True, result)
def get_followers(self, auth_secret):
"""Get the follower list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the follower list is successfully obtained, False otherwise.
result
A dict containing the follower list with the key FOLLOWER_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
if follower_userids is None or not follower_userids:
result[pytwis_constants.FOLLOWER_LIST_KEY] = []
return (True, result)
# Get the list of followers' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for follower_userid in follower_userids:
follower_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()
return (True, result)
def get_following(self, auth_secret):
"""Get the following list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the following list is successfully obtained, False otherwise.
result
A dict containing the following list with the key FOLLOWING_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
following_userids = self._rc.zrange(following_zset_key, 0, -1)
if following_userids is None or not following_userids:
result[pytwis_constants.FOLLOWING_LIST_KEY] = []
return (True, result)
# Get the list of followings' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for following_userid in following_userids:
following_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()
return (True, result)
def get_timeline(self, auth_secret, max_cnt_tweets):
"""Get the general or user timeline.
If an empty authentication secret is given, this method returns the general timeline.
If an authentication secret is given and it is valid, this method returns the user timeline.
If an authentication secret is given but it is invalid, this method returns an error.
Parameters
----------
auth_secret: str
Either the authentication secret of the logged-in user or an empty string.
max_cnt_tweets: int
The maximum number of tweets included in the timeline. If it is set to -1,
then all the available tweets will be included.
Returns
-------
bool
True if the timeline is successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the timeline is successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
if auth_secret == '':
# An empty authentication secret implies getting the general timeline.
timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY
else:
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the user timeline.
timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = self._get_tweets(timeline_key, max_cnt_tweets)
return (True, result)
def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
"""Get the tweets posted by one user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
username:
The name of the user who post the tweets and may not be the logged-in user.
max_cnt_tweets: int
The maximum number of tweets included in the return. If it is set to -1,
then all the tweets posted by the user will be included.
Returns
-------
bool
True if the tweets are successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the tweets are successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, _ = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the userid from the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Get the tweets posted by the user.
user_tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = \
self._get_tweets(user_tweets_key, max_cnt_tweets)
return (True, result)
|
renweizhukov/pytwis | pytwis/pytwis.py | Pytwis.get_timeline | python | def get_timeline(self, auth_secret, max_cnt_tweets):
result = {pytwis_constants.ERROR_KEY: None}
if auth_secret == '':
# An empty authentication secret implies getting the general timeline.
timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY
else:
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the user timeline.
timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = self._get_tweets(timeline_key, max_cnt_tweets)
return (True, result) | Get the general or user timeline.
If an empty authentication secret is given, this method returns the general timeline.
If an authentication secret is given and it is valid, this method returns the user timeline.
If an authentication secret is given but it is invalid, this method returns an error.
Parameters
----------
auth_secret: str
Either the authentication secret of the logged-in user or an empty string.
max_cnt_tweets: int
The maximum number of tweets included in the timeline. If it is set to -1,
then all the available tweets will be included.
Returns
-------
bool
True if the timeline is successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the timeline is successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L859-L905 | [
"def _is_loggedin(self, auth_secret):\n \"\"\"Check if a user is logged-in by verifying the input authentication secret.\n\n Parameters\n ----------\n auth_secret: str\n The authentication secret of a logged-in user.\n\n Returns\n -------\n bool\n True if the authentication secret is valid, False otherwise.\n userid: str\n The user ID associated with the authentication secret if the authentication secret\n valid, None otherwise.\n \"\"\"\n # Get the userid from the authentication secret.\n userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)\n if userid is None:\n return (False, None)\n\n # Compare the input authentication secret with the stored one.\n userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)\n stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)\n if auth_secret == stored_auth_secret:\n return (True, userid)\n\n # TODO: Resolve the inconsistency of the two authentication secrets.\n return (False, None)\n",
"def _get_tweets(self, tweets_key, max_cnt_tweets):\n \"\"\"Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.\n\n Parameters\n ----------\n tweets_key: str\n The key of the Redis list which stores the tweets.\n max_cnt_tweets: int\n The maximum number of tweets included in the returned list. If it is set to -1,\n then all the available tweets will be included.\n\n Returns\n -------\n tweets\n A list of tweets\n \"\"\"\n tweets = []\n if max_cnt_tweets == 0:\n return tweets\n elif max_cnt_tweets == -1:\n # Return all the tweets in the timeline.\n last_tweet_index = -1\n else:\n # Return at most max_cnt_tweets tweets.\n last_tweet_index = max_cnt_tweets - 1\n\n # Get the post IDs of the tweets.\n post_ids = self._rc.lrange(tweets_key, 0, last_tweet_index)\n\n if not post_ids:\n return tweets\n\n with self._rc.pipeline() as pipe:\n # Get the tweets with their user IDs and UNIX timestamps.\n pipe.multi()\n for post_id in post_ids:\n post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)\n pipe.hgetall(post_id_key)\n tweets = pipe.execute()\n\n # Get the userid-to-username mappings for all the user IDs associated with the tweets.\n userid_set = {tweet[pytwis_constants.TWEET_USERID_KEY] for tweet in tweets}\n userid_list = []\n pipe.multi()\n for userid in userid_set:\n userid_list.append(userid)\n userid_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)\n pipe.hget(userid_key, pytwis_constants.USERNAME_KEY)\n username_list = pipe.execute()\n\n userid_to_username = {userid: username for userid, username in\\\n zip(userid_list, username_list)}\n\n # Add the username for the user ID of each tweet.\n for tweet in tweets:\n tweet[pytwis_constants.USERNAME_KEY] = \\\n userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]\n\n return tweets\n"
] | class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
def __init__(self, hostname='127.0.0.1', port=6379, socket='', db=0, password=''):
"""Initialize the class Pytiws.
Parameters
----------
hostname : str, optional
The Redis server hostname which is usually an IP address (default: 127.0.0.1).
port : int, optional
The Redis server port number (default: 6379).
socket: str, optional
The Redis server socket which will override hostname and port if it is given.
db : int, optional
The selected Redis database index (default: 0).
password : str, optional
The Redis server password (default: '').
Raises
------
ValueError
If failed to connect to the Redis server with either ResponseError or RedisTimeoutError.
"""
if socket:
self._rc = redis.StrictRedis(
unix_socket_path=socket,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
else:
self._rc = redis.StrictRedis(
host=hostname,
port=port,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
# Test the connection by ping.
try:
if self._rc.ping():
if socket:
print('Ping {} returned True'.format(socket))
else:
print('Ping {}:{} returned True'.format(hostname, port))
except (ResponseError, RedisTimeoutError) as excep:
raise ValueError(str(excep))
def _is_loggedin(self, auth_secret):
"""Check if a user is logged-in by verifying the input authentication secret.
Parameters
----------
auth_secret: str
The authentication secret of a logged-in user.
Returns
-------
bool
True if the authentication secret is valid, False otherwise.
userid: str
The user ID associated with the authentication secret if the authentication secret
valid, None otherwise.
"""
# Get the userid from the authentication secret.
userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
if userid is None:
return (False, None)
# Compare the input authentication secret with the stored one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
if auth_secret == stored_auth_secret:
return (True, userid)
# TODO: Resolve the inconsistency of the two authentication secrets.
return (False, None)
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error)
def register(self, username, password):
"""Register a new user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the new user is successfully registered, False otherwise.
result
An empty dict if the new user is successfully registered, a dict
containing the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_ALREADY_EXISTS.format(username)
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check the username.
if not Pytwis._check_username(username):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
return (False, result)
# Check the password.
if not Pytwis._check_password(password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Update the username-to-userid mapping.
with self._rc.pipeline() as pipe:
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# multiple clients are registering with the same username.
pipe.watch(pytwis_constants.USERS_KEY)
username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
if username_exists:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)
# Set the username-to-userid pair in USERS_HASH_KEY.
pipe.multi()
pipe.hset(pytwis_constants.USERS_KEY, username, userid)
pipe.execute()
break
except WatchError:
continue
# Generate the authentication secret.
auth_secret = secrets.token_hex()
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
# Generate the password hash.
# The format of the password hash looks like "method$salt$hash".
password_hash = generate_password_hash(password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
pipe.multi()
# Update the authentication_secret-to-userid mapping.
pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
# Create the user profile.
pipe.hmset(userid_profile_key,
{pytwis_constants.USERNAME_KEY: username,
pytwis_constants.PASSWORD_HASH_KEY: password_hash,
pytwis_constants.AUTH_KEY: auth_secret})
pipe.execute()
return (True, result)
def change_password(self, auth_secret, old_password, new_password):
"""Change the user password.
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
if old_password == new_password:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
return (False, result)
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Check if the old password matches.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if not check_password_hash(stored_password_hash, old_password):
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
return (False, result)
# Check the password.
if not Pytwis._check_password(new_password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Generate the new password hash.
# The format of the new password hash looks like "method$salt$hash".
new_password_hash = generate_password_hash(new_password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
# Replace the old password hash by the new one and the old authentication secret
# by the new one.
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY,
new_password_hash)
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.AUTH_KEY] = new_auth_secret
return (True, result)
def login(self, username, password):
"""Log into a user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the login is successful, False otherwise.
result
A dict containing the authentication secret with the key AUTH_KEY
if the login is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
- ERROR_INCORRECT_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Get the user-id based on the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Compare the input password hash with the stored one. If it matches,
# return the authentication secret.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if check_password_hash(stored_password_hash, password):
result[pytwis_constants.AUTH_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
return (True, result)
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
return (False, result)
def logout(self, auth_secret):
"""Log out of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
None if the logout is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Replace the old authentication secret by the new one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.USERNAME_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.AUTH_KEY] = ''
return (True, result)
def get_user_profile(self, auth_secret):
"""Get the profile (i.e., username, password, etc.) of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
A dict containing the following keys:
- USERNAME_KEY
- PASSWORD_HASH_KEY
- AUTH_KEY
if the user profile is obtained successfully; otherwise a dict
containing the error string with the key ERROR_KEY.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
result = self._rc.hgetall(userid_profile_key)
return (True, result)
def post_tweet(self, auth_secret, tweet):
"""Post a tweet.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
tweet: str
The tweet that will be posted.
Returns
-------
bool
True if the tweet is successfully posted, False otherwise.
result
None if the tweet is successfully posted, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the next user-id. If the key "next_user_id" doesn't exist,
# it will be created and initialized as 0, and then incremented by 1.
post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)
post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
followers = self._rc.zrange(follower_zset_key, 0, -1)
unix_time = int(time.time())
with self._rc.pipeline() as pipe:
pipe.multi()
# Store the tweet with its user ID and UNIX timestamp.
pipe.hmset(post_id_key,
{pytwis_constants.TWEET_USERID_KEY: userid,
pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,
pytwis_constants.TWEET_BODY_KEY: tweet})
# Add the tweet to the user timeline.
pipe.lpush(post_id_timeline_key, post_id)
# Add the tweet to the tweet list posted by the user.
pipe.lpush(post_id_user_key, post_id)
# Write fanout the tweet to all the followers' timelines.
for follower in followers:
post_id_follower_key = \
pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)
pipe.lpush(post_id_follower_key, post_id)
# Add the tweet to the general timeline and left trim the general timeline
# to only retain the latest GENERAL_TIMELINE_LIST_MAX_TWEET_CNT tweets.
pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)
pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
0,
pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)
pipe.execute()
return (True, result)
def follow(self, auth_secret, followee_username):
"""Follow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the follow is successful, False otherwise.
result
None if the follow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
- ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
elif followee_userid == userid:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
return (False, result)
break
except WatchError:
continue
# Update the two zset 'followers:[followee_username]' and 'following:[username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
unix_time = int(time.time())
pipe.multi()
pipe.zadd(follower_zset_key, unix_time, userid)
pipe.zadd(following_zset_key, unix_time, followee_userid)
pipe.execute()
return (True, result)
def unfollow(self, auth_secret, followee_username):
"""Unfollow a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
followee_username: str
The username of the followee.
Returns
-------
bool
True if the unfollow is successful, False otherwise.
result
None if the unfollow is successful, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
with self._rc.pipeline() as pipe:
# Check if the followee exists.
while True:
try:
# Put a watch on the Hash 'users': username -> user-id, in case that
# other clients are modifying the Hash 'users'.
pipe.watch(pytwis_constants.USERS_KEY)
followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
if followee_userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
format(followee_username)
return (False, result)
break
except WatchError:
continue
# Remove followee_userid from the zset 'following:[username]' and remove userid
# from the zset 'followers:[followee_username]'.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
pipe.multi()
pipe.zrem(follower_zset_key, userid)
pipe.zrem(following_zset_key, followee_userid)
pipe.execute()
return (True, result)
def get_followers(self, auth_secret):
"""Get the follower list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the follower list is successfully obtained, False otherwise.
result
A dict containing the follower list with the key FOLLOWER_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
if follower_userids is None or not follower_userids:
result[pytwis_constants.FOLLOWER_LIST_KEY] = []
return (True, result)
# Get the list of followers' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for follower_userid in follower_userids:
follower_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()
return (True, result)
def get_following(self, auth_secret):
"""Get the following list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the following list is successfully obtained, False otherwise.
result
A dict containing the following list with the key FOLLOWING_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
following_userids = self._rc.zrange(following_zset_key, 0, -1)
if following_userids is None or not following_userids:
result[pytwis_constants.FOLLOWING_LIST_KEY] = []
return (True, result)
# Get the list of followings' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for following_userid in following_userids:
following_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()
return (True, result)
def _get_tweets(self, tweets_key, max_cnt_tweets):
"""Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.
Parameters
----------
tweets_key: str
The key of the Redis list which stores the tweets.
max_cnt_tweets: int
The maximum number of tweets included in the returned list. If it is set to -1,
then all the available tweets will be included.
Returns
-------
tweets
A list of tweets
"""
tweets = []
if max_cnt_tweets == 0:
return tweets
elif max_cnt_tweets == -1:
# Return all the tweets in the timeline.
last_tweet_index = -1
else:
# Return at most max_cnt_tweets tweets.
last_tweet_index = max_cnt_tweets - 1
# Get the post IDs of the tweets.
post_ids = self._rc.lrange(tweets_key, 0, last_tweet_index)
if not post_ids:
return tweets
with self._rc.pipeline() as pipe:
# Get the tweets with their user IDs and UNIX timestamps.
pipe.multi()
for post_id in post_ids:
post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
pipe.hgetall(post_id_key)
tweets = pipe.execute()
# Get the userid-to-username mappings for all the user IDs associated with the tweets.
userid_set = {tweet[pytwis_constants.TWEET_USERID_KEY] for tweet in tweets}
userid_list = []
pipe.multi()
for userid in userid_set:
userid_list.append(userid)
userid_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
pipe.hget(userid_key, pytwis_constants.USERNAME_KEY)
username_list = pipe.execute()
userid_to_username = {userid: username for userid, username in\
zip(userid_list, username_list)}
# Add the username for the user ID of each tweet.
for tweet in tweets:
tweet[pytwis_constants.USERNAME_KEY] = \
userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]
return tweets
def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
"""Get the tweets posted by one user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
username:
The name of the user who post the tweets and may not be the logged-in user.
max_cnt_tweets: int
The maximum number of tweets included in the return. If it is set to -1,
then all the tweets posted by the user will be included.
Returns
-------
bool
True if the tweets are successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the tweets are successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, _ = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the userid from the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Get the tweets posted by the user.
user_tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = \
self._get_tweets(user_tweets_key, max_cnt_tweets)
return (True, result)
|
renweizhukov/pytwis | pytwis/pytwis.py | Pytwis.get_user_tweets | python | def get_user_tweets(self, auth_secret, username, max_cnt_tweets):
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, _ = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the userid from the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Get the tweets posted by the user.
user_tweets_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = \
self._get_tweets(user_tweets_key, max_cnt_tweets)
return (True, result) | Get the tweets posted by one user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
username:
The name of the user who post the tweets and may not be the logged-in user.
max_cnt_tweets: int
The maximum number of tweets included in the return. If it is set to -1,
then all the tweets posted by the user will be included.
Returns
-------
bool
True if the tweets are successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the tweets are successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username) | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis.py#L907-L956 | [
"def _is_loggedin(self, auth_secret):\n \"\"\"Check if a user is logged-in by verifying the input authentication secret.\n\n Parameters\n ----------\n auth_secret: str\n The authentication secret of a logged-in user.\n\n Returns\n -------\n bool\n True if the authentication secret is valid, False otherwise.\n userid: str\n The user ID associated with the authentication secret if the authentication secret\n valid, None otherwise.\n \"\"\"\n # Get the userid from the authentication secret.\n userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)\n if userid is None:\n return (False, None)\n\n # Compare the input authentication secret with the stored one.\n userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)\n stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)\n if auth_secret == stored_auth_secret:\n return (True, userid)\n\n # TODO: Resolve the inconsistency of the two authentication secrets.\n return (False, None)\n",
"def _get_tweets(self, tweets_key, max_cnt_tweets):\n \"\"\"Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.\n\n Parameters\n ----------\n tweets_key: str\n The key of the Redis list which stores the tweets.\n max_cnt_tweets: int\n The maximum number of tweets included in the returned list. If it is set to -1,\n then all the available tweets will be included.\n\n Returns\n -------\n tweets\n A list of tweets\n \"\"\"\n tweets = []\n if max_cnt_tweets == 0:\n return tweets\n elif max_cnt_tweets == -1:\n # Return all the tweets in the timeline.\n last_tweet_index = -1\n else:\n # Return at most max_cnt_tweets tweets.\n last_tweet_index = max_cnt_tweets - 1\n\n # Get the post IDs of the tweets.\n post_ids = self._rc.lrange(tweets_key, 0, last_tweet_index)\n\n if not post_ids:\n return tweets\n\n with self._rc.pipeline() as pipe:\n # Get the tweets with their user IDs and UNIX timestamps.\n pipe.multi()\n for post_id in post_ids:\n post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)\n pipe.hgetall(post_id_key)\n tweets = pipe.execute()\n\n # Get the userid-to-username mappings for all the user IDs associated with the tweets.\n userid_set = {tweet[pytwis_constants.TWEET_USERID_KEY] for tweet in tweets}\n userid_list = []\n pipe.multi()\n for userid in userid_set:\n userid_list.append(userid)\n userid_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)\n pipe.hget(userid_key, pytwis_constants.USERNAME_KEY)\n username_list = pipe.execute()\n\n userid_to_username = {userid: username for userid, username in\\\n zip(userid_list, username_list)}\n\n # Add the username for the user ID of each tweet.\n for tweet in tweets:\n tweet[pytwis_constants.USERNAME_KEY] = \\\n userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]\n\n return tweets\n"
] | class Pytwis:
"""This class implements all the interfaces to the Redis database of the Twitter-toy-clone."""
def __init__(self, hostname='127.0.0.1', port=6379, socket='', db=0, password=''):
"""Initialize the class Pytiws.
Parameters
----------
hostname : str, optional
The Redis server hostname which is usually an IP address (default: 127.0.0.1).
port : int, optional
The Redis server port number (default: 6379).
socket: str, optional
The Redis server socket which will override hostname and port if it is given.
db : int, optional
The selected Redis database index (default: 0).
password : str, optional
The Redis server password (default: '').
Raises
------
ValueError
If failed to connect to the Redis server with either ResponseError or RedisTimeoutError.
"""
if socket:
self._rc = redis.StrictRedis(
unix_socket_path=socket,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
else:
self._rc = redis.StrictRedis(
host=hostname,
port=port,
db=db,
password=password,
decode_responses=True, # Decode the response bytes into strings.
socket_connect_timeout=pytwis_constants.REDIS_SOCKET_CONNECT_TIMEOUT)
# Test the connection by ping.
try:
if self._rc.ping():
if socket:
print('Ping {} returned True'.format(socket))
else:
print('Ping {}:{} returned True'.format(hostname, port))
except (ResponseError, RedisTimeoutError) as excep:
raise ValueError(str(excep))
def _is_loggedin(self, auth_secret):
"""Check if a user is logged-in by verifying the input authentication secret.
Parameters
----------
auth_secret: str
The authentication secret of a logged-in user.
Returns
-------
bool
True if the authentication secret is valid, False otherwise.
userid: str
The user ID associated with the authentication secret if the authentication secret
valid, None otherwise.
"""
# Get the userid from the authentication secret.
userid = self._rc.hget(pytwis_constants.AUTHS_KEY, auth_secret)
if userid is None:
return (False, None)
# Compare the input authentication secret with the stored one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_auth_secret = self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
if auth_secret == stored_auth_secret:
return (True, userid)
# TODO: Resolve the inconsistency of the two authentication secrets.
return (False, None)
@staticmethod
def _check_username(username):
"""Check if a username is valid.
A username is considered valid if:
3 characters length or more
each character can only be letter (either uppercase or lowercase), digit, '_', or '-'
the first character is a letter
Parameters
----------
username: str
Returns
-------
bool
True if the username is valid, False otherwise.
"""
return re.match(r'^[A-Za-z][A-Za-z0-9_-]{2,}$', username) is not None
@staticmethod
def _check_password(password):
"""Check the strength of a password.
A password is considered strong if
8 characters length or more
1 digit or more
1 uppercase letter or more
1 lowercase letter or more
1 symbol (excluding whitespace characters) or more
Parameters
----------
password: str
Returns
-------
bool
True if the password is strong enough, False otherwise.
"""
# Check the length.
length_error = len(password) < 8
# Search for digits.
digit_error = re.search(r'\d', password) is None
# Search for uppercase letters.
uppercase_error = re.search(r'[A-Z]', password) is None
# Search for lowercase letters.
lowercase_error = re.search(r'[a-z]', password) is None
# Search for symbols (excluding whitespace characters).
symbol_error = re.search(r'[^A-Za-z\d\s]', password) is None
return not (length_error or digit_error or uppercase_error or\
lowercase_error or symbol_error)
    def register(self, username, password):
        """Register a new user.

        Parameters
        ----------
        username: str
            The username.
        password: str
            The password.

        Returns
        -------
        bool
            True if the new user is successfully registered, False otherwise.
        result
            An empty dict if the new user is successfully registered, a dict
            containing the error string with the key ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        -   ERROR_INVALID_USERNAME
        -   ERROR_USERNAME_ALREADY_EXISTS.format(username)
        -   ERROR_WEAK_PASSWORD
        """
        result = {pytwis_constants.ERROR_KEY: None}

        # Check the username.
        if not Pytwis._check_username(username):
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INVALID_USERNAME
            return (False, result)

        # Check the password.
        if not Pytwis._check_password(password):
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
            return (False, result)

        # Update the username-to-userid mapping.
        with self._rc.pipeline() as pipe:
            while True:
                try:
                    # Put a watch on the Hash 'users': username -> user-id, in case that
                    # multiple clients are registering with the same username.
                    # While the pipeline is in WATCH (immediate-execution) mode,
                    # hexists() and incr() below return their results right away.
                    pipe.watch(pytwis_constants.USERS_KEY)
                    username_exists = pipe.hexists(pytwis_constants.USERS_KEY, username)
                    if username_exists:
                        result[pytwis_constants.ERROR_KEY] = \
                            pytwis_constants.ERROR_USERNAME_ALREADY_EXISTS.format(username)
                        return (False, result)

                    # Get the next user-id. If the key "next_user_id" doesn't exist,
                    # it will be created and initialized as 0, and then incremented by 1.
                    # NOTE(review): this INCR runs outside the MULTI block, so a
                    # WatchError retry skips one user-id; harmless but worth knowing.
                    userid = pipe.incr(pytwis_constants.NEXT_USER_ID_KEY)

                    # Set the username-to-userid pair in USERS_HASH_KEY.
                    pipe.multi()
                    pipe.hset(pytwis_constants.USERS_KEY, username, userid)
                    pipe.execute()

                    break
                except WatchError:
                    # Another client touched the 'users' hash; retry from the top.
                    continue

            # Generate the authentication secret.
            auth_secret = secrets.token_hex()
            userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)

            # Generate the password hash.
            # The format of the password hash looks like "method$salt$hash".
            password_hash = generate_password_hash(password,
                                                   method=\
                                                   pytwis_constants.PASSWORD_HASH_METHOD)

            pipe.multi()
            # Update the authentication_secret-to-userid mapping.
            pipe.hset(pytwis_constants.AUTHS_KEY, auth_secret, userid)
            # Create the user profile.
            pipe.hmset(userid_profile_key,
                       {pytwis_constants.USERNAME_KEY: username,
                        pytwis_constants.PASSWORD_HASH_KEY: password_hash,
                        pytwis_constants.AUTH_KEY: auth_secret})
            pipe.execute()

        return (True, result)
def change_password(self, auth_secret, old_password, new_password):
"""Change the user password.
Parameters
----------
auth_secret: str
The authentication secret which will be used for user authentication.
old_password: str
The old password before the change.
new_password: str
The new password after the change.
Returns
-------
bool
True if the password is successfully changed, False otherwise.
result
A dict containing the new authentication secret with the key AUTH_KEY
if the password is successfully changed, a dict containing the error
string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NEW_PASSWORD_NO_CHANGE
- ERROR_NOT_LOGGED_IN
- ERROR_INCORRECT_OLD_PASSWORD
- ERROR_WEAK_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
if old_password == new_password:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NEW_PASSWORD_NO_CHANGE
return (False, result)
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Check if the old password matches.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if not check_password_hash(stored_password_hash, old_password):
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_INCORRECT_OLD_PASSWORD
return (False, result)
# Check the password.
if not Pytwis._check_password(new_password):
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_WEAK_PASSWORD
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Generate the new password hash.
# The format of the new password hash looks like "method$salt$hash".
new_password_hash = generate_password_hash(new_password,
method=\
pytwis_constants.PASSWORD_HASH_METHOD)
# Replace the old password hash by the new one and the old authentication secret
# by the new one.
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY,
new_password_hash)
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.AUTH_KEY] = new_auth_secret
return (True, result)
def login(self, username, password):
"""Log into a user.
Parameters
----------
username: str
The username.
password: str
The password.
Returns
-------
bool
True if the login is successful, False otherwise.
result
A dict containing the authentication secret with the key AUTH_KEY
if the login is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
- ERROR_INCORRECT_PASSWORD
"""
result = {pytwis_constants.ERROR_KEY: None}
# Get the user-id based on the username.
userid = self._rc.hget(pytwis_constants.USERS_KEY, username)
if userid is None:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_USERNAME_NOT_EXIST_FORMAT.format(username)
return (False, result)
# Compare the input password hash with the stored one. If it matches,
# return the authentication secret.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
stored_password_hash = self._rc.hget(userid_profile_key,
pytwis_constants.PASSWORD_HASH_KEY)
if check_password_hash(stored_password_hash, password):
result[pytwis_constants.AUTH_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.AUTH_KEY)
return (True, result)
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_INCORRECT_PASSWORD
return (False, result)
def logout(self, auth_secret):
"""Log out of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
None if the logout is successful, a dict containing the error string
with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Generate the new authentication secret.
new_auth_secret = secrets.token_hex()
# Replace the old authentication secret by the new one.
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
with self._rc.pipeline() as pipe:
pipe.multi()
pipe.hset(userid_profile_key, pytwis_constants.AUTH_KEY, new_auth_secret)
pipe.hset(pytwis_constants.AUTHS_KEY, new_auth_secret, userid)
pipe.hdel(pytwis_constants.AUTHS_KEY, auth_secret)
pipe.execute()
result[pytwis_constants.USERNAME_KEY] = \
self._rc.hget(userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.AUTH_KEY] = ''
return (True, result)
def get_user_profile(self, auth_secret):
"""Get the profile (i.e., username, password, etc.) of a user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the logout is successful, False otherwise.
result
A dict containing the following keys:
- USERNAME_KEY
- PASSWORD_HASH_KEY
- AUTH_KEY
if the user profile is obtained successfully; otherwise a dict
containing the error string with the key ERROR_KEY.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = \
pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
userid_profile_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
result = self._rc.hgetall(userid_profile_key)
return (True, result)
    def post_tweet(self, auth_secret, tweet):
        """Post a tweet.

        Parameters
        ----------
        auth_secret: str
            The authentication secret of the logged-in user.
        tweet: str
            The tweet that will be posted.

        Returns
        -------
        bool
            True if the tweet is successfully posted, False otherwise.
        result
            None if the tweet is successfully posted, a dict containing
            the error string with the key ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        -   ERROR_NOT_LOGGED_IN
        """
        result = {pytwis_constants.ERROR_KEY: None}

        # Check if the user is logged in.
        loggedin, userid = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)

        # Get the next tweet (post) id. If the key "next_tweet_id" doesn't exist,
        # it will be created and initialized as 0, and then incremented by 1.
        post_id = self._rc.incr(pytwis_constants.NEXT_TWEET_ID_KEY)
        post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
        post_id_timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
        post_id_user_key = pytwis_constants.USER_TWEETS_KEY_FORMAT.format(userid)

        # Snapshot of the author's current followers; the fan-out below writes
        # the new post id into each of their timelines.
        follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
        followers = self._rc.zrange(follower_zset_key, 0, -1)

        unix_time = int(time.time())
        with self._rc.pipeline() as pipe:
            pipe.multi()

            # Store the tweet with its user ID and UNIX timestamp.
            pipe.hmset(post_id_key,
                       {pytwis_constants.TWEET_USERID_KEY: userid,
                        pytwis_constants.TWEET_UNIXTIME_KEY: unix_time,
                        pytwis_constants.TWEET_BODY_KEY: tweet})

            # Add the tweet to the user timeline.
            pipe.lpush(post_id_timeline_key, post_id)

            # Add the tweet to the tweet list posted by the user.
            pipe.lpush(post_id_user_key, post_id)

            # Write fanout the tweet to all the followers' timelines.
            for follower in followers:
                post_id_follower_key = \
                    pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(follower)
                pipe.lpush(post_id_follower_key, post_id)

            # Add the tweet to the general timeline and left trim the general timeline
            # to only retain the latest GENERAL_TIMELINE_MAX_TWEET_CNT tweets.
            pipe.lpush(pytwis_constants.GENERAL_TIMELINE_KEY, post_id)
            pipe.ltrim(pytwis_constants.GENERAL_TIMELINE_KEY,
                       0,
                       pytwis_constants.GENERAL_TIMELINE_MAX_TWEET_CNT - 1)

            pipe.execute()

        return (True, result)
    def follow(self, auth_secret, followee_username):
        """Follow a user.

        Parameters
        ----------
        auth_secret: str
            The authentication secret of the logged-in user.
        followee_username: str
            The username of the followee.

        Returns
        -------
        bool
            True if the follow is successful, False otherwise.
        result
            None if the follow is successful, a dict containing
            the error string with the key ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        -   ERROR_NOT_LOGGED_IN
        -   ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
        -   ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
        """
        result = {pytwis_constants.ERROR_KEY: None}

        # Check if the user is logged in.
        loggedin, userid = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)

        with self._rc.pipeline() as pipe:
            # Check if the followee exists.
            while True:
                try:
                    # Put a watch on the Hash 'users': username -> user-id, in case that
                    # other clients are modifying the Hash 'users'.
                    pipe.watch(pytwis_constants.USERS_KEY)
                    followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                    if followee_userid is None:
                        result[pytwis_constants.ERROR_KEY] = \
                            pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                            format(followee_username)
                        return (False, result)
                    elif followee_userid == userid:
                        # A user can't follow himself/herself.
                        result[pytwis_constants.ERROR_KEY] = \
                            pytwis_constants.ERROR_FOLLOW_YOURSELF_FORMAT.format(followee_username)
                        return (False, result)

                    break
                except WatchError:
                    continue

            # Update the two zset 'followers:[followee_username]' and 'following:[username]'.
            follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
            following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
            unix_time = int(time.time())
            pipe.multi()
            # NOTE(review): zadd(key, score, member) is the redis-py 2.x calling
            # convention; redis-py 3.x expects zadd(key, {member: score}).
            # Confirm the pinned redis-py version before upgrading.
            pipe.zadd(follower_zset_key, unix_time, userid)
            pipe.zadd(following_zset_key, unix_time, followee_userid)
            pipe.execute()

        return (True, result)
    def unfollow(self, auth_secret, followee_username):
        """Unfollow a user.

        Parameters
        ----------
        auth_secret: str
            The authentication secret of the logged-in user.
        followee_username: str
            The username of the followee.

        Returns
        -------
        bool
            True if the unfollow is successful, False otherwise.
        result
            None if the unfollow is successful, a dict containing
            the error string with the key ERROR_KEY otherwise.

        Note
        ----
        Possible error strings are listed as below:

        -   ERROR_NOT_LOGGED_IN
        -   ERROR_FOLLOWEE_NOT_EXIST_FORMAT.format(followee_username)
        """
        result = {pytwis_constants.ERROR_KEY: None}

        # Check if the user is logged in.
        loggedin, userid = self._is_loggedin(auth_secret)
        if not loggedin:
            result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
            return (False, result)

        with self._rc.pipeline() as pipe:
            # Check if the followee exists.
            while True:
                try:
                    # Put a watch on the Hash 'users': username -> user-id, in case that
                    # other clients are modifying the Hash 'users'.
                    pipe.watch(pytwis_constants.USERS_KEY)
                    followee_userid = pipe.hget(pytwis_constants.USERS_KEY, followee_username)
                    if followee_userid is None:
                        result[pytwis_constants.ERROR_KEY] = \
                            pytwis_constants.ERROR_FOLLOWEE_NOT_EXIST_FORMAT.\
                            format(followee_username)
                        return (False, result)

                    break
                except WatchError:
                    continue

            # Remove followee_userid from the zset 'following:[username]' and remove userid
            # from the zset 'followers:[followee_username]'.
            # ZREM is a no-op if the member is absent, so unfollowing someone
            # never followed silently succeeds.
            follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(followee_userid)
            following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
            pipe.multi()
            pipe.zrem(follower_zset_key, userid)
            pipe.zrem(following_zset_key, followee_userid)
            pipe.execute()

        return (True, result)
def get_followers(self, auth_secret):
"""Get the follower list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the follower list is successfully obtained, False otherwise.
result
A dict containing the follower list with the key FOLLOWER_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid)
follower_userids = self._rc.zrange(follower_zset_key, 0, -1)
if follower_userids is None or not follower_userids:
result[pytwis_constants.FOLLOWER_LIST_KEY] = []
return (True, result)
# Get the list of followers' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for follower_userid in follower_userids:
follower_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid)
pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute()
return (True, result)
def get_following(self, auth_secret):
"""Get the following list of a logged-in user.
Parameters
----------
auth_secret: str
The authentication secret of the logged-in user.
Returns
-------
bool
True if the following list is successfully obtained, False otherwise.
result
A dict containing the following list with the key FOLLOWING_LIST_KEY
if the follower list is successfully obtained, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the list of followers' userids.
following_zset_key = pytwis_constants.FOLLOWING_KEY_FORMAT.format(userid)
following_userids = self._rc.zrange(following_zset_key, 0, -1)
if following_userids is None or not following_userids:
result[pytwis_constants.FOLLOWING_LIST_KEY] = []
return (True, result)
# Get the list of followings' usernames from their userids.
with self._rc.pipeline() as pipe:
pipe.multi()
for following_userid in following_userids:
following_userid_profile_key = \
pytwis_constants.USER_PROFILE_KEY_FORMAT.format(following_userid)
pipe.hget(following_userid_profile_key, pytwis_constants.USERNAME_KEY)
result[pytwis_constants.FOLLOWING_LIST_KEY] = pipe.execute()
return (True, result)
    def _get_tweets(self, tweets_key, max_cnt_tweets):
        """Get at most `max_cnt_tweets` tweets from the Redis list `tweets_key`.

        Parameters
        ----------
        tweets_key: str
            The key of the Redis list which stores the tweets.
        max_cnt_tweets: int
            The maximum number of tweets included in the returned list. If it is set to -1,
            then all the available tweets will be included.

        Returns
        -------
        tweets
            A list of tweets. Each tweet is a dict carrying the fields stored
            by post_tweet() plus the author's username.
        """
        tweets = []
        if max_cnt_tweets == 0:
            return tweets
        elif max_cnt_tweets == -1:
            # Return all the tweets in the timeline.
            last_tweet_index = -1
        else:
            # Return at most max_cnt_tweets tweets.
            last_tweet_index = max_cnt_tweets - 1

        # Get the post IDs of the tweets.
        post_ids = self._rc.lrange(tweets_key, 0, last_tweet_index)

        if not post_ids:
            return tweets

        with self._rc.pipeline() as pipe:
            # Get the tweets with their user IDs and UNIX timestamps.
            pipe.multi()
            for post_id in post_ids:
                post_id_key = pytwis_constants.TWEET_KEY_FORMAT.format(post_id)
                pipe.hgetall(post_id_key)
            tweets = pipe.execute()

            # Get the userid-to-username mappings for all the user IDs associated
            # with the tweets. The same pipeline is reused for this second batch.
            userid_set = {tweet[pytwis_constants.TWEET_USERID_KEY] for tweet in tweets}
            userid_list = []
            pipe.multi()
            for userid in userid_set:
                userid_list.append(userid)
                userid_key = pytwis_constants.USER_PROFILE_KEY_FORMAT.format(userid)
                pipe.hget(userid_key, pytwis_constants.USERNAME_KEY)
            username_list = pipe.execute()

            # userid_list and username_list are index-aligned because the HGET
            # commands were queued in the same order as the appends above.
            userid_to_username = {userid: username for userid, username in\
                                  zip(userid_list, username_list)}

            # Add the username for the user ID of each tweet.
            for tweet in tweets:
                tweet[pytwis_constants.USERNAME_KEY] = \
                    userid_to_username[tweet[pytwis_constants.TWEET_USERID_KEY]]

        return tweets
def get_timeline(self, auth_secret, max_cnt_tweets):
"""Get the general or user timeline.
If an empty authentication secret is given, this method returns the general timeline.
If an authentication secret is given and it is valid, this method returns the user timeline.
If an authentication secret is given but it is invalid, this method returns an error.
Parameters
----------
auth_secret: str
Either the authentication secret of the logged-in user or an empty string.
max_cnt_tweets: int
The maximum number of tweets included in the timeline. If it is set to -1,
then all the available tweets will be included.
Returns
-------
bool
True if the timeline is successfully retrieved, False otherwise.
result
A dict containing a list of tweets with the key TWEETS_KEY if
the timeline is successfully retrieved, a dict containing
the error string with the key ERROR_KEY otherwise.
Note
----
Possible error strings are listed as below:
- ERROR_NOT_LOGGED_IN
"""
result = {pytwis_constants.ERROR_KEY: None}
if auth_secret == '':
# An empty authentication secret implies getting the general timeline.
timeline_key = pytwis_constants.GENERAL_TIMELINE_KEY
else:
# Check if the user is logged in.
loggedin, userid = self._is_loggedin(auth_secret)
if not loggedin:
result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN
return (False, result)
# Get the user timeline.
timeline_key = pytwis_constants.USER_TIMELINE_KEY_FORMAT.format(userid)
result[pytwis_constants.TWEETS_KEY] = self._get_tweets(timeline_key, max_cnt_tweets)
return (True, result)
|
renweizhukov/pytwis | pytwis/pytwis_clt.py | validate_command | python | def validate_command(raw_command):
parsed_command = raw_command.split()
arg_count = len(parsed_command) - 1
if not parsed_command:
return
if parsed_command[0] == pytwis_clt_constants.CMD_REGISTER:
if arg_count < 2:
raise ValueError('{cmd} {{{arg1}}} {{{arg2}}}'.\
format(cmd=pytwis_clt_constants.CMD_REGISTER,
arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_PASSWORD))
elif parsed_command[0] == pytwis_clt_constants.CMD_LOGIN:
if arg_count < 2:
raise ValueError('{cmd} {{{arg1}}} {{{arg2}}}'.\
format(cmd=pytwis_clt_constants.CMD_LOGIN,
arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_PASSWORD))
elif parsed_command[0] == pytwis_clt_constants.CMD_LOGOUT:
pass
elif parsed_command[0] == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
if arg_count < 3:
raise ValueError('{cmd} {{{arg1}}} {{{arg2}}} {{{arg3}}}'.\
format(cmd=pytwis_clt_constants.CMD_CHANGE_PASSWORD,
arg1=pytwis_clt_constants.ARG_OLD_PASSWORD,
arg2=pytwis_clt_constants.ARG_NEW_PASSWORD,
arg3=pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD))
elif parsed_command[0] == pytwis_clt_constants.CMD_GET_USER_PROFILE:
pass
elif parsed_command[0] == pytwis_clt_constants.CMD_POST:
if arg_count < 1:
raise ValueError('{cmd} {{{arg}}}'.format(cmd=pytwis_clt_constants.CMD_POST,
arg=pytwis_clt_constants.ARG_TWEET))
elif parsed_command[0] == pytwis_clt_constants.CMD_FOLLOW:
if arg_count < 1:
raise ValueError('{cmd} {{{arg}}}'.format(cmd=pytwis_clt_constants.CMD_FOLLOW,
arg=pytwis_clt_constants.ARG_FOLLOWEE))
elif parsed_command[0] == pytwis_clt_constants.CMD_UNFOLLOW:
if arg_count < 1:
raise ValueError('{cmd} {{{arg}}}'.format(cmd=pytwis_clt_constants.CMD_UNFOLLOW,
arg=pytwis_clt_constants.ARG_FOLLOWEE))
elif parsed_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWERS:
pass
elif parsed_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
pass
elif parsed_command[0] == pytwis_clt_constants.CMD_TIMELINE:
if arg_count > 1:
raise ValueError('{cmd} {{{arg}}} or {cmd}'.\
format(cmd=pytwis_clt_constants.CMD_TIMELINE,
arg=pytwis_clt_constants.ARG_MAX_TWEETS))
elif parsed_command[0] == pytwis_clt_constants.CMD_GET_USER_TWEETS:
if arg_count > 2:
raise ValueError('{cmd} {{{arg1}}} {{{arg2}}} or {cmd} {{{arg1}}} or {cmd}'.\
format(cmd=pytwis_clt_constants.CMD_GET_USER_TWEETS,
arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_MAX_TWEETS))
elif (parsed_command[0] == pytwis_clt_constants.CMD_EXIT) or\
(parsed_command[0] == pytwis_clt_constants.CMD_QUIT):
pass
else:
raise ValueError('Invalid pytwis command') | Validate the command input.
Currently we only check the number of arguments according to the command type.
Parameters
----------
raw_command: str
The raw command input, e.g., `register xxxxxx yyyyyy`.
Raises
------
ValueError
If the raw command input doesn't have the correct number of arguments. | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis_clt.py#L124-L199 | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A command-line tool which uses `pytwis` to interact with the Redis database of
a Twitter toy clone.
To see the help information,
.. code:: bash
$ ./pytwis_clt.py -h
$ ./pytwis_clt.py --help
After launching `pytwis_clt.py`, you will be able to use the following commands:
* Register a new user:
.. code:: bash
127.0.0.1:6379> register {username} {password}
* Log into a user:
.. code:: bash
127.0.0.1:6379> login {username} {password}
* Log out of a user:
.. code:: bash
127.0.0.1:6379> logout
* Change the password:
.. code:: bash
127.0.0.1:6379> changepwd {old_password} {new_password} {confirmed_new_password}
* Get the profile of the current user:
.. code:: bash
127.0.0.1:6379> userprofile
* Post a tweet:
.. code:: bash
127.0.0.1:6379> post {tweet}
* Follow a user:
.. code:: bash
127.0.0.1:6379> follow {followee_username}
* Unfollow a user:
.. code:: bash
127.0.0.1:6379> unfollow {followee_username}
* Get the follower list:
.. code:: bash
127.0.0.1:6379> followers
* Get the following list:
.. code:: bash
127.0.0.1:6379> followings
* Get the timeline:
.. code:: bash
127.0.0.1:6379> timeline
127.0.0.1:6379> timeline {max_tweet_count}
Note that if a user is logged in, `timeline` will return the user timeline;
otherwise `timeline` will return the general timeline.
* Get the tweets posted by a user:
.. code:: bash
127.0.0.1:6379> tweetsby
127.0.0.1:6379> tweetsby {username}
127.0.0.1:6379> tweetsby {username} {max_tweet_count}
Note that if no username is given, `tweetsby` will return the tweets posted
by the currently logged-in user.
* Exit the program:
.. code:: bash
127.0.0.1:6379> exit
127.0.0.1:6379> quit
"""
import argparse
import datetime
import sys
import parse
if __package__:
# If this module is imported as part of the pytwis package, then use
# the relative import.
from . import pytwis_constants
from . import pytwis_clt_constants
from . import pytwis
else:
# If this module is executed locally as a script, then don't use
# the relative import.
import pytwis_constants # pylint: disable=import-error
import pytwis_clt_constants # pylint: disable=import-error
import pytwis
def pytwis_command_parser(raw_command):
    """Parse the command input.

    Parameters
    ----------
    raw_command: str
        The raw command input, e.g., `register xxxxxx yyyyyy`.

    Returns
    -------
    arg_dict: dict(str, str or int)
        The parsed command output.
        {'command':'register', 'username': <username>, 'password': <password>} for `register`.

    Raises
    ------
    ValueError
        If the raw command can't be parsed correctly, e.g., it has an incorrect number of
        arguments or incorrect arguments.
    """
    # validate_command() raises ValueError on a wrong argument count, so the
    # per-command branches below may index splited_raw_command[1] safely.
    validate_command(raw_command)

    # Some command (e.g., logout) may not have arguments.
    # Separate the command from its arguments.
    splited_raw_command = raw_command.split(' ', 1)
    arg_dict = {}
    if splited_raw_command[0] == pytwis_clt_constants.CMD_REGISTER:
        # register must have two arguments: username and password.
        args = splited_raw_command[1]
        arg_dict = parse.parse('{{{arg1}}} {{{arg2}}}'.\
                               format(arg1=pytwis_clt_constants.ARG_USERNAME,
                                      arg2=pytwis_clt_constants.ARG_PASSWORD),
                               args)
        if arg_dict is None:
            raise ValueError('{} has incorrect arguments'.format(pytwis_clt_constants.CMD_REGISTER))
        elif ' ' in arg_dict[pytwis_clt_constants.ARG_PASSWORD]:
            raise ValueError("password can't contain spaces")
        print('{}: username = {}, password = {}'.\
              format(pytwis_clt_constants.CMD_REGISTER,
                     arg_dict[pytwis_clt_constants.ARG_USERNAME],
                     arg_dict[pytwis_clt_constants.ARG_PASSWORD]))
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_LOGIN:
        # login must have two arguments: username and password.
        args = splited_raw_command[1]
        arg_dict = parse.parse('{{{arg1}}} {{{arg2}}}'.\
                               format(arg1=pytwis_clt_constants.ARG_USERNAME,
                                      arg2=pytwis_clt_constants.ARG_PASSWORD),
                               args)
        if arg_dict is None:
            raise ValueError('{} has incorrect arguments'.format(pytwis_clt_constants.CMD_LOGIN))
        print('{}: username = {}, password = {}'.\
              format(pytwis_clt_constants.CMD_LOGIN,
                     arg_dict[pytwis_clt_constants.ARG_USERNAME],
                     arg_dict[pytwis_clt_constants.ARG_PASSWORD]))
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_LOGOUT:
        # logout doesn't have any arguments.
        pass
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
        # changepwd must have three arguments: old_password, new_password, and
        # confirmed_new_password.
        args = splited_raw_command[1]
        arg_dict = parse.parse('{{{arg1}}} {{{arg2}}} {{{arg3}}}'.\
                               format(arg1=pytwis_clt_constants.ARG_OLD_PASSWORD,
                                      arg2=pytwis_clt_constants.ARG_NEW_PASSWORD,
                                      arg3=pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD),
                               args)
        if arg_dict is None:
            raise ValueError('{} has incorrect arguments'.\
                             format(pytwis_clt_constants.CMD_CHANGE_PASSWORD))
        elif arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD] !=\
             arg_dict[pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD]:
            raise ValueError('The confirmed new password is different from the new password')
        elif arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD] ==\
             arg_dict[pytwis_clt_constants.ARG_OLD_PASSWORD]:
            raise ValueError('The new password is the same as the old password')
        print('{}: old = {}, new = {}'.format(pytwis_clt_constants.CMD_CHANGE_PASSWORD,
                                              arg_dict[pytwis_clt_constants.ARG_OLD_PASSWORD],
                                              arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD]))
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_USER_PROFILE:
        # userprofile doesn't have any arguments.
        pass
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_POST:
        # post must have one argument: tweet
        arg_dict = {pytwis_clt_constants.ARG_TWEET: splited_raw_command[1]}
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_FOLLOW:
        # follow must have one argument: followee.
        arg_dict = {pytwis_clt_constants.ARG_FOLLOWEE: splited_raw_command[1]}
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_UNFOLLOW:
        # unfollow must have one argument: followee.
        arg_dict = {pytwis_clt_constants.ARG_FOLLOWEE: splited_raw_command[1]}
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWERS:
        # followers doesn't have any arguments.
        pass
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
        # followings doesn't have any arguments.
        pass
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_TIMELINE:
        # timeline has either zero or one argument.
        max_cnt_tweets = -1
        if len(splited_raw_command) >= 2:
            max_cnt_tweets = int(splited_raw_command[1])
        arg_dict = {pytwis_clt_constants.ARG_MAX_TWEETS: max_cnt_tweets}
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_USER_TWEETS:
        # tweetsby has either zero or one or two arguments.
        arg_dict = {pytwis_clt_constants.ARG_USERNAME: None,
                    pytwis_clt_constants.ARG_MAX_TWEETS: -1}
        if len(splited_raw_command) >= 2:
            # tweetsby has either one or two arguments.
            args = splited_raw_command[1]
            arg_dict = parse.parse('{{{arg1}}} {{{arg2}:d}}'.\
                                   format(arg1=pytwis_clt_constants.ARG_USERNAME,
                                          arg2=pytwis_clt_constants.ARG_MAX_TWEETS),
                                   args)
            if arg_dict is None:
                # tweetsby has only one argument.
                arg_dict = {pytwis_clt_constants.ARG_USERNAME: args}
                arg_dict[pytwis_clt_constants.ARG_MAX_TWEETS] = -1
    elif (splited_raw_command[0] == pytwis_clt_constants.CMD_EXIT) or\
         (splited_raw_command[0] == pytwis_clt_constants.CMD_QUIT):
        # exit or quit doesn't have any arguments.
        pass
    else:
        # validate_command() has already rejected unknown commands, so this
        # branch is unreachable in practice.
        pass

    # parse.parse() returns a parse.Result; flatten it into a plain dict.
    if isinstance(arg_dict, parse.Result):
        arg_dict = arg_dict.named
    # Record which command was parsed alongside its arguments.
    arg_dict[pytwis_clt_constants.ARG_COMMAND] = splited_raw_command[0]

    return arg_dict
def print_tweets(tweets):
    """Pretty-print tweets to stdout, framed by "="/"-" separator rules.

    Parameters
    ----------
    tweets: list(dict)
        Tweets to display; each dict holds the author's username, the post
        time as a Unix timestamp, and the tweet body.
    """
    outer_rule = '=' * 60
    inner_rule = '-' * 60
    print(outer_rule)
    for seq_no, one_tweet in enumerate(tweets):
        # fromtimestamp() renders the Unix timestamp in the local timezone.
        posted_at = datetime.datetime.fromtimestamp(
            int(one_tweet[pytwis_constants.TWEET_UNIXTIME_KEY]))
        print(inner_rule)
        print('Tweet {}:'.format(seq_no))
        print('Username:', one_tweet[pytwis_constants.USERNAME_KEY])
        print('Time:', posted_at.strftime('%Y-%m-%d %H:%M:%S'))
        print('Body:\n\t', one_tweet[pytwis_constants.TWEET_BODY_KEY])
        print(inner_rule)
    print(outer_rule)
def pytwis_command_processor(twis, auth_secret, args):
    """Process the parsed command.

    Dispatches on the command name to the matching Pytwis API call and prints
    the outcome. ``auth_secret[0]`` is updated in place whenever the
    authentication secret changes (login, logout, changepwd).

    Parameters
    ----------
    twis: Pytwis
        A Pytwis instance which interacts with the Redis database of the Twitter toy clone.
    auth_secret: list(str)
        A one-element list holding the authentication secret of the logged-in
        user; a list (not a plain str) so this function can update it in place.
    args:
        The parsed command output by pytwis_command_parser().
    """
    command = args[pytwis_clt_constants.ARG_COMMAND]
    if command == pytwis_clt_constants.CMD_REGISTER:
        succeeded, result = twis.register(args[pytwis_clt_constants.ARG_USERNAME],
                                          args[pytwis_clt_constants.ARG_PASSWORD])
        if succeeded:
            print('Registered {}'.format(args[pytwis_clt_constants.ARG_USERNAME]))
        else:
            print("Couldn't register {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_USERNAME],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_LOGIN:
        succeeded, result = twis.login(args[pytwis_clt_constants.ARG_USERNAME],
                                       args[pytwis_clt_constants.ARG_PASSWORD])
        if succeeded:
            auth_secret[0] = result[pytwis_constants.AUTH_KEY]
            print('Logged into username {}'.format(args[pytwis_clt_constants.ARG_USERNAME]))
        else:
            print("Couldn't log into username {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_USERNAME],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_LOGOUT:
        succeeded, result = twis.logout(auth_secret[0])
        if succeeded:
            auth_secret[0] = result[pytwis_constants.AUTH_KEY]
            print('Logged out of username {}'.format(result[pytwis_constants.USERNAME_KEY]))
        else:
            print("Couldn't log out with error = {}".format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
        succeeded, result = twis.change_password(auth_secret[0],
                                                 args[pytwis_clt_constants.ARG_OLD_PASSWORD],
                                                 args[pytwis_clt_constants.ARG_NEW_PASSWORD])
        if succeeded:
            auth_secret[0] = result[pytwis_constants.AUTH_KEY]
            print('Changed the password')
        else:
            print("Couldn't change the password with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_USER_PROFILE:
        succeeded, result = twis.get_user_profile(auth_secret[0])
        if succeeded:
            print('Got the user profile')
            print('=' * 20)
            for key, value in result.items():
                print('{}: {}'.format(key, value))
            print('=' * 20)
        else:
            print("Couldn't get the user profile with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_POST:
        # Consistency fix: use the ARG_TWEET constant rather than the
        # hard-coded 'tweet' key emitted by pytwis_command_parser().
        succeeded, result = twis.post_tweet(auth_secret[0],
                                            args[pytwis_clt_constants.ARG_TWEET])
        if succeeded:
            print('Posted the tweet')
        else:
            print("Couldn't post the tweet with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_FOLLOW:
        succeeded, result = twis.follow(auth_secret[0],
                                        args[pytwis_clt_constants.ARG_FOLLOWEE])
        if succeeded:
            print('Followed username {}'.format(args[pytwis_clt_constants.ARG_FOLLOWEE]))
        else:
            print("Couldn't follow the username {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_FOLLOWEE],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_UNFOLLOW:
        succeeded, result = twis.unfollow(auth_secret[0],
                                          args[pytwis_clt_constants.ARG_FOLLOWEE])
        if succeeded:
            print('Unfollowed username {}'.format(args[pytwis_clt_constants.ARG_FOLLOWEE]))
        else:
            print("Couldn't unfollow the username {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_FOLLOWEE],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_FOLLOWERS:
        succeeded, result = twis.get_followers(auth_secret[0])
        if succeeded:
            print('Got the list of {} followers'.\
                  format(len(result[pytwis_constants.FOLLOWER_LIST_KEY])))
            print('=' * 20)
            for follower in result[pytwis_constants.FOLLOWER_LIST_KEY]:
                print('\t' + follower)
            print('=' * 20)
        else:
            print("Couldn't get the follower list with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
        succeeded, result = twis.get_following(auth_secret[0])
        if succeeded:
            print('Got the list of {} followings'.\
                  format(len(result[pytwis_constants.FOLLOWING_LIST_KEY])))
            print('=' * 60)
            for following in result[pytwis_constants.FOLLOWING_LIST_KEY]:
                print('\t' + following)
            print('=' * 60)
        else:
            print("Couldn't get the following list with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_TIMELINE:
        succeeded, result = twis.get_timeline(auth_secret[0],
                                              args[pytwis_clt_constants.ARG_MAX_TWEETS])
        if succeeded:
            # A non-empty auth secret means a user is logged in, so this is
            # the user timeline; otherwise it is the general timeline.
            if auth_secret[0] != '':
                print('Got {} tweets in the user timeline'.\
                      format(len(result[pytwis_constants.TWEETS_KEY])))
            else:
                print('Got {} tweets in the general timeline'.\
                      format(len(result[pytwis_constants.TWEETS_KEY])))
            print_tweets(result[pytwis_constants.TWEETS_KEY])
        else:
            if auth_secret[0] != '':
                print("Couldn't get the user timeline with error = {}".\
                      format(result[pytwis_constants.ERROR_KEY]))
            else:
                print("Couldn't get the general timeline with error = {}".\
                      format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_USER_TWEETS:
        # Get the username of the currently logged-in user if no username is given.
        if args[pytwis_clt_constants.ARG_USERNAME] is None:
            succeeded, result = twis.get_user_profile(auth_secret[0])
            if succeeded:
                args[pytwis_clt_constants.ARG_USERNAME] = result[pytwis_constants.USERNAME_KEY]
                print('No username is given, so use the currently logged-in user {}'.\
                      format(args[pytwis_clt_constants.ARG_USERNAME]))
            else:
                print("Couldn't get the username of the currently logged-in user with error = {}".\
                      format(result[pytwis_constants.ERROR_KEY]))
                return
        succeeded, result = twis.get_user_tweets(auth_secret[0],
                                                 args[pytwis_clt_constants.ARG_USERNAME],
                                                 args[pytwis_clt_constants.ARG_MAX_TWEETS])
        if succeeded:
            print('Got {} tweets posted by {}'.format(len(result[pytwis_constants.TWEETS_KEY]),
                                                      args[pytwis_clt_constants.ARG_USERNAME]))
            print_tweets(result[pytwis_constants.TWEETS_KEY])
        else:
            print("Couldn't get the tweets posted by {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_USERNAME],
                         result[pytwis_constants.ERROR_KEY]))
    else:
        # Unknown commands were already rejected by validate_command(); nothing to do.
        pass
def get_pytwis(epilog):
    """Connect to the Redis database and return the Pytwis instance.

    Parameters
    ----------
    epilog: str
        An epilog string which will be displayed by ArgumentParser.

    Returns
    -------
    twis: Pytwis or None
        A connected Pytwis instance, or None if the connection failed or the
        help text was displayed (so callers can always tuple-unpack the result).
    prompt: str or None
        The prompt string which contains either the hostname and the port or
        the socket, or None on failure.
    """
    # Note that we set the conflict handler of ArgumentParser to 'resolve' because we reuse
    # the short help option '-h' for the host name.
    parser = argparse.ArgumentParser(conflict_handler="resolve",
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description=\
                                     'Connect to the Redis database of a Twitter clone and '
                                     'then run commands to access and update the database.',
                                     epilog=epilog)
    parser.add_argument('-h', '--hostname', nargs='?', default='127.0.0.1',
                        help='''the Redis server hostname. If the option is not specified,
                        will be defaulted to 127.0.0.1. If the option is specified but
                        no value is given after the option, then the help information
                        is displayed instead.
                        ''')
    # type=int so a command-line value arrives as an integer, matching the default.
    parser.add_argument('-p', '--port', type=int, default=6379,
                        help='''the Redis server port. If the option is not specified, will
                        be defaulted to 6379.
                        ''')
    parser.add_argument('-s', '--socket', default='',
                        help='''the Redis server socket (usually /tmp/redis.sock). If it is
                        given, it will override hostname and port. Make sure that the
                        unixsocket parameter is defined in your redis.conf file. It’s
                        commented out by default.
                        ''')
    # type=int so a command-line value arrives as an integer, matching the default.
    parser.add_argument('-n', '--db', type=int, default=0,
                        help='''the Redis server database. If the option is not specified,
                        will be defaulted to 0.
                        ''')
    parser.add_argument('-a', '--password', default='',
                        help='''the Redis server password. If the option not specified,
                        will be defaulted to an empty string.
                        ''')
    args = parser.parse_args()
    # If no value is given after the option '-h', then the help information is displayed.
    if args.hostname is None:
        parser.print_help()
        # Bug fix: return a (None, None) pair so the caller's tuple unpacking
        # `twis, prompt = get_pytwis(...)` does not raise a TypeError
        # (previously this returned the bare int 0).
        return None, None
    if args.socket:
        print('The input Redis server socket is {}'.format(args.socket))
        prompt = args.socket
    else:
        print('The input Redis server hostname is {}.'.format(args.hostname))
        print('The input Redis server port is {}.'.format(args.port))
        prompt = '{}:{}'.format(args.hostname, args.port)
    print('The input Redis server database is {}.'.format(args.db))
    if args.password != '':
        print('The input Redis server password is "{}".'.format(args.password))
    else:
        print('The input Redis server password is empty.')
    try:
        if args.socket:
            twis = pytwis.Pytwis(socket=args.socket,
                                 db=args.db,
                                 password=args.password)
        else:
            twis = pytwis.Pytwis(hostname=args.hostname,
                                 port=args.port,
                                 db=args.db,
                                 password=args.password)
        return twis, prompt
    except ValueError as excep:
        print('Failed to connect to the Redis server: {}'.format(str(excep)),
              file=sys.stderr)
        return None, None
def pytwis_clt():
    """Entry point of this command-line tool: connect, then loop on user commands."""
    epilog = '''After launching `pytwis_clt.py`, you will be able to use the following commands:
    * Register a new user:
        127.0.0.1:6379> register {username} {password}
    * Log into a user:
        127.0.0.1:6379> login {username} {password}
    * Log out of a user:
        127.0.0.1:6379> logout
    * Change the password:
        127.0.0.1:6379> changepwd {old_password} {new_password} {confirmed_new_password}
    * Get the profile of the current user:
        127.0.0.1:6379> userprofile
    * Post a tweet:
        127.0.0.1:6379> post {tweet}
    * Follow a user:
        127.0.0.1:6379> follow {followee_username}
    * Unfollow a user:
        127.0.0.1:6379> unfollow {followee_username}
    * Get the follower list:
        127.0.0.1:6379> followers
    * Get the following list:
        127.0.0.1:6379> followings
    * Get the timeline:
        127.0.0.1:6379> timeline
        127.0.0.1:6379> timeline {max_tweet_count}
    Note that if a user is logged in, `timeline` will return the user timeline;
    otherwise `timeline` will return the general timeline.
    * Get the tweets posted by a user:
        127.0.0.1:6379> tweetsby
        127.0.0.1:6379> tweetsby {username}
        127.0.0.1:6379> tweetsby {username} {max_tweet_count}
    Note that if no username is given, `tweetsby` will return the tweets posted
    by the currently logged-in user.
    * Exit the program:
        127.0.0.1:6379> exit
        127.0.0.1:6379> quit
    '''
    twis, prompt = get_pytwis(epilog)
    if twis is None:
        return -1

    # One-element list so the processor can update the secret in place.
    auth_secret = ['']
    banner = ('Please enter a command '
              '(register, login, logout, changepwd, userprofile, post, '
              'follow, unfollow, followers, followings, timeline, tweetsby):\n{}> ')
    while True:
        try:
            cmd_args = pytwis_command_parser(input(banner.format(prompt)))
            command = cmd_args[pytwis_clt_constants.ARG_COMMAND]
            if command in (pytwis_clt_constants.CMD_EXIT, pytwis_clt_constants.CMD_QUIT):
                # Log out of the current user before exiting.
                if auth_secret[0]:
                    pytwis_command_processor(
                        twis, auth_secret,
                        {pytwis_clt_constants.ARG_COMMAND:
                         pytwis_clt_constants.CMD_LOGOUT})
                print('pytwis is exiting.')
                return 0
        except ValueError as excep:
            print('Invalid pytwis command: {}'.format(str(excep)),
                  file=sys.stderr)
            continue
        pytwis_command_processor(twis, auth_secret, cmd_args)
if __name__ == "__main__":
    # Run the CLI only when executed as a script, not when imported as a module.
    pytwis_clt()
|
renweizhukov/pytwis | pytwis/pytwis_clt.py | pytwis_command_parser | python | def pytwis_command_parser(raw_command):
validate_command(raw_command)
# Some command (e.g., logout) may not have arguments.
# Separate the command from its arguments.
splited_raw_command = raw_command.split(' ', 1)
arg_dict = {}
if splited_raw_command[0] == pytwis_clt_constants.CMD_REGISTER:
# register must have two arguments: username and password.
args = splited_raw_command[1]
arg_dict = parse.parse('{{{arg1}}} {{{arg2}}}'.\
format(arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_PASSWORD),
args)
if arg_dict is None:
raise ValueError('{} has incorrect arguments'.format(pytwis_clt_constants.CMD_REGISTER))
elif ' ' in arg_dict[pytwis_clt_constants.ARG_PASSWORD]:
raise ValueError("password can't contain spaces")
print('{}: username = {}, password = {}'.\
format(pytwis_clt_constants.CMD_REGISTER,
arg_dict[pytwis_clt_constants.ARG_USERNAME],
arg_dict[pytwis_clt_constants.ARG_PASSWORD]))
elif splited_raw_command[0] == pytwis_clt_constants.CMD_LOGIN:
# login must have two arguments: username and password.
args = splited_raw_command[1]
arg_dict = parse.parse('{{{arg1}}} {{{arg2}}}'.\
format(arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_PASSWORD),
args)
if arg_dict is None:
raise ValueError('{} has incorrect arguments'.format(pytwis_clt_constants.CMD_LOGIN))
print('{}: username = {}, password = {}'.\
format(pytwis_clt_constants.CMD_LOGIN,
arg_dict[pytwis_clt_constants.ARG_USERNAME],
arg_dict[pytwis_clt_constants.ARG_PASSWORD]))
elif splited_raw_command[0] == pytwis_clt_constants.CMD_LOGOUT:
# logout doesn't have any arguments.
pass
elif splited_raw_command[0] == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
# changepwd must have three arguments: old_password, new_password, and
# confirmed_new_password.
args = splited_raw_command[1]
arg_dict = parse.parse('{{{arg1}}} {{{arg2}}} {{{arg3}}}'.\
format(arg1=pytwis_clt_constants.ARG_OLD_PASSWORD,
arg2=pytwis_clt_constants.ARG_NEW_PASSWORD,
arg3=pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD),
args)
if arg_dict is None:
raise ValueError('{} has incorrect arguments'.\
format(pytwis_clt_constants.CMD_CHANGE_PASSWORD))
elif arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD] !=\
arg_dict[pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD]:
raise ValueError('The confirmed new password is different from the new password')
elif arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD] ==\
arg_dict[pytwis_clt_constants.ARG_OLD_PASSWORD]:
raise ValueError('The new password is the same as the old password')
print('{}: old = {}, new = {}'.format(pytwis_clt_constants.CMD_CHANGE_PASSWORD,
arg_dict[pytwis_clt_constants.ARG_OLD_PASSWORD],
arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD]))
elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_USER_PROFILE:
# userprofile doesn't have any arguments.
pass
elif splited_raw_command[0] == pytwis_clt_constants.CMD_POST:
# post must have one argument: tweet
arg_dict = {pytwis_clt_constants.ARG_TWEET: splited_raw_command[1]}
elif splited_raw_command[0] == pytwis_clt_constants.CMD_FOLLOW:
# follow must have one argument: followee.
arg_dict = {pytwis_clt_constants.ARG_FOLLOWEE: splited_raw_command[1]}
elif splited_raw_command[0] == pytwis_clt_constants.CMD_UNFOLLOW:
# unfollow must have one argument: followee.
arg_dict = {pytwis_clt_constants.ARG_FOLLOWEE: splited_raw_command[1]}
elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWERS:
# followers doesn't have any arguments.
pass
elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
# followings doesn't have any arguments.
pass
elif splited_raw_command[0] == pytwis_clt_constants.CMD_TIMELINE:
# timeline has either zero or one argument.
max_cnt_tweets = -1
if len(splited_raw_command) >= 2:
max_cnt_tweets = int(splited_raw_command[1])
arg_dict = {pytwis_clt_constants.ARG_MAX_TWEETS: max_cnt_tweets}
elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_USER_TWEETS:
# tweetsby has either zero or one or two arguments.
arg_dict = {pytwis_clt_constants.ARG_USERNAME: None,
pytwis_clt_constants.ARG_MAX_TWEETS: -1}
if len(splited_raw_command) >= 2:
# tweetsby has either one or two arguments.
args = splited_raw_command[1]
arg_dict = parse.parse('{{{arg1}}} {{{arg2}:d}}'.\
format(arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_MAX_TWEETS),
args)
if arg_dict is None:
# tweetsby has only one argument.
arg_dict = {pytwis_clt_constants.ARG_USERNAME: args}
arg_dict[pytwis_clt_constants.ARG_MAX_TWEETS] = -1
elif (splited_raw_command[0] == pytwis_clt_constants.CMD_EXIT) or\
(splited_raw_command[0] == pytwis_clt_constants.CMD_QUIT):
# exit or quit doesn't have any arguments.
pass
else:
pass
if isinstance(arg_dict, parse.Result):
arg_dict = arg_dict.named
arg_dict[pytwis_clt_constants.ARG_COMMAND] = splited_raw_command[0]
return arg_dict | Parse the command input.
Parameters
----------
raw_command: str
The raw command input, e.g., `register xxxxxx yyyyyy`.
Returns
-------
arg_dict: dict(str, str or int)
The parsed command output.
{'command':'register', 'username': <username>, 'password': <password>} for `register`.
Raises
------
ValueError
If the raw command can't be parsed correctly, e.g., it has an incorrect number of
arguments or incorrect arguments. | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis_clt.py#L202-L337 | [
"def validate_command(raw_command):\n \"\"\"Validate the command input.\n\n Currently we only check the number of arguments according to the command type.\n\n Parameters\n ----------\n raw_command: str\n The raw command input, e.g., `register xxxxxx yyyyyy`.\n\n Raises\n ------\n ValueError\n If the raw command input doesn't have the correct number of arguments.\n \"\"\"\n parsed_command = raw_command.split()\n arg_count = len(parsed_command) - 1\n\n if not parsed_command:\n return\n\n if parsed_command[0] == pytwis_clt_constants.CMD_REGISTER:\n if arg_count < 2:\n raise ValueError('{cmd} {{{arg1}}} {{{arg2}}}'.\\\n format(cmd=pytwis_clt_constants.CMD_REGISTER,\n arg1=pytwis_clt_constants.ARG_USERNAME,\n arg2=pytwis_clt_constants.ARG_PASSWORD))\n elif parsed_command[0] == pytwis_clt_constants.CMD_LOGIN:\n if arg_count < 2:\n raise ValueError('{cmd} {{{arg1}}} {{{arg2}}}'.\\\n format(cmd=pytwis_clt_constants.CMD_LOGIN,\n arg1=pytwis_clt_constants.ARG_USERNAME,\n arg2=pytwis_clt_constants.ARG_PASSWORD))\n elif parsed_command[0] == pytwis_clt_constants.CMD_LOGOUT:\n pass\n elif parsed_command[0] == pytwis_clt_constants.CMD_CHANGE_PASSWORD:\n if arg_count < 3:\n raise ValueError('{cmd} {{{arg1}}} {{{arg2}}} {{{arg3}}}'.\\\n format(cmd=pytwis_clt_constants.CMD_CHANGE_PASSWORD,\n arg1=pytwis_clt_constants.ARG_OLD_PASSWORD,\n arg2=pytwis_clt_constants.ARG_NEW_PASSWORD,\n arg3=pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD))\n elif parsed_command[0] == pytwis_clt_constants.CMD_GET_USER_PROFILE:\n pass\n elif parsed_command[0] == pytwis_clt_constants.CMD_POST:\n if arg_count < 1:\n raise ValueError('{cmd} {{{arg}}}'.format(cmd=pytwis_clt_constants.CMD_POST,\n arg=pytwis_clt_constants.ARG_TWEET))\n elif parsed_command[0] == pytwis_clt_constants.CMD_FOLLOW:\n if arg_count < 1:\n raise ValueError('{cmd} {{{arg}}}'.format(cmd=pytwis_clt_constants.CMD_FOLLOW,\n arg=pytwis_clt_constants.ARG_FOLLOWEE))\n elif parsed_command[0] == pytwis_clt_constants.CMD_UNFOLLOW:\n if arg_count 
< 1:\n raise ValueError('{cmd} {{{arg}}}'.format(cmd=pytwis_clt_constants.CMD_UNFOLLOW,\n arg=pytwis_clt_constants.ARG_FOLLOWEE))\n elif parsed_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWERS:\n pass\n elif parsed_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWINGS:\n pass\n elif parsed_command[0] == pytwis_clt_constants.CMD_TIMELINE:\n if arg_count > 1:\n raise ValueError('{cmd} {{{arg}}} or {cmd}'.\\\n format(cmd=pytwis_clt_constants.CMD_TIMELINE,\n arg=pytwis_clt_constants.ARG_MAX_TWEETS))\n elif parsed_command[0] == pytwis_clt_constants.CMD_GET_USER_TWEETS:\n if arg_count > 2:\n raise ValueError('{cmd} {{{arg1}}} {{{arg2}}} or {cmd} {{{arg1}}} or {cmd}'.\\\n format(cmd=pytwis_clt_constants.CMD_GET_USER_TWEETS,\n arg1=pytwis_clt_constants.ARG_USERNAME,\n arg2=pytwis_clt_constants.ARG_MAX_TWEETS))\n elif (parsed_command[0] == pytwis_clt_constants.CMD_EXIT) or\\\n (parsed_command[0] == pytwis_clt_constants.CMD_QUIT):\n pass\n else:\n raise ValueError('Invalid pytwis command')\n"
] | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A command-line tool which uses `pytwis` to interact with the Redis database of
a Twitter toy clone.
To see the help information,
.. code:: bash
$ ./pytwis_clt.py -h
$ ./pytwis_clt.py --help
After launching `pytwis_clt.py`, you will be able to use the following commands:
* Register a new user:
.. code:: bash
127.0.0.1:6379> register {username} {password}
* Log into a user:
.. code:: bash
127.0.0.1:6379> login {username} {password}
* Log out of a user:
.. code:: bash
127.0.0.1:6379> logout
* Change the password:
.. code:: bash
127.0.0.1:6379> changepwd {old_password} {new_password} {confirmed_new_password}
* Get the profile of the current user:
.. code:: bash
127.0.0.1:6379> userprofile
* Post a tweet:
.. code:: bash
127.0.0.1:6379> post {tweet}
* Follow a user:
.. code:: bash
127.0.0.1:6379> follow {followee_username}
* Unfollow a user:
.. code:: bash
127.0.0.1:6379> unfollow {followee_username}
* Get the follower list:
.. code:: bash
127.0.0.1:6379> followers
* Get the following list:
.. code:: bash
127.0.0.1:6379> followings
* Get the timeline:
.. code:: bash
127.0.0.1:6379> timeline
127.0.0.1:6379> timeline {max_tweet_count}
Note that if a user is logged in, `timeline` will return the user timeline;
otherwise `timeline` will return the general timeline.
* Get the tweets posted by a user:
.. code:: bash
127.0.0.1:6379> tweetsby
127.0.0.1:6379> tweetsby {username}
127.0.0.1:6379> tweetsby {username} {max_tweet_count}
Note that if no username is given, `tweetsby` will return the tweets posted
by the currently logged-in user.
* Exit the program:
.. code:: bash
127.0.0.1:6379> exit
127.0.0.1:6379> quit
"""
import argparse
import datetime
import sys
import parse
if __package__:
# If this module is imported as part of the pytwis package, then use
# the relative import.
from . import pytwis_constants
from . import pytwis_clt_constants
from . import pytwis
else:
# If this module is executed locally as a script, then don't use
# the relative import.
import pytwis_constants # pylint: disable=import-error
import pytwis_clt_constants # pylint: disable=import-error
import pytwis
def validate_command(raw_command):
    """Validate the command input.

    Currently we only check the number of arguments according to the command type.

    Parameters
    ----------
    raw_command: str
        The raw command input, e.g., `register xxxxxx yyyyyy`.

    Raises
    ------
    ValueError
        If the raw command input doesn't have the correct number of arguments.
    """
    tokens = raw_command.split()
    # An empty input is silently accepted: there is nothing to validate.
    if not tokens:
        return
    cmd = tokens[0]
    n_args = len(tokens) - 1

    clt = pytwis_clt_constants
    # Commands that require at least N arguments, keyed to their usage strings.
    min_args_usage = {
        clt.CMD_REGISTER:
            (2, '{cmd} {{{arg1}}} {{{arg2}}}'.format(cmd=clt.CMD_REGISTER,
                                                     arg1=clt.ARG_USERNAME,
                                                     arg2=clt.ARG_PASSWORD)),
        clt.CMD_LOGIN:
            (2, '{cmd} {{{arg1}}} {{{arg2}}}'.format(cmd=clt.CMD_LOGIN,
                                                     arg1=clt.ARG_USERNAME,
                                                     arg2=clt.ARG_PASSWORD)),
        clt.CMD_CHANGE_PASSWORD:
            (3, '{cmd} {{{arg1}}} {{{arg2}}} {{{arg3}}}'.format(
                cmd=clt.CMD_CHANGE_PASSWORD,
                arg1=clt.ARG_OLD_PASSWORD,
                arg2=clt.ARG_NEW_PASSWORD,
                arg3=clt.ARG_CONFIRMED_NEW_PASSWORD)),
        clt.CMD_POST:
            (1, '{cmd} {{{arg}}}'.format(cmd=clt.CMD_POST, arg=clt.ARG_TWEET)),
        clt.CMD_FOLLOW:
            (1, '{cmd} {{{arg}}}'.format(cmd=clt.CMD_FOLLOW, arg=clt.ARG_FOLLOWEE)),
        clt.CMD_UNFOLLOW:
            (1, '{cmd} {{{arg}}}'.format(cmd=clt.CMD_UNFOLLOW, arg=clt.ARG_FOLLOWEE)),
    }
    # Commands that accept at most N arguments, keyed to their usage strings.
    max_args_usage = {
        clt.CMD_TIMELINE:
            (1, '{cmd} {{{arg}}} or {cmd}'.format(cmd=clt.CMD_TIMELINE,
                                                  arg=clt.ARG_MAX_TWEETS)),
        clt.CMD_GET_USER_TWEETS:
            (2, '{cmd} {{{arg1}}} {{{arg2}}} or {cmd} {{{arg1}}} or {cmd}'.format(
                cmd=clt.CMD_GET_USER_TWEETS,
                arg1=clt.ARG_USERNAME,
                arg2=clt.ARG_MAX_TWEETS)),
    }
    # Commands that take no arguments; extra tokens are tolerated here.
    no_arg_cmds = (clt.CMD_LOGOUT, clt.CMD_GET_USER_PROFILE,
                   clt.CMD_GET_FOLLOWERS, clt.CMD_GET_FOLLOWINGS,
                   clt.CMD_EXIT, clt.CMD_QUIT)

    if cmd in min_args_usage:
        required, usage = min_args_usage[cmd]
        if n_args < required:
            raise ValueError(usage)
    elif cmd in max_args_usage:
        allowed, usage = max_args_usage[cmd]
        if n_args > allowed:
            raise ValueError(usage)
    elif cmd not in no_arg_cmds:
        raise ValueError('Invalid pytwis command')
def print_tweets(tweets):
    """Print a list of tweets one by one separated by "="s.

    Each tweet is framed by "-" rules and the whole listing by "=" rules.

    Parameters
    ----------
    tweets: list(dict)
        A list of tweets. Each tweet is a dict containing the username of the tweet's author,
        the post time (a Unix timestamp), and the tweet body.
    """
    print('=' * 60)
    for index, tweet in enumerate(tweets):
        print('-' * 60)
        print('Tweet {}:'.format(index))
        print('Username:', tweet[pytwis_constants.USERNAME_KEY])
        # fromtimestamp() renders the Unix timestamp in the local timezone.
        print('Time:',
              datetime.datetime.fromtimestamp(int(tweet[pytwis_constants.TWEET_UNIXTIME_KEY])).\
              strftime('%Y-%m-%d %H:%M:%S'))
        print('Body:\n\t', tweet[pytwis_constants.TWEET_BODY_KEY])
        print('-' * 60)
    print('=' * 60)
def pytwis_command_processor(twis, auth_secret, args):
    """Process the parsed command.

    Dispatches on the command name to the matching Pytwis API call and prints
    the outcome. ``auth_secret[0]`` is updated in place whenever the
    authentication secret changes (login, logout, changepwd).

    Parameters
    ----------
    twis: Pytwis
        A Pytwis instance which interacts with the Redis database of the Twitter toy clone.
    auth_secret: list(str)
        A one-element list holding the authentication secret of the logged-in
        user; a list (not a plain str) so this function can update it in place.
    args:
        The parsed command output by pytwis_command_parser().
    """
    command = args[pytwis_clt_constants.ARG_COMMAND]
    if command == pytwis_clt_constants.CMD_REGISTER:
        succeeded, result = twis.register(args[pytwis_clt_constants.ARG_USERNAME],
                                          args[pytwis_clt_constants.ARG_PASSWORD])
        if succeeded:
            print('Registered {}'.format(args[pytwis_clt_constants.ARG_USERNAME]))
        else:
            print("Couldn't register {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_USERNAME],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_LOGIN:
        succeeded, result = twis.login(args[pytwis_clt_constants.ARG_USERNAME],
                                       args[pytwis_clt_constants.ARG_PASSWORD])
        if succeeded:
            auth_secret[0] = result[pytwis_constants.AUTH_KEY]
            print('Logged into username {}'.format(args[pytwis_clt_constants.ARG_USERNAME]))
        else:
            print("Couldn't log into username {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_USERNAME],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_LOGOUT:
        succeeded, result = twis.logout(auth_secret[0])
        if succeeded:
            auth_secret[0] = result[pytwis_constants.AUTH_KEY]
            print('Logged out of username {}'.format(result[pytwis_constants.USERNAME_KEY]))
        else:
            print("Couldn't log out with error = {}".format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
        succeeded, result = twis.change_password(auth_secret[0],
                                                 args[pytwis_clt_constants.ARG_OLD_PASSWORD],
                                                 args[pytwis_clt_constants.ARG_NEW_PASSWORD])
        if succeeded:
            auth_secret[0] = result[pytwis_constants.AUTH_KEY]
            print('Changed the password')
        else:
            print("Couldn't change the password with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_USER_PROFILE:
        succeeded, result = twis.get_user_profile(auth_secret[0])
        if succeeded:
            print('Got the user profile')
            print('=' * 20)
            for key, value in result.items():
                print('{}: {}'.format(key, value))
            print('=' * 20)
        else:
            print("Couldn't get the user profile with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_POST:
        # Consistency fix: use the ARG_TWEET constant rather than the
        # hard-coded 'tweet' key emitted by pytwis_command_parser().
        succeeded, result = twis.post_tweet(auth_secret[0],
                                            args[pytwis_clt_constants.ARG_TWEET])
        if succeeded:
            print('Posted the tweet')
        else:
            print("Couldn't post the tweet with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_FOLLOW:
        succeeded, result = twis.follow(auth_secret[0],
                                        args[pytwis_clt_constants.ARG_FOLLOWEE])
        if succeeded:
            print('Followed username {}'.format(args[pytwis_clt_constants.ARG_FOLLOWEE]))
        else:
            print("Couldn't follow the username {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_FOLLOWEE],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_UNFOLLOW:
        succeeded, result = twis.unfollow(auth_secret[0],
                                          args[pytwis_clt_constants.ARG_FOLLOWEE])
        if succeeded:
            print('Unfollowed username {}'.format(args[pytwis_clt_constants.ARG_FOLLOWEE]))
        else:
            print("Couldn't unfollow the username {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_FOLLOWEE],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_FOLLOWERS:
        succeeded, result = twis.get_followers(auth_secret[0])
        if succeeded:
            print('Got the list of {} followers'.\
                  format(len(result[pytwis_constants.FOLLOWER_LIST_KEY])))
            print('=' * 20)
            for follower in result[pytwis_constants.FOLLOWER_LIST_KEY]:
                print('\t' + follower)
            print('=' * 20)
        else:
            print("Couldn't get the follower list with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
        succeeded, result = twis.get_following(auth_secret[0])
        if succeeded:
            print('Got the list of {} followings'.\
                  format(len(result[pytwis_constants.FOLLOWING_LIST_KEY])))
            print('=' * 60)
            for following in result[pytwis_constants.FOLLOWING_LIST_KEY]:
                print('\t' + following)
            print('=' * 60)
        else:
            print("Couldn't get the following list with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_TIMELINE:
        succeeded, result = twis.get_timeline(auth_secret[0],
                                              args[pytwis_clt_constants.ARG_MAX_TWEETS])
        if succeeded:
            # A non-empty auth secret means a user is logged in, so this is
            # the user timeline; otherwise it is the general timeline.
            if auth_secret[0] != '':
                print('Got {} tweets in the user timeline'.\
                      format(len(result[pytwis_constants.TWEETS_KEY])))
            else:
                print('Got {} tweets in the general timeline'.\
                      format(len(result[pytwis_constants.TWEETS_KEY])))
            print_tweets(result[pytwis_constants.TWEETS_KEY])
        else:
            if auth_secret[0] != '':
                print("Couldn't get the user timeline with error = {}".\
                      format(result[pytwis_constants.ERROR_KEY]))
            else:
                print("Couldn't get the general timeline with error = {}".\
                      format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_USER_TWEETS:
        # Get the username of the currently logged-in user if no username is given.
        if args[pytwis_clt_constants.ARG_USERNAME] is None:
            succeeded, result = twis.get_user_profile(auth_secret[0])
            if succeeded:
                args[pytwis_clt_constants.ARG_USERNAME] = result[pytwis_constants.USERNAME_KEY]
                print('No username is given, so use the currently logged-in user {}'.\
                      format(args[pytwis_clt_constants.ARG_USERNAME]))
            else:
                print("Couldn't get the username of the currently logged-in user with error = {}".\
                      format(result[pytwis_constants.ERROR_KEY]))
                return
        succeeded, result = twis.get_user_tweets(auth_secret[0],
                                                 args[pytwis_clt_constants.ARG_USERNAME],
                                                 args[pytwis_clt_constants.ARG_MAX_TWEETS])
        if succeeded:
            print('Got {} tweets posted by {}'.format(len(result[pytwis_constants.TWEETS_KEY]),
                                                      args[pytwis_clt_constants.ARG_USERNAME]))
            print_tweets(result[pytwis_constants.TWEETS_KEY])
        else:
            print("Couldn't get the tweets posted by {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_USERNAME],
                         result[pytwis_constants.ERROR_KEY]))
    else:
        # Unknown commands were already rejected by validate_command(); nothing to do.
        pass
def get_pytwis(epilog):
"""Connect to the Redis database and return the Pytwis instance.
Parameters
----------
epilog: str
An epilog string which will be displayed by ArgumentParser.
Returns
-------
pytwis: A Pytwis instance.
prompt: str
The prompt string which contains either the hostname and the port or the socket.
Raises
------
ValueError
If we fail to connect to the Redis server.
"""
# Note that we set the conflict handler of ArgumentParser to 'resolve' because we reuse
# the short help option '-h' for the host name.
parser = argparse.ArgumentParser(conflict_handler="resolve",
formatter_class=argparse.RawDescriptionHelpFormatter,
description=\
'Connect to the Redis database of a Twitter clone and '
'then run commands to access and update the database.',
epilog=epilog)
parser.add_argument('-h', '--hostname', nargs='?', default='127.0.0.1',
help='''the Redis server hostname. If the option is not specified,
will be defaulted to 127.0.0.1. If the option is specified but
no value is given after the option, then the help information
is displayed instead.
''')
parser.add_argument('-p', '--port', default=6379,
help='''the Redis server port. If the option is not specified, will
be defaulted to 6379.
''')
parser.add_argument('-s', '--socket', default='',
help='''the Redis server socket (usually /tmp/redis.sock). If it is
given, it will override hostname and port. Make sure that the
unixsocket parameter is defined in your redis.conf file. It’s
commented out by default.
''')
parser.add_argument('-n', '--db', default=0,
help='''the Redis server database. If the option is not specified,
will be defaulted to 0.
''')
parser.add_argument('-a', '--password', default='',
help='''the Redis server password. If the option not specified,
will be defaulted to an empty string.
''')
args = parser.parse_args()
# If no value is given after the option '-h', then the help information is displayed.
if args.hostname is None:
parser.print_help()
return 0
if args.socket:
print('The input Redis server socket is {}'.format(args.socket))
prompt = args.socket
else:
print('The input Redis server hostname is {}.'.format(args.hostname))
print('The input Redis server port is {}.'.format(args.port))
prompt = '{}:{}'.format(args.hostname, args.port)
print('The input Redis server database is {}.'.format(args.db))
if args.password != '':
print('The input Redis server password is "{}".'.format(args.password))
else:
print('The input Redis server password is empty.')
try:
if args.socket:
twis = pytwis.Pytwis(socket=args.socket,
db=args.db,
password=args.password)
else:
twis = pytwis.Pytwis(hostname=args.hostname,
port=args.port,
db=args.db,
password=args.password)
return twis, prompt
except ValueError as excep:
print('Failed to connect to the Redis server: {}'.format(str(excep)),
file=sys.stderr)
return None, None
def pytwis_clt():
"""The main routine of this command-line tool."""
epilog = '''After launching `pytwis_clt.py`, you will be able to use the following commands:
* Register a new user:
127.0.0.1:6379> register {username} {password}
* Log into a user:
127.0.0.1:6379> login {username} {password}
* Log out of a user:
127.0.0.1:6379> logout
* Change the password:
127.0.0.1:6379> changepwd {old_password} {new_password} {confirmed_new_password}
* Get the profile of the current user:
127.0.0.1:6379> userprofile
* Post a tweet:
127.0.0.1:6379> post {tweet}
* Follow a user:
127.0.0.1:6379> follow {followee_username}
* Unfollow a user:
127.0.0.1:6379> unfollow {followee_username}
* Get the follower list:
127.0.0.1:6379> followers
* Get the following list:
127.0.0.1:6379> followings
* Get the timeline:
127.0.0.1:6379> timeline
127.0.0.1:6379> timeline {max_tweet_count}
Note that if a user is logged in, `timeline` will return the user timeline;
otherwise `timeline` will return the general timeline.
* Get the tweets posted by a user:
127.0.0.1:6379> tweetsby
127.0.0.1:6379> tweetsby {username}
127.0.0.1:6379> tweetsby {username} {max_tweet_count}
Note that if no username is given, `tweetsby` will return the tweets posted
by the currently logged-in user.
* Exit the program:
127.0.0.1:6379> exit
127.0.0.1:6379> quit
'''
twis, prompt = get_pytwis(epilog)
if twis is None:
return -1
auth_secret = ['']
while True:
try:
arg_dict = pytwis_command_parser(
input('Please enter a command '
'(register, login, logout, changepwd, userprofile, post, '
'follow, unfollow, followers, followings, timeline, tweetsby):\n{}> '\
.format(prompt)))
if arg_dict[pytwis_clt_constants.ARG_COMMAND] == pytwis_clt_constants.CMD_EXIT \
or arg_dict[pytwis_clt_constants.ARG_COMMAND] == pytwis_clt_constants.CMD_QUIT:
# Log out of the current user before exiting.
if auth_secret[0]:
pytwis_command_processor(twis, auth_secret,
{pytwis_clt_constants.ARG_COMMAND:
pytwis_clt_constants.CMD_LOGOUT})
print('pytwis is exiting.')
return 0
except ValueError as excep:
print('Invalid pytwis command: {}'.format(str(excep)),
file=sys.stderr)
continue
pytwis_command_processor(twis, auth_secret, arg_dict)
if __name__ == "__main__":
pytwis_clt()
|
renweizhukov/pytwis | pytwis/pytwis_clt.py | print_tweets | python | def print_tweets(tweets):
print('=' * 60)
for index, tweet in enumerate(tweets):
print('-' * 60)
print('Tweet {}:'.format(index))
print('Username:', tweet[pytwis_constants.USERNAME_KEY])
print('Time:',
datetime.datetime.fromtimestamp(int(tweet[pytwis_constants.TWEET_UNIXTIME_KEY])).\
strftime('%Y-%m-%d %H:%M:%S'))
print('Body:\n\t', tweet[pytwis_constants.TWEET_BODY_KEY])
print('-' * 60)
print('=' * 60) | Print a list of tweets one by one separated by "="s.
Parameters
----------
tweets: list(dict)
A list of tweets. Each tweet is a dict containing the username of the tweet's author,
the post time, and the tweet body. | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis_clt.py#L340-L359 | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A command-line tool which uses `pytwis` to interact with the Redis database of
a Twitter toy clone.
To see the help information,
.. code:: bash
$ ./pytwis_clt.py -h
$ ./pytwis_clt.py --help
After launching `pytwis_clt.py`, you will be able to use the following commands:
* Register a new user:
.. code:: bash
127.0.0.1:6379> register {username} {password}
* Log into a user:
.. code:: bash
127.0.0.1:6379> login {username} {password}
* Log out of a user:
.. code:: bash
127.0.0.1:6379> logout
* Change the password:
.. code:: bash
127.0.0.1:6379> changepwd {old_password} {new_password} {confirmed_new_password}
* Get the profile of the current user:
.. code:: bash
127.0.0.1:6379> userprofile
* Post a tweet:
.. code:: bash
127.0.0.1:6379> post {tweet}
* Follow a user:
.. code:: bash
127.0.0.1:6379> follow {followee_username}
* Unfollow a user:
.. code:: bash
127.0.0.1:6379> unfollow {followee_username}
* Get the follower list:
.. code:: bash
127.0.0.1:6379> followers
* Get the following list:
.. code:: bash
127.0.0.1:6379> followings
* Get the timeline:
.. code:: bash
127.0.0.1:6379> timeline
127.0.0.1:6379> timeline {max_tweet_count}
Note that if a user is logged in, `timeline` will return the user timeline;
otherwise `timeline` will return the general timeline.
* Get the tweets posted by a user:
.. code:: bash
127.0.0.1:6379> tweetsby
127.0.0.1:6379> tweetsby {username}
127.0.0.1:6379> tweetsby {username} {max_tweet_count}
Note that if no username is given, `tweetsby` will return the tweets posted
by the currently logged-in user.
* Exit the program:
.. code:: bash
127.0.0.1:6379> exit
127.0.0.1:6379> quit
"""
import argparse
import datetime
import sys
import parse
if __package__:
# If this module is imported as part of the pytwis package, then use
# the relative import.
from . import pytwis_constants
from . import pytwis_clt_constants
from . import pytwis
else:
# If this module is executed locally as a script, then don't use
# the relative import.
import pytwis_constants # pylint: disable=import-error
import pytwis_clt_constants # pylint: disable=import-error
import pytwis
def validate_command(raw_command):
"""Validate the command input.
Currently we only check the number of arguments according to the command type.
Parameters
----------
raw_command: str
The raw command input, e.g., `register xxxxxx yyyyyy`.
Raises
------
ValueError
If the raw command input doesn't have the correct number of arguments.
"""
parsed_command = raw_command.split()
arg_count = len(parsed_command) - 1
if not parsed_command:
return
if parsed_command[0] == pytwis_clt_constants.CMD_REGISTER:
if arg_count < 2:
raise ValueError('{cmd} {{{arg1}}} {{{arg2}}}'.\
format(cmd=pytwis_clt_constants.CMD_REGISTER,
arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_PASSWORD))
elif parsed_command[0] == pytwis_clt_constants.CMD_LOGIN:
if arg_count < 2:
raise ValueError('{cmd} {{{arg1}}} {{{arg2}}}'.\
format(cmd=pytwis_clt_constants.CMD_LOGIN,
arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_PASSWORD))
elif parsed_command[0] == pytwis_clt_constants.CMD_LOGOUT:
pass
elif parsed_command[0] == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
if arg_count < 3:
raise ValueError('{cmd} {{{arg1}}} {{{arg2}}} {{{arg3}}}'.\
format(cmd=pytwis_clt_constants.CMD_CHANGE_PASSWORD,
arg1=pytwis_clt_constants.ARG_OLD_PASSWORD,
arg2=pytwis_clt_constants.ARG_NEW_PASSWORD,
arg3=pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD))
elif parsed_command[0] == pytwis_clt_constants.CMD_GET_USER_PROFILE:
pass
elif parsed_command[0] == pytwis_clt_constants.CMD_POST:
if arg_count < 1:
raise ValueError('{cmd} {{{arg}}}'.format(cmd=pytwis_clt_constants.CMD_POST,
arg=pytwis_clt_constants.ARG_TWEET))
elif parsed_command[0] == pytwis_clt_constants.CMD_FOLLOW:
if arg_count < 1:
raise ValueError('{cmd} {{{arg}}}'.format(cmd=pytwis_clt_constants.CMD_FOLLOW,
arg=pytwis_clt_constants.ARG_FOLLOWEE))
elif parsed_command[0] == pytwis_clt_constants.CMD_UNFOLLOW:
if arg_count < 1:
raise ValueError('{cmd} {{{arg}}}'.format(cmd=pytwis_clt_constants.CMD_UNFOLLOW,
arg=pytwis_clt_constants.ARG_FOLLOWEE))
elif parsed_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWERS:
pass
elif parsed_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
pass
elif parsed_command[0] == pytwis_clt_constants.CMD_TIMELINE:
if arg_count > 1:
raise ValueError('{cmd} {{{arg}}} or {cmd}'.\
format(cmd=pytwis_clt_constants.CMD_TIMELINE,
arg=pytwis_clt_constants.ARG_MAX_TWEETS))
elif parsed_command[0] == pytwis_clt_constants.CMD_GET_USER_TWEETS:
if arg_count > 2:
raise ValueError('{cmd} {{{arg1}}} {{{arg2}}} or {cmd} {{{arg1}}} or {cmd}'.\
format(cmd=pytwis_clt_constants.CMD_GET_USER_TWEETS,
arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_MAX_TWEETS))
elif (parsed_command[0] == pytwis_clt_constants.CMD_EXIT) or\
(parsed_command[0] == pytwis_clt_constants.CMD_QUIT):
pass
else:
raise ValueError('Invalid pytwis command')
def pytwis_command_parser(raw_command):
"""Parse the command input.
Parameters
----------
raw_command: str
The raw command input, e.g., `register xxxxxx yyyyyy`.
Returns
-------
arg_dict: dict(str, str or int)
The parsed command output.
{'command':'register', 'username': <username>, 'password': <password>} for `register`.
Raises
------
ValueError
If the raw command can't be parsed correctly, e.g., it has an incorrect number of
arguments or incorrect arguments.
"""
validate_command(raw_command)
# Some command (e.g., logout) may not have arguments.
# Separate the command from its arguments.
splited_raw_command = raw_command.split(' ', 1)
arg_dict = {}
if splited_raw_command[0] == pytwis_clt_constants.CMD_REGISTER:
# register must have two arguments: username and password.
args = splited_raw_command[1]
arg_dict = parse.parse('{{{arg1}}} {{{arg2}}}'.\
format(arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_PASSWORD),
args)
if arg_dict is None:
raise ValueError('{} has incorrect arguments'.format(pytwis_clt_constants.CMD_REGISTER))
elif ' ' in arg_dict[pytwis_clt_constants.ARG_PASSWORD]:
raise ValueError("password can't contain spaces")
print('{}: username = {}, password = {}'.\
format(pytwis_clt_constants.CMD_REGISTER,
arg_dict[pytwis_clt_constants.ARG_USERNAME],
arg_dict[pytwis_clt_constants.ARG_PASSWORD]))
elif splited_raw_command[0] == pytwis_clt_constants.CMD_LOGIN:
# login must have two arguments: username and password.
args = splited_raw_command[1]
arg_dict = parse.parse('{{{arg1}}} {{{arg2}}}'.\
format(arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_PASSWORD),
args)
if arg_dict is None:
raise ValueError('{} has incorrect arguments'.format(pytwis_clt_constants.CMD_LOGIN))
print('{}: username = {}, password = {}'.\
format(pytwis_clt_constants.CMD_LOGIN,
arg_dict[pytwis_clt_constants.ARG_USERNAME],
arg_dict[pytwis_clt_constants.ARG_PASSWORD]))
elif splited_raw_command[0] == pytwis_clt_constants.CMD_LOGOUT:
# logout doesn't have any arguments.
pass
elif splited_raw_command[0] == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
# changepwd must have three arguments: old_password, new_password, and
# confirmed_new_password.
args = splited_raw_command[1]
arg_dict = parse.parse('{{{arg1}}} {{{arg2}}} {{{arg3}}}'.\
format(arg1=pytwis_clt_constants.ARG_OLD_PASSWORD,
arg2=pytwis_clt_constants.ARG_NEW_PASSWORD,
arg3=pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD),
args)
if arg_dict is None:
raise ValueError('{} has incorrect arguments'.\
format(pytwis_clt_constants.CMD_CHANGE_PASSWORD))
elif arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD] !=\
arg_dict[pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD]:
raise ValueError('The confirmed new password is different from the new password')
elif arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD] ==\
arg_dict[pytwis_clt_constants.ARG_OLD_PASSWORD]:
raise ValueError('The new password is the same as the old password')
print('{}: old = {}, new = {}'.format(pytwis_clt_constants.CMD_CHANGE_PASSWORD,
arg_dict[pytwis_clt_constants.ARG_OLD_PASSWORD],
arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD]))
elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_USER_PROFILE:
# userprofile doesn't have any arguments.
pass
elif splited_raw_command[0] == pytwis_clt_constants.CMD_POST:
# post must have one argument: tweet
arg_dict = {pytwis_clt_constants.ARG_TWEET: splited_raw_command[1]}
elif splited_raw_command[0] == pytwis_clt_constants.CMD_FOLLOW:
# follow must have one argument: followee.
arg_dict = {pytwis_clt_constants.ARG_FOLLOWEE: splited_raw_command[1]}
elif splited_raw_command[0] == pytwis_clt_constants.CMD_UNFOLLOW:
# unfollow must have one argument: followee.
arg_dict = {pytwis_clt_constants.ARG_FOLLOWEE: splited_raw_command[1]}
elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWERS:
# followers doesn't have any arguments.
pass
elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
# followings doesn't have any arguments.
pass
elif splited_raw_command[0] == pytwis_clt_constants.CMD_TIMELINE:
# timeline has either zero or one argument.
max_cnt_tweets = -1
if len(splited_raw_command) >= 2:
max_cnt_tweets = int(splited_raw_command[1])
arg_dict = {pytwis_clt_constants.ARG_MAX_TWEETS: max_cnt_tweets}
elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_USER_TWEETS:
# tweetsby has either zero or one or two arguments.
arg_dict = {pytwis_clt_constants.ARG_USERNAME: None,
pytwis_clt_constants.ARG_MAX_TWEETS: -1}
if len(splited_raw_command) >= 2:
# tweetsby has either one or two arguments.
args = splited_raw_command[1]
arg_dict = parse.parse('{{{arg1}}} {{{arg2}:d}}'.\
format(arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_MAX_TWEETS),
args)
if arg_dict is None:
# tweetsby has only one argument.
arg_dict = {pytwis_clt_constants.ARG_USERNAME: args}
arg_dict[pytwis_clt_constants.ARG_MAX_TWEETS] = -1
elif (splited_raw_command[0] == pytwis_clt_constants.CMD_EXIT) or\
(splited_raw_command[0] == pytwis_clt_constants.CMD_QUIT):
# exit or quit doesn't have any arguments.
pass
else:
pass
if isinstance(arg_dict, parse.Result):
arg_dict = arg_dict.named
arg_dict[pytwis_clt_constants.ARG_COMMAND] = splited_raw_command[0]
return arg_dict
def pytwis_command_processor(twis, auth_secret, args):
"""Process the parsed command.
Parameters
----------
twis: Pytwis
A Pytwis instance which interacts with the Redis database of the Twitter toy clone.
auth_secret: str
The authentication secret of a logged-in user.
args:
The parsed command output by pytwis_command_parser().
"""
command = args[pytwis_clt_constants.ARG_COMMAND]
if command == pytwis_clt_constants.CMD_REGISTER:
succeeded, result = twis.register(args[pytwis_clt_constants.ARG_USERNAME],
args[pytwis_clt_constants.ARG_PASSWORD])
if succeeded:
print('Registered {}'.format(args[pytwis_clt_constants.ARG_USERNAME]))
else:
print("Couldn't register {} with error = {}".\
format(args[pytwis_clt_constants.ARG_USERNAME],
result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_LOGIN:
succeeded, result = twis.login(args[pytwis_clt_constants.ARG_USERNAME],
args[pytwis_clt_constants.ARG_PASSWORD])
if succeeded:
auth_secret[0] = result[pytwis_constants.AUTH_KEY]
print('Logged into username {}'.format(args[pytwis_clt_constants.ARG_USERNAME]))
else:
print("Couldn't log into username {} with error = {}".\
format(args[pytwis_clt_constants.ARG_USERNAME],
result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_LOGOUT:
succeeded, result = twis.logout(auth_secret[0])
if succeeded:
auth_secret[0] = result[pytwis_constants.AUTH_KEY]
print('Logged out of username {}'.format(result[pytwis_constants.USERNAME_KEY]))
else:
print("Couldn't log out with error = {}".format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
succeeded, result = twis.change_password(auth_secret[0],
args[pytwis_clt_constants.ARG_OLD_PASSWORD],
args[pytwis_clt_constants.ARG_NEW_PASSWORD])
if succeeded:
auth_secret[0] = result[pytwis_constants.AUTH_KEY]
print('Changed the password')
else:
print("Couldn't change the password with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_GET_USER_PROFILE:
succeeded, result = twis.get_user_profile(auth_secret[0])
if succeeded:
print('Got the user profile')
print('=' * 20)
for key, value in result.items():
print('{}: {}'.format(key, value))
print('=' * 20)
else:
print("Couldn't get the user profile with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_POST:
succeeded, result = twis.post_tweet(auth_secret[0], args['tweet'])
if succeeded:
print('Posted the tweet')
else:
print("Couldn't post the tweet with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_FOLLOW:
succeeded, result = twis.follow(auth_secret[0],
args[pytwis_clt_constants.ARG_FOLLOWEE])
if succeeded:
print('Followed username {}'.format(args[pytwis_clt_constants.ARG_FOLLOWEE]))
else:
print("Couldn't follow the username {} with error = {}".\
format(args[pytwis_clt_constants.ARG_FOLLOWEE],
result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_UNFOLLOW:
succeeded, result = twis.unfollow(auth_secret[0],
args[pytwis_clt_constants.ARG_FOLLOWEE])
if succeeded:
print('Unfollowed username {}'.format(args[pytwis_clt_constants.ARG_FOLLOWEE]))
else:
print("Couldn't unfollow the username {} with error = {}".\
format(args[pytwis_clt_constants.ARG_FOLLOWEE],
result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_GET_FOLLOWERS:
succeeded, result = twis.get_followers(auth_secret[0])
if succeeded:
print('Got the list of {} followers'.\
format(len(result[pytwis_constants.FOLLOWER_LIST_KEY])))
print('=' * 20)
for follower in result[pytwis_constants.FOLLOWER_LIST_KEY]:
print('\t' + follower)
print('=' * 20)
else:
print("Couldn't get the follower list with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
succeeded, result = twis.get_following(auth_secret[0])
if succeeded:
print('Got the list of {} followings'.\
format(len(result[pytwis_constants.FOLLOWING_LIST_KEY])))
print('=' * 60)
for following in result[pytwis_constants.FOLLOWING_LIST_KEY]:
print('\t' + following)
print('=' * 60)
else:
print("Couldn't get the following list with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_TIMELINE:
succeeded, result = twis.get_timeline(auth_secret[0],
args[pytwis_clt_constants.ARG_MAX_TWEETS])
if succeeded:
if auth_secret[0] != '':
print('Got {} tweets in the user timeline'.\
format(len(result[pytwis_constants.TWEETS_KEY])))
else:
print('Got {} tweets in the general timeline'.\
format(len(result[pytwis_constants.TWEETS_KEY])))
print_tweets(result[pytwis_constants.TWEETS_KEY])
else:
if auth_secret[0] != '':
print("Couldn't get the user timeline with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
else:
print("Couldn't get the general timeline with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_GET_USER_TWEETS:
# Get the username of the currently logged-in user if no username is given.
if args[pytwis_clt_constants.ARG_USERNAME] is None:
succeeded, result = twis.get_user_profile(auth_secret[0])
if succeeded:
args[pytwis_clt_constants.ARG_USERNAME] = result[pytwis_constants.USERNAME_KEY]
print('No username is given, so use the currently logged-in user {}'.\
format(args[pytwis_clt_constants.ARG_USERNAME]))
else:
print("Couldn't get the username of the currently logged-in user with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
return
succeeded, result = twis.get_user_tweets(auth_secret[0],
args[pytwis_clt_constants.ARG_USERNAME],
args[pytwis_clt_constants.ARG_MAX_TWEETS])
if succeeded:
print('Got {} tweets posted by {}'.format(len(result[pytwis_constants.TWEETS_KEY]),
args[pytwis_clt_constants.ARG_USERNAME]))
print_tweets(result[pytwis_constants.TWEETS_KEY])
else:
print("Couldn't get the tweets posted by {} with error = {}".\
format(args[pytwis_clt_constants.ARG_USERNAME],
result[pytwis_constants.ERROR_KEY]))
else:
pass
def get_pytwis(epilog):
"""Connect to the Redis database and return the Pytwis instance.
Parameters
----------
epilog: str
An epilog string which will be displayed by ArgumentParser.
Returns
-------
pytwis: A Pytwis instance.
prompt: str
The prompt string which contains either the hostname and the port or the socket.
Raises
------
ValueError
If we fail to connect to the Redis server.
"""
# Note that we set the conflict handler of ArgumentParser to 'resolve' because we reuse
# the short help option '-h' for the host name.
parser = argparse.ArgumentParser(conflict_handler="resolve",
formatter_class=argparse.RawDescriptionHelpFormatter,
description=\
'Connect to the Redis database of a Twitter clone and '
'then run commands to access and update the database.',
epilog=epilog)
parser.add_argument('-h', '--hostname', nargs='?', default='127.0.0.1',
help='''the Redis server hostname. If the option is not specified,
will be defaulted to 127.0.0.1. If the option is specified but
no value is given after the option, then the help information
is displayed instead.
''')
parser.add_argument('-p', '--port', default=6379,
help='''the Redis server port. If the option is not specified, will
be defaulted to 6379.
''')
parser.add_argument('-s', '--socket', default='',
help='''the Redis server socket (usually /tmp/redis.sock). If it is
given, it will override hostname and port. Make sure that the
unixsocket parameter is defined in your redis.conf file. It’s
commented out by default.
''')
parser.add_argument('-n', '--db', default=0,
help='''the Redis server database. If the option is not specified,
will be defaulted to 0.
''')
parser.add_argument('-a', '--password', default='',
help='''the Redis server password. If the option not specified,
will be defaulted to an empty string.
''')
args = parser.parse_args()
# If no value is given after the option '-h', then the help information is displayed.
if args.hostname is None:
parser.print_help()
return 0
if args.socket:
print('The input Redis server socket is {}'.format(args.socket))
prompt = args.socket
else:
print('The input Redis server hostname is {}.'.format(args.hostname))
print('The input Redis server port is {}.'.format(args.port))
prompt = '{}:{}'.format(args.hostname, args.port)
print('The input Redis server database is {}.'.format(args.db))
if args.password != '':
print('The input Redis server password is "{}".'.format(args.password))
else:
print('The input Redis server password is empty.')
try:
if args.socket:
twis = pytwis.Pytwis(socket=args.socket,
db=args.db,
password=args.password)
else:
twis = pytwis.Pytwis(hostname=args.hostname,
port=args.port,
db=args.db,
password=args.password)
return twis, prompt
except ValueError as excep:
print('Failed to connect to the Redis server: {}'.format(str(excep)),
file=sys.stderr)
return None, None
def pytwis_clt():
"""The main routine of this command-line tool."""
epilog = '''After launching `pytwis_clt.py`, you will be able to use the following commands:
* Register a new user:
127.0.0.1:6379> register {username} {password}
* Log into a user:
127.0.0.1:6379> login {username} {password}
* Log out of a user:
127.0.0.1:6379> logout
* Change the password:
127.0.0.1:6379> changepwd {old_password} {new_password} {confirmed_new_password}
* Get the profile of the current user:
127.0.0.1:6379> userprofile
* Post a tweet:
127.0.0.1:6379> post {tweet}
* Follow a user:
127.0.0.1:6379> follow {followee_username}
* Unfollow a user:
127.0.0.1:6379> unfollow {followee_username}
* Get the follower list:
127.0.0.1:6379> followers
* Get the following list:
127.0.0.1:6379> followings
* Get the timeline:
127.0.0.1:6379> timeline
127.0.0.1:6379> timeline {max_tweet_count}
Note that if a user is logged in, `timeline` will return the user timeline;
otherwise `timeline` will return the general timeline.
* Get the tweets posted by a user:
127.0.0.1:6379> tweetsby
127.0.0.1:6379> tweetsby {username}
127.0.0.1:6379> tweetsby {username} {max_tweet_count}
Note that if no username is given, `tweetsby` will return the tweets posted
by the currently logged-in user.
* Exit the program:
127.0.0.1:6379> exit
127.0.0.1:6379> quit
'''
twis, prompt = get_pytwis(epilog)
if twis is None:
return -1
auth_secret = ['']
while True:
try:
arg_dict = pytwis_command_parser(
input('Please enter a command '
'(register, login, logout, changepwd, userprofile, post, '
'follow, unfollow, followers, followings, timeline, tweetsby):\n{}> '\
.format(prompt)))
if arg_dict[pytwis_clt_constants.ARG_COMMAND] == pytwis_clt_constants.CMD_EXIT \
or arg_dict[pytwis_clt_constants.ARG_COMMAND] == pytwis_clt_constants.CMD_QUIT:
# Log out of the current user before exiting.
if auth_secret[0]:
pytwis_command_processor(twis, auth_secret,
{pytwis_clt_constants.ARG_COMMAND:
pytwis_clt_constants.CMD_LOGOUT})
print('pytwis is exiting.')
return 0
except ValueError as excep:
print('Invalid pytwis command: {}'.format(str(excep)),
file=sys.stderr)
continue
pytwis_command_processor(twis, auth_secret, arg_dict)
if __name__ == "__main__":
pytwis_clt()
|
renweizhukov/pytwis | pytwis/pytwis_clt.py | pytwis_command_processor | python | def pytwis_command_processor(twis, auth_secret, args):
command = args[pytwis_clt_constants.ARG_COMMAND]
if command == pytwis_clt_constants.CMD_REGISTER:
succeeded, result = twis.register(args[pytwis_clt_constants.ARG_USERNAME],
args[pytwis_clt_constants.ARG_PASSWORD])
if succeeded:
print('Registered {}'.format(args[pytwis_clt_constants.ARG_USERNAME]))
else:
print("Couldn't register {} with error = {}".\
format(args[pytwis_clt_constants.ARG_USERNAME],
result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_LOGIN:
succeeded, result = twis.login(args[pytwis_clt_constants.ARG_USERNAME],
args[pytwis_clt_constants.ARG_PASSWORD])
if succeeded:
auth_secret[0] = result[pytwis_constants.AUTH_KEY]
print('Logged into username {}'.format(args[pytwis_clt_constants.ARG_USERNAME]))
else:
print("Couldn't log into username {} with error = {}".\
format(args[pytwis_clt_constants.ARG_USERNAME],
result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_LOGOUT:
succeeded, result = twis.logout(auth_secret[0])
if succeeded:
auth_secret[0] = result[pytwis_constants.AUTH_KEY]
print('Logged out of username {}'.format(result[pytwis_constants.USERNAME_KEY]))
else:
print("Couldn't log out with error = {}".format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
succeeded, result = twis.change_password(auth_secret[0],
args[pytwis_clt_constants.ARG_OLD_PASSWORD],
args[pytwis_clt_constants.ARG_NEW_PASSWORD])
if succeeded:
auth_secret[0] = result[pytwis_constants.AUTH_KEY]
print('Changed the password')
else:
print("Couldn't change the password with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_GET_USER_PROFILE:
succeeded, result = twis.get_user_profile(auth_secret[0])
if succeeded:
print('Got the user profile')
print('=' * 20)
for key, value in result.items():
print('{}: {}'.format(key, value))
print('=' * 20)
else:
print("Couldn't get the user profile with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_POST:
succeeded, result = twis.post_tweet(auth_secret[0], args['tweet'])
if succeeded:
print('Posted the tweet')
else:
print("Couldn't post the tweet with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_FOLLOW:
succeeded, result = twis.follow(auth_secret[0],
args[pytwis_clt_constants.ARG_FOLLOWEE])
if succeeded:
print('Followed username {}'.format(args[pytwis_clt_constants.ARG_FOLLOWEE]))
else:
print("Couldn't follow the username {} with error = {}".\
format(args[pytwis_clt_constants.ARG_FOLLOWEE],
result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_UNFOLLOW:
succeeded, result = twis.unfollow(auth_secret[0],
args[pytwis_clt_constants.ARG_FOLLOWEE])
if succeeded:
print('Unfollowed username {}'.format(args[pytwis_clt_constants.ARG_FOLLOWEE]))
else:
print("Couldn't unfollow the username {} with error = {}".\
format(args[pytwis_clt_constants.ARG_FOLLOWEE],
result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_GET_FOLLOWERS:
succeeded, result = twis.get_followers(auth_secret[0])
if succeeded:
print('Got the list of {} followers'.\
format(len(result[pytwis_constants.FOLLOWER_LIST_KEY])))
print('=' * 20)
for follower in result[pytwis_constants.FOLLOWER_LIST_KEY]:
print('\t' + follower)
print('=' * 20)
else:
print("Couldn't get the follower list with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
succeeded, result = twis.get_following(auth_secret[0])
if succeeded:
print('Got the list of {} followings'.\
format(len(result[pytwis_constants.FOLLOWING_LIST_KEY])))
print('=' * 60)
for following in result[pytwis_constants.FOLLOWING_LIST_KEY]:
print('\t' + following)
print('=' * 60)
else:
print("Couldn't get the following list with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_TIMELINE:
succeeded, result = twis.get_timeline(auth_secret[0],
args[pytwis_clt_constants.ARG_MAX_TWEETS])
if succeeded:
if auth_secret[0] != '':
print('Got {} tweets in the user timeline'.\
format(len(result[pytwis_constants.TWEETS_KEY])))
else:
print('Got {} tweets in the general timeline'.\
format(len(result[pytwis_constants.TWEETS_KEY])))
print_tweets(result[pytwis_constants.TWEETS_KEY])
else:
if auth_secret[0] != '':
print("Couldn't get the user timeline with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
else:
print("Couldn't get the general timeline with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_GET_USER_TWEETS:
# Get the username of the currently logged-in user if no username is given.
if args[pytwis_clt_constants.ARG_USERNAME] is None:
succeeded, result = twis.get_user_profile(auth_secret[0])
if succeeded:
args[pytwis_clt_constants.ARG_USERNAME] = result[pytwis_constants.USERNAME_KEY]
print('No username is given, so use the currently logged-in user {}'.\
format(args[pytwis_clt_constants.ARG_USERNAME]))
else:
print("Couldn't get the username of the currently logged-in user with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
return
succeeded, result = twis.get_user_tweets(auth_secret[0],
args[pytwis_clt_constants.ARG_USERNAME],
args[pytwis_clt_constants.ARG_MAX_TWEETS])
if succeeded:
print('Got {} tweets posted by {}'.format(len(result[pytwis_constants.TWEETS_KEY]),
args[pytwis_clt_constants.ARG_USERNAME]))
print_tweets(result[pytwis_constants.TWEETS_KEY])
else:
print("Couldn't get the tweets posted by {} with error = {}".\
format(args[pytwis_clt_constants.ARG_USERNAME],
result[pytwis_constants.ERROR_KEY]))
else:
pass | Process the parsed command.
Parameters
----------
twis: Pytwis
A Pytwis instance which interacts with the Redis database of the Twitter toy clone.
auth_secret: str
The authentication secret of a logged-in user.
args:
The parsed command output by pytwis_command_parser(). | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis_clt.py#L362-L515 | [
"def print_tweets(tweets):\n \"\"\"Print a list of tweets one by one separated by \"=\"s.\n\n Parameters\n ----------\n tweets: list(dict)\n A list of tweets. Each tweet is a dict containing the username of the tweet's author,\n the post time, and the tweet body.\n \"\"\"\n print('=' * 60)\n for index, tweet in enumerate(tweets):\n print('-' * 60)\n print('Tweet {}:'.format(index))\n print('Username:', tweet[pytwis_constants.USERNAME_KEY])\n print('Time:',\n datetime.datetime.fromtimestamp(int(tweet[pytwis_constants.TWEET_UNIXTIME_KEY])).\\\n strftime('%Y-%m-%d %H:%M:%S'))\n print('Body:\\n\\t', tweet[pytwis_constants.TWEET_BODY_KEY])\n print('-' * 60)\n print('=' * 60)\n"
] | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A command-line tool which uses `pytwis` to interact with the Redis database of
a Twitter toy clone.
To see the help information,
.. code:: bash
$ ./pytwis_clt.py -h
$ ./pytwis_clt.py --help
After launching `pytwis_clt.py`, you will be able to use the following commands:
* Register a new user:
.. code:: bash
127.0.0.1:6379> register {username} {password}
* Log into a user:
.. code:: bash
127.0.0.1:6379> login {username} {password}
* Log out of a user:
.. code:: bash
127.0.0.1:6379> logout
* Change the password:
.. code:: bash
127.0.0.1:6379> changepwd {old_password} {new_password} {confirmed_new_password}
* Get the profile of the current user:
.. code:: bash
127.0.0.1:6379> userprofile
* Post a tweet:
.. code:: bash
127.0.0.1:6379> post {tweet}
* Follow a user:
.. code:: bash
127.0.0.1:6379> follow {followee_username}
* Unfollow a user:
.. code:: bash
127.0.0.1:6379> unfollow {followee_username}
* Get the follower list:
.. code:: bash
127.0.0.1:6379> followers
* Get the following list:
.. code:: bash
127.0.0.1:6379> followings
* Get the timeline:
.. code:: bash
127.0.0.1:6379> timeline
127.0.0.1:6379> timeline {max_tweet_count}
Note that if a user is logged in, `timeline` will return the user timeline;
otherwise `timeline` will return the general timeline.
* Get the tweets posted by a user:
.. code:: bash
127.0.0.1:6379> tweetsby
127.0.0.1:6379> tweetsby {username}
127.0.0.1:6379> tweetsby {username} {max_tweet_count}
Note that if no username is given, `tweetsby` will return the tweets posted
by the currently logged-in user.
* Exit the program:
.. code:: bash
127.0.0.1:6379> exit
127.0.0.1:6379> quit
"""
import argparse
import datetime
import sys
import parse
if __package__:
# If this module is imported as part of the pytwis package, then use
# the relative import.
from . import pytwis_constants
from . import pytwis_clt_constants
from . import pytwis
else:
# If this module is executed locally as a script, then don't use
# the relative import.
import pytwis_constants # pylint: disable=import-error
import pytwis_clt_constants # pylint: disable=import-error
import pytwis
def validate_command(raw_command):
    """Validate the command input.

    Currently we only check the number of arguments according to the command type.

    Parameters
    ----------
    raw_command: str
        The raw command input, e.g., `register xxxxxx yyyyyy`.

    Raises
    ------
    ValueError
        If the raw command input doesn't have the correct number of arguments.
    """
    tokens = raw_command.split()
    if not tokens:
        return

    command = tokens[0]
    arg_count = len(tokens) - 1
    cons = pytwis_clt_constants

    # Per-command constraint table:
    # command -> (min required args, max allowed args or None, usage message).
    arg_specs = {
        cons.CMD_REGISTER:
            (2, None, '{cmd} {{{arg1}}} {{{arg2}}}'.format(
                cmd=cons.CMD_REGISTER,
                arg1=cons.ARG_USERNAME,
                arg2=cons.ARG_PASSWORD)),
        cons.CMD_LOGIN:
            (2, None, '{cmd} {{{arg1}}} {{{arg2}}}'.format(
                cmd=cons.CMD_LOGIN,
                arg1=cons.ARG_USERNAME,
                arg2=cons.ARG_PASSWORD)),
        cons.CMD_LOGOUT: (0, None, None),
        cons.CMD_CHANGE_PASSWORD:
            (3, None, '{cmd} {{{arg1}}} {{{arg2}}} {{{arg3}}}'.format(
                cmd=cons.CMD_CHANGE_PASSWORD,
                arg1=cons.ARG_OLD_PASSWORD,
                arg2=cons.ARG_NEW_PASSWORD,
                arg3=cons.ARG_CONFIRMED_NEW_PASSWORD)),
        cons.CMD_GET_USER_PROFILE: (0, None, None),
        cons.CMD_POST:
            (1, None, '{cmd} {{{arg}}}'.format(cmd=cons.CMD_POST,
                                               arg=cons.ARG_TWEET)),
        cons.CMD_FOLLOW:
            (1, None, '{cmd} {{{arg}}}'.format(cmd=cons.CMD_FOLLOW,
                                               arg=cons.ARG_FOLLOWEE)),
        cons.CMD_UNFOLLOW:
            (1, None, '{cmd} {{{arg}}}'.format(cmd=cons.CMD_UNFOLLOW,
                                               arg=cons.ARG_FOLLOWEE)),
        cons.CMD_GET_FOLLOWERS: (0, None, None),
        cons.CMD_GET_FOLLOWINGS: (0, None, None),
        cons.CMD_TIMELINE:
            (0, 1, '{cmd} {{{arg}}} or {cmd}'.format(
                cmd=cons.CMD_TIMELINE,
                arg=cons.ARG_MAX_TWEETS)),
        cons.CMD_GET_USER_TWEETS:
            (0, 2, '{cmd} {{{arg1}}} {{{arg2}}} or {cmd} {{{arg1}}} or {cmd}'.format(
                cmd=cons.CMD_GET_USER_TWEETS,
                arg1=cons.ARG_USERNAME,
                arg2=cons.ARG_MAX_TWEETS)),
        cons.CMD_EXIT: (0, None, None),
        cons.CMD_QUIT: (0, None, None),
    }

    if command not in arg_specs:
        raise ValueError('Invalid pytwis command')

    min_args, max_args, usage = arg_specs[command]
    if arg_count < min_args or (max_args is not None and arg_count > max_args):
        raise ValueError(usage)
def pytwis_command_parser(raw_command):
    """Parse the command input into a dict of named arguments.

    Parameters
    ----------
    raw_command: str
        The raw command input, e.g., `register xxxxxx yyyyyy`.

    Returns
    -------
    arg_dict: dict(str, str or int)
        The parsed command output, e.g.,
        {'command':'register', 'username': <username>, 'password': <password>} for `register`.

    Raises
    ------
    ValueError
        If the raw command can't be parsed correctly, e.g., it has an incorrect number of
        arguments or incorrect arguments.
    """
    # Check the per-command argument counts first; this raises ValueError on
    # malformed input before any parsing is attempted.
    validate_command(raw_command)

    # Some command (e.g., logout) may not have arguments.
    # Separate the command from its arguments.
    splited_raw_command = raw_command.split(' ', 1)
    arg_dict = {}
    if splited_raw_command[0] == pytwis_clt_constants.CMD_REGISTER:
        # register must have two arguments: username and password.
        args = splited_raw_command[1]
        arg_dict = parse.parse('{{{arg1}}} {{{arg2}}}'.\
                               format(arg1=pytwis_clt_constants.ARG_USERNAME,
                                      arg2=pytwis_clt_constants.ARG_PASSWORD),
                               args)
        if arg_dict is None:
            raise ValueError('{} has incorrect arguments'.format(pytwis_clt_constants.CMD_REGISTER))
        elif ' ' in arg_dict[pytwis_clt_constants.ARG_PASSWORD]:
            # Reject passwords containing spaces.
            raise ValueError("password can't contain spaces")

        print('{}: username = {}, password = {}'.\
              format(pytwis_clt_constants.CMD_REGISTER,
                     arg_dict[pytwis_clt_constants.ARG_USERNAME],
                     arg_dict[pytwis_clt_constants.ARG_PASSWORD]))
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_LOGIN:
        # login must have two arguments: username and password.
        args = splited_raw_command[1]
        arg_dict = parse.parse('{{{arg1}}} {{{arg2}}}'.\
                               format(arg1=pytwis_clt_constants.ARG_USERNAME,
                                      arg2=pytwis_clt_constants.ARG_PASSWORD),
                               args)
        if arg_dict is None:
            raise ValueError('{} has incorrect arguments'.format(pytwis_clt_constants.CMD_LOGIN))

        print('{}: username = {}, password = {}'.\
              format(pytwis_clt_constants.CMD_LOGIN,
                     arg_dict[pytwis_clt_constants.ARG_USERNAME],
                     arg_dict[pytwis_clt_constants.ARG_PASSWORD]))
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_LOGOUT:
        # logout doesn't have any arguments.
        pass
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
        # changepwd must have three arguments: old_password, new_password, and
        # confirmed_new_password.
        args = splited_raw_command[1]
        arg_dict = parse.parse('{{{arg1}}} {{{arg2}}} {{{arg3}}}'.\
                               format(arg1=pytwis_clt_constants.ARG_OLD_PASSWORD,
                                      arg2=pytwis_clt_constants.ARG_NEW_PASSWORD,
                                      arg3=pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD),
                               args)
        if arg_dict is None:
            raise ValueError('{} has incorrect arguments'.\
                             format(pytwis_clt_constants.CMD_CHANGE_PASSWORD))
        elif arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD] !=\
             arg_dict[pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD]:
            raise ValueError('The confirmed new password is different from the new password')
        elif arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD] ==\
             arg_dict[pytwis_clt_constants.ARG_OLD_PASSWORD]:
            raise ValueError('The new password is the same as the old password')

        print('{}: old = {}, new = {}'.format(pytwis_clt_constants.CMD_CHANGE_PASSWORD,
                                              arg_dict[pytwis_clt_constants.ARG_OLD_PASSWORD],
                                              arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD]))
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_USER_PROFILE:
        # userprofile doesn't have any arguments.
        pass
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_POST:
        # post must have one argument: tweet
        arg_dict = {pytwis_clt_constants.ARG_TWEET: splited_raw_command[1]}
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_FOLLOW:
        # follow must have one argument: followee.
        arg_dict = {pytwis_clt_constants.ARG_FOLLOWEE: splited_raw_command[1]}
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_UNFOLLOW:
        # unfollow must have one argument: followee.
        arg_dict = {pytwis_clt_constants.ARG_FOLLOWEE: splited_raw_command[1]}
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWERS:
        # followers doesn't have any arguments.
        pass
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
        # followings doesn't have any arguments.
        pass
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_TIMELINE:
        # timeline has either zero or one argument.
        # -1 means "no limit on the number of tweets".
        max_cnt_tweets = -1
        if len(splited_raw_command) >= 2:
            max_cnt_tweets = int(splited_raw_command[1])

        arg_dict = {pytwis_clt_constants.ARG_MAX_TWEETS: max_cnt_tweets}
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_USER_TWEETS:
        # tweetsby has either zero or one or two arguments.
        arg_dict = {pytwis_clt_constants.ARG_USERNAME: None,
                    pytwis_clt_constants.ARG_MAX_TWEETS: -1}
        if len(splited_raw_command) >= 2:
            # tweetsby has either one or two arguments.
            args = splited_raw_command[1]
            arg_dict = parse.parse('{{{arg1}}} {{{arg2}:d}}'.\
                                   format(arg1=pytwis_clt_constants.ARG_USERNAME,
                                          arg2=pytwis_clt_constants.ARG_MAX_TWEETS),
                                   args)
            if arg_dict is None:
                # tweetsby has only one argument.
                arg_dict = {pytwis_clt_constants.ARG_USERNAME: args}
                arg_dict[pytwis_clt_constants.ARG_MAX_TWEETS] = -1
    elif (splited_raw_command[0] == pytwis_clt_constants.CMD_EXIT) or\
         (splited_raw_command[0] == pytwis_clt_constants.CMD_QUIT):
        # exit or quit doesn't have any arguments.
        pass
    else:
        pass

    # parse.parse() returns a parse.Result on success; convert it into a plain
    # dict so that callers always receive a dict.
    if isinstance(arg_dict, parse.Result):
        arg_dict = arg_dict.named

    arg_dict[pytwis_clt_constants.ARG_COMMAND] = splited_raw_command[0]
    return arg_dict
def print_tweets(tweets):
    """Print a list of tweets one by one separated by "="s.

    Parameters
    ----------
    tweets: list(dict)
        A list of tweets. Each tweet is a dict containing the username of the tweet's author,
        the post time, and the tweet body.
    """
    banner = '=' * 60
    divider = '-' * 60

    print(banner)
    for index, tweet in enumerate(tweets):
        print(divider)
        print('Tweet {}:'.format(index))
        print('Username:', tweet[pytwis_constants.USERNAME_KEY])
        # The stored timestamp is a Unix time; render it in local time.
        posted_at = datetime.datetime.fromtimestamp(
            int(tweet[pytwis_constants.TWEET_UNIXTIME_KEY]))
        print('Time:', posted_at.strftime('%Y-%m-%d %H:%M:%S'))
        print('Body:\n\t', tweet[pytwis_constants.TWEET_BODY_KEY])
        print(divider)
    print(banner)
def get_pytwis(epilog):
    """Connect to the Redis database and return the Pytwis instance.

    Parameters
    ----------
    epilog: str
        An epilog string which will be displayed by ArgumentParser.

    Returns
    -------
    twis: pytwis.Pytwis or None
        A Pytwis instance, or None if only the help information was displayed
        or the connection to the Redis server failed.
    prompt: str or None
        The prompt string which contains either the hostname and the port or
        the socket, or None if no Pytwis instance was created.
    """
    # Note that we set the conflict handler of ArgumentParser to 'resolve' because we reuse
    # the short help option '-h' for the host name.
    parser = argparse.ArgumentParser(conflict_handler="resolve",
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description=\
                                     'Connect to the Redis database of a Twitter clone and '
                                     'then run commands to access and update the database.',
                                     epilog=epilog)
    # nargs='?' lets '-h' appear without a value; in that case args.hostname is
    # None and the help information is displayed below.
    parser.add_argument('-h', '--hostname', nargs='?', default='127.0.0.1',
                        help='''the Redis server hostname. If the option is not specified,
                        will be defaulted to 127.0.0.1. If the option is specified but
                        no value is given after the option, then the help information
                        is displayed instead.
                        ''')
    parser.add_argument('-p', '--port', default=6379,
                        help='''the Redis server port. If the option is not specified, will
                        be defaulted to 6379.
                        ''')
    parser.add_argument('-s', '--socket', default='',
                        help='''the Redis server socket (usually /tmp/redis.sock). If it is
                        given, it will override hostname and port. Make sure that the
                        unixsocket parameter is defined in your redis.conf file. It’s
                        commented out by default.
                        ''')
    parser.add_argument('-n', '--db', default=0,
                        help='''the Redis server database. If the option is not specified,
                        will be defaulted to 0.
                        ''')
    parser.add_argument('-a', '--password', default='',
                        help='''the Redis server password. If the option not specified,
                        will be defaulted to an empty string.
                        ''')

    args = parser.parse_args()

    # If no value is given after the option '-h', then the help information is displayed.
    if args.hostname is None:
        parser.print_help()
        # BUG FIX: this path used to `return 0` (a bare int), which broke the
        # tuple unpacking `twis, prompt = get_pytwis(epilog)` at the call site
        # with a TypeError. Return (None, None) so the caller exits cleanly.
        return None, None

    if args.socket:
        print('The input Redis server socket is {}'.format(args.socket))
        prompt = args.socket
    else:
        print('The input Redis server hostname is {}.'.format(args.hostname))
        print('The input Redis server port is {}.'.format(args.port))
        prompt = '{}:{}'.format(args.hostname, args.port)
    print('The input Redis server database is {}.'.format(args.db))
    if args.password != '':
        print('The input Redis server password is "{}".'.format(args.password))
    else:
        print('The input Redis server password is empty.')

    try:
        # A Unix socket, if given, takes precedence over hostname and port.
        if args.socket:
            twis = pytwis.Pytwis(socket=args.socket,
                                 db=args.db,
                                 password=args.password)
        else:
            twis = pytwis.Pytwis(hostname=args.hostname,
                                 port=args.port,
                                 db=args.db,
                                 password=args.password)
        return twis, prompt
    except ValueError as excep:
        print('Failed to connect to the Redis server: {}'.format(str(excep)),
              file=sys.stderr)
        return None, None
def pytwis_clt():
    """The main routine of this command-line tool.

    Connects to the Redis database, then reads commands from stdin in a loop
    until `exit` or `quit` is entered. Returns 0 on a normal exit and -1 if
    the database connection could not be established.
    """
    # The epilog is user-visible help text appended to the --help output.
    epilog = '''After launching `pytwis_clt.py`, you will be able to use the following commands:

    * Register a new user:

        127.0.0.1:6379> register {username} {password}

    * Log into a user:

        127.0.0.1:6379> login {username} {password}

    * Log out of a user:

        127.0.0.1:6379> logout

    * Change the password:

        127.0.0.1:6379> changepwd {old_password} {new_password} {confirmed_new_password}

    * Get the profile of the current user:

        127.0.0.1:6379> userprofile

    * Post a tweet:

        127.0.0.1:6379> post {tweet}

    * Follow a user:

        127.0.0.1:6379> follow {followee_username}

    * Unfollow a user:

        127.0.0.1:6379> unfollow {followee_username}

    * Get the follower list:

        127.0.0.1:6379> followers

    * Get the following list:

        127.0.0.1:6379> followings

    * Get the timeline:

        127.0.0.1:6379> timeline
        127.0.0.1:6379> timeline {max_tweet_count}

    Note that if a user is logged in, `timeline` will return the user timeline;
    otherwise `timeline` will return the general timeline.

    * Get the tweets posted by a user:

        127.0.0.1:6379> tweetsby
        127.0.0.1:6379> tweetsby {username}
        127.0.0.1:6379> tweetsby {username} {max_tweet_count}

    Note that if no username is given, `tweetsby` will return the tweets posted
    by the currently logged-in user.

    * Exit the program:

        127.0.0.1:6379> exit
        127.0.0.1:6379> quit
    '''

    twis, prompt = get_pytwis(epilog)
    if twis is None:
        return -1

    # auth_secret is a single-element list so that pytwis_command_processor()
    # can update the secret in place on login/logout/changepwd.
    auth_secret = ['']
    while True:
        try:
            arg_dict = pytwis_command_parser(
                input('Please enter a command '
                      '(register, login, logout, changepwd, userprofile, post, '
                      'follow, unfollow, followers, followings, timeline, tweetsby):\n{}> '\
                      .format(prompt)))
            if arg_dict[pytwis_clt_constants.ARG_COMMAND] == pytwis_clt_constants.CMD_EXIT \
               or arg_dict[pytwis_clt_constants.ARG_COMMAND] == pytwis_clt_constants.CMD_QUIT:
                # Log out of the current user before exiting.
                if auth_secret[0]:
                    pytwis_command_processor(twis, auth_secret,
                                             {pytwis_clt_constants.ARG_COMMAND:
                                              pytwis_clt_constants.CMD_LOGOUT})
                print('pytwis is exiting.')
                return 0
        except ValueError as excep:
            # Malformed input: report it and prompt again.
            print('Invalid pytwis command: {}'.format(str(excep)),
                  file=sys.stderr)
            continue

        pytwis_command_processor(twis, auth_secret, arg_dict)
# Allow running this module directly as a script.
if __name__ == "__main__":
    pytwis_clt()
|
renweizhukov/pytwis | pytwis/pytwis_clt.py | get_pytwis | python | def get_pytwis(epilog):
# Note that we set the conflict handler of ArgumentParser to 'resolve' because we reuse
# the short help option '-h' for the host name.
parser = argparse.ArgumentParser(conflict_handler="resolve",
formatter_class=argparse.RawDescriptionHelpFormatter,
description=\
'Connect to the Redis database of a Twitter clone and '
'then run commands to access and update the database.',
epilog=epilog)
parser.add_argument('-h', '--hostname', nargs='?', default='127.0.0.1',
help='''the Redis server hostname. If the option is not specified,
will be defaulted to 127.0.0.1. If the option is specified but
no value is given after the option, then the help information
is displayed instead.
''')
parser.add_argument('-p', '--port', default=6379,
help='''the Redis server port. If the option is not specified, will
be defaulted to 6379.
''')
parser.add_argument('-s', '--socket', default='',
help='''the Redis server socket (usually /tmp/redis.sock). If it is
given, it will override hostname and port. Make sure that the
unixsocket parameter is defined in your redis.conf file. It’s
commented out by default.
''')
parser.add_argument('-n', '--db', default=0,
help='''the Redis server database. If the option is not specified,
will be defaulted to 0.
''')
parser.add_argument('-a', '--password', default='',
help='''the Redis server password. If the option not specified,
will be defaulted to an empty string.
''')
args = parser.parse_args()
# If no value is given after the option '-h', then the help information is displayed.
if args.hostname is None:
parser.print_help()
return 0
if args.socket:
print('The input Redis server socket is {}'.format(args.socket))
prompt = args.socket
else:
print('The input Redis server hostname is {}.'.format(args.hostname))
print('The input Redis server port is {}.'.format(args.port))
prompt = '{}:{}'.format(args.hostname, args.port)
print('The input Redis server database is {}.'.format(args.db))
if args.password != '':
print('The input Redis server password is "{}".'.format(args.password))
else:
print('The input Redis server password is empty.')
try:
if args.socket:
twis = pytwis.Pytwis(socket=args.socket,
db=args.db,
password=args.password)
else:
twis = pytwis.Pytwis(hostname=args.hostname,
port=args.port,
db=args.db,
password=args.password)
return twis, prompt
except ValueError as excep:
print('Failed to connect to the Redis server: {}'.format(str(excep)),
file=sys.stderr)
return None, None | Connect to the Redis database and return the Pytwis instance.
Parameters
----------
epilog: str
An epilog string which will be displayed by ArgumentParser.
Returns
-------
pytwis: A Pytwis instance.
prompt: str
The prompt string which contains either the hostname and the port or the socket.
Raises
------
ValueError
If we fail to connect to the Redis server. | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis_clt.py#L518-L604 | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A command-line tool which uses `pytwis` to interact with the Redis database of
a Twitter toy clone.
To see the help information,
.. code:: bash
$ ./pytwis_clt.py -h
$ ./pytwis_clt.py --help
After launching `pytwis_clt.py`, you will be able to use the following commands:
* Register a new user:
.. code:: bash
127.0.0.1:6379> register {username} {password}
* Log into a user:
.. code:: bash
127.0.0.1:6379> login {username} {password}
* Log out of a user:
.. code:: bash
127.0.0.1:6379> logout
* Change the password:
.. code:: bash
127.0.0.1:6379> changepwd {old_password} {new_password} {confirmed_new_password}
* Get the profile of the current user:
.. code:: bash
127.0.0.1:6379> userprofile
* Post a tweet:
.. code:: bash
127.0.0.1:6379> post {tweet}
* Follow a user:
.. code:: bash
127.0.0.1:6379> follow {followee_username}
* Unfollow a user:
.. code:: bash
127.0.0.1:6379> unfollow {followee_username}
* Get the follower list:
.. code:: bash
127.0.0.1:6379> followers
* Get the following list:
.. code:: bash
127.0.0.1:6379> followings
* Get the timeline:
.. code:: bash
127.0.0.1:6379> timeline
127.0.0.1:6379> timeline {max_tweet_count}
Note that if a user is logged in, `timeline` will return the user timeline;
otherwise `timeline` will return the general timeline.
* Get the tweets posted by a user:
.. code:: bash
127.0.0.1:6379> tweetsby
127.0.0.1:6379> tweetsby {username}
127.0.0.1:6379> tweetsby {username} {max_tweet_count}
Note that if no username is given, `tweetsby` will return the tweets posted
by the currently logged-in user.
* Exit the program:
.. code:: bash
127.0.0.1:6379> exit
127.0.0.1:6379> quit
"""
import argparse
import datetime
import sys
import parse
if __package__:
# If this module is imported as part of the pytwis package, then use
# the relative import.
from . import pytwis_constants
from . import pytwis_clt_constants
from . import pytwis
else:
# If this module is executed locally as a script, then don't use
# the relative import.
import pytwis_constants # pylint: disable=import-error
import pytwis_clt_constants # pylint: disable=import-error
import pytwis
def validate_command(raw_command):
    """Validate the command input.

    Currently we only check the number of arguments according to the command type.

    Parameters
    ----------
    raw_command: str
        The raw command input, e.g., `register xxxxxx yyyyyy`.

    Raises
    ------
    ValueError
        If the raw command input doesn't have the correct number of arguments.
    """
    tokens = raw_command.split()
    if not tokens:
        return

    command = tokens[0]
    arg_count = len(tokens) - 1
    cons = pytwis_clt_constants

    # Per-command constraint table:
    # command -> (min required args, max allowed args or None, usage message).
    arg_specs = {
        cons.CMD_REGISTER:
            (2, None, '{cmd} {{{arg1}}} {{{arg2}}}'.format(
                cmd=cons.CMD_REGISTER,
                arg1=cons.ARG_USERNAME,
                arg2=cons.ARG_PASSWORD)),
        cons.CMD_LOGIN:
            (2, None, '{cmd} {{{arg1}}} {{{arg2}}}'.format(
                cmd=cons.CMD_LOGIN,
                arg1=cons.ARG_USERNAME,
                arg2=cons.ARG_PASSWORD)),
        cons.CMD_LOGOUT: (0, None, None),
        cons.CMD_CHANGE_PASSWORD:
            (3, None, '{cmd} {{{arg1}}} {{{arg2}}} {{{arg3}}}'.format(
                cmd=cons.CMD_CHANGE_PASSWORD,
                arg1=cons.ARG_OLD_PASSWORD,
                arg2=cons.ARG_NEW_PASSWORD,
                arg3=cons.ARG_CONFIRMED_NEW_PASSWORD)),
        cons.CMD_GET_USER_PROFILE: (0, None, None),
        cons.CMD_POST:
            (1, None, '{cmd} {{{arg}}}'.format(cmd=cons.CMD_POST,
                                               arg=cons.ARG_TWEET)),
        cons.CMD_FOLLOW:
            (1, None, '{cmd} {{{arg}}}'.format(cmd=cons.CMD_FOLLOW,
                                               arg=cons.ARG_FOLLOWEE)),
        cons.CMD_UNFOLLOW:
            (1, None, '{cmd} {{{arg}}}'.format(cmd=cons.CMD_UNFOLLOW,
                                               arg=cons.ARG_FOLLOWEE)),
        cons.CMD_GET_FOLLOWERS: (0, None, None),
        cons.CMD_GET_FOLLOWINGS: (0, None, None),
        cons.CMD_TIMELINE:
            (0, 1, '{cmd} {{{arg}}} or {cmd}'.format(
                cmd=cons.CMD_TIMELINE,
                arg=cons.ARG_MAX_TWEETS)),
        cons.CMD_GET_USER_TWEETS:
            (0, 2, '{cmd} {{{arg1}}} {{{arg2}}} or {cmd} {{{arg1}}} or {cmd}'.format(
                cmd=cons.CMD_GET_USER_TWEETS,
                arg1=cons.ARG_USERNAME,
                arg2=cons.ARG_MAX_TWEETS)),
        cons.CMD_EXIT: (0, None, None),
        cons.CMD_QUIT: (0, None, None),
    }

    if command not in arg_specs:
        raise ValueError('Invalid pytwis command')

    min_args, max_args, usage = arg_specs[command]
    if arg_count < min_args or (max_args is not None and arg_count > max_args):
        raise ValueError(usage)
def pytwis_command_parser(raw_command):
    """Parse the command input into a dict of named arguments.

    Parameters
    ----------
    raw_command: str
        The raw command input, e.g., `register xxxxxx yyyyyy`.

    Returns
    -------
    arg_dict: dict(str, str or int)
        The parsed command output, e.g.,
        {'command':'register', 'username': <username>, 'password': <password>} for `register`.

    Raises
    ------
    ValueError
        If the raw command can't be parsed correctly, e.g., it has an incorrect number of
        arguments or incorrect arguments.
    """
    # Check the per-command argument counts first; this raises ValueError on
    # malformed input before any parsing is attempted.
    validate_command(raw_command)

    # Some command (e.g., logout) may not have arguments.
    # Separate the command from its arguments.
    splited_raw_command = raw_command.split(' ', 1)
    arg_dict = {}
    if splited_raw_command[0] == pytwis_clt_constants.CMD_REGISTER:
        # register must have two arguments: username and password.
        args = splited_raw_command[1]
        arg_dict = parse.parse('{{{arg1}}} {{{arg2}}}'.\
                               format(arg1=pytwis_clt_constants.ARG_USERNAME,
                                      arg2=pytwis_clt_constants.ARG_PASSWORD),
                               args)
        if arg_dict is None:
            raise ValueError('{} has incorrect arguments'.format(pytwis_clt_constants.CMD_REGISTER))
        elif ' ' in arg_dict[pytwis_clt_constants.ARG_PASSWORD]:
            # Reject passwords containing spaces.
            raise ValueError("password can't contain spaces")

        print('{}: username = {}, password = {}'.\
              format(pytwis_clt_constants.CMD_REGISTER,
                     arg_dict[pytwis_clt_constants.ARG_USERNAME],
                     arg_dict[pytwis_clt_constants.ARG_PASSWORD]))
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_LOGIN:
        # login must have two arguments: username and password.
        args = splited_raw_command[1]
        arg_dict = parse.parse('{{{arg1}}} {{{arg2}}}'.\
                               format(arg1=pytwis_clt_constants.ARG_USERNAME,
                                      arg2=pytwis_clt_constants.ARG_PASSWORD),
                               args)
        if arg_dict is None:
            raise ValueError('{} has incorrect arguments'.format(pytwis_clt_constants.CMD_LOGIN))

        print('{}: username = {}, password = {}'.\
              format(pytwis_clt_constants.CMD_LOGIN,
                     arg_dict[pytwis_clt_constants.ARG_USERNAME],
                     arg_dict[pytwis_clt_constants.ARG_PASSWORD]))
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_LOGOUT:
        # logout doesn't have any arguments.
        pass
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
        # changepwd must have three arguments: old_password, new_password, and
        # confirmed_new_password.
        args = splited_raw_command[1]
        arg_dict = parse.parse('{{{arg1}}} {{{arg2}}} {{{arg3}}}'.\
                               format(arg1=pytwis_clt_constants.ARG_OLD_PASSWORD,
                                      arg2=pytwis_clt_constants.ARG_NEW_PASSWORD,
                                      arg3=pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD),
                               args)
        if arg_dict is None:
            raise ValueError('{} has incorrect arguments'.\
                             format(pytwis_clt_constants.CMD_CHANGE_PASSWORD))
        elif arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD] !=\
             arg_dict[pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD]:
            raise ValueError('The confirmed new password is different from the new password')
        elif arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD] ==\
             arg_dict[pytwis_clt_constants.ARG_OLD_PASSWORD]:
            raise ValueError('The new password is the same as the old password')

        print('{}: old = {}, new = {}'.format(pytwis_clt_constants.CMD_CHANGE_PASSWORD,
                                              arg_dict[pytwis_clt_constants.ARG_OLD_PASSWORD],
                                              arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD]))
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_USER_PROFILE:
        # userprofile doesn't have any arguments.
        pass
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_POST:
        # post must have one argument: tweet
        arg_dict = {pytwis_clt_constants.ARG_TWEET: splited_raw_command[1]}
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_FOLLOW:
        # follow must have one argument: followee.
        arg_dict = {pytwis_clt_constants.ARG_FOLLOWEE: splited_raw_command[1]}
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_UNFOLLOW:
        # unfollow must have one argument: followee.
        arg_dict = {pytwis_clt_constants.ARG_FOLLOWEE: splited_raw_command[1]}
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWERS:
        # followers doesn't have any arguments.
        pass
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
        # followings doesn't have any arguments.
        pass
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_TIMELINE:
        # timeline has either zero or one argument.
        # -1 means "no limit on the number of tweets".
        max_cnt_tweets = -1
        if len(splited_raw_command) >= 2:
            max_cnt_tweets = int(splited_raw_command[1])

        arg_dict = {pytwis_clt_constants.ARG_MAX_TWEETS: max_cnt_tweets}
    elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_USER_TWEETS:
        # tweetsby has either zero or one or two arguments.
        arg_dict = {pytwis_clt_constants.ARG_USERNAME: None,
                    pytwis_clt_constants.ARG_MAX_TWEETS: -1}
        if len(splited_raw_command) >= 2:
            # tweetsby has either one or two arguments.
            args = splited_raw_command[1]
            arg_dict = parse.parse('{{{arg1}}} {{{arg2}:d}}'.\
                                   format(arg1=pytwis_clt_constants.ARG_USERNAME,
                                          arg2=pytwis_clt_constants.ARG_MAX_TWEETS),
                                   args)
            if arg_dict is None:
                # tweetsby has only one argument.
                arg_dict = {pytwis_clt_constants.ARG_USERNAME: args}
                arg_dict[pytwis_clt_constants.ARG_MAX_TWEETS] = -1
    elif (splited_raw_command[0] == pytwis_clt_constants.CMD_EXIT) or\
         (splited_raw_command[0] == pytwis_clt_constants.CMD_QUIT):
        # exit or quit doesn't have any arguments.
        pass
    else:
        pass

    # parse.parse() returns a parse.Result on success; convert it into a plain
    # dict so that callers always receive a dict.
    if isinstance(arg_dict, parse.Result):
        arg_dict = arg_dict.named

    arg_dict[pytwis_clt_constants.ARG_COMMAND] = splited_raw_command[0]
    return arg_dict
def print_tweets(tweets):
    """Print a list of tweets one by one separated by "="s.

    Parameters
    ----------
    tweets: list(dict)
        A list of tweets. Each tweet is a dict containing the username of the tweet's author,
        the post time, and the tweet body.
    """
    banner = '=' * 60
    divider = '-' * 60

    print(banner)
    for index, tweet in enumerate(tweets):
        print(divider)
        print('Tweet {}:'.format(index))
        print('Username:', tweet[pytwis_constants.USERNAME_KEY])
        # The stored timestamp is a Unix time; render it in local time.
        posted_at = datetime.datetime.fromtimestamp(
            int(tweet[pytwis_constants.TWEET_UNIXTIME_KEY]))
        print('Time:', posted_at.strftime('%Y-%m-%d %H:%M:%S'))
        print('Body:\n\t', tweet[pytwis_constants.TWEET_BODY_KEY])
        print(divider)
    print(banner)
def pytwis_command_processor(twis, auth_secret, args):
    """Process the parsed command by dispatching it to the Pytwis API.

    Parameters
    ----------
    twis: Pytwis
        A Pytwis instance which interacts with the Redis database of the Twitter toy clone.
    auth_secret: list(str)
        A single-element list holding the authentication secret of the logged-in
        user; a list is used so login/logout/changepwd can update it in place.
    args:
        The parsed command output by pytwis_command_parser().
    """
    command = args[pytwis_clt_constants.ARG_COMMAND]

    if command == pytwis_clt_constants.CMD_REGISTER:
        succeeded, result = twis.register(args[pytwis_clt_constants.ARG_USERNAME],
                                          args[pytwis_clt_constants.ARG_PASSWORD])
        if succeeded:
            print('Registered {}'.format(args[pytwis_clt_constants.ARG_USERNAME]))
        else:
            print("Couldn't register {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_USERNAME],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_LOGIN:
        succeeded, result = twis.login(args[pytwis_clt_constants.ARG_USERNAME],
                                       args[pytwis_clt_constants.ARG_PASSWORD])
        if succeeded:
            # Remember the new authentication secret for subsequent commands.
            auth_secret[0] = result[pytwis_constants.AUTH_KEY]
            print('Logged into username {}'.format(args[pytwis_clt_constants.ARG_USERNAME]))
        else:
            print("Couldn't log into username {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_USERNAME],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_LOGOUT:
        succeeded, result = twis.logout(auth_secret[0])
        if succeeded:
            auth_secret[0] = result[pytwis_constants.AUTH_KEY]
            print('Logged out of username {}'.format(result[pytwis_constants.USERNAME_KEY]))
        else:
            print("Couldn't log out with error = {}".format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
        succeeded, result = twis.change_password(auth_secret[0],
                                                 args[pytwis_clt_constants.ARG_OLD_PASSWORD],
                                                 args[pytwis_clt_constants.ARG_NEW_PASSWORD])
        if succeeded:
            auth_secret[0] = result[pytwis_constants.AUTH_KEY]
            print('Changed the password')
        else:
            print("Couldn't change the password with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_USER_PROFILE:
        succeeded, result = twis.get_user_profile(auth_secret[0])
        if succeeded:
            print('Got the user profile')
            print('=' * 20)
            for key, value in result.items():
                print('{}: {}'.format(key, value))
            print('=' * 20)
        else:
            print("Couldn't get the user profile with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_POST:
        # CONSISTENCY FIX: look the tweet up by the same ARG_TWEET constant
        # under which pytwis_command_parser() stores it, instead of the raw
        # string literal 'tweet'.
        succeeded, result = twis.post_tweet(auth_secret[0],
                                            args[pytwis_clt_constants.ARG_TWEET])
        if succeeded:
            print('Posted the tweet')
        else:
            print("Couldn't post the tweet with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_FOLLOW:
        succeeded, result = twis.follow(auth_secret[0],
                                        args[pytwis_clt_constants.ARG_FOLLOWEE])
        if succeeded:
            print('Followed username {}'.format(args[pytwis_clt_constants.ARG_FOLLOWEE]))
        else:
            print("Couldn't follow the username {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_FOLLOWEE],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_UNFOLLOW:
        succeeded, result = twis.unfollow(auth_secret[0],
                                          args[pytwis_clt_constants.ARG_FOLLOWEE])
        if succeeded:
            print('Unfollowed username {}'.format(args[pytwis_clt_constants.ARG_FOLLOWEE]))
        else:
            print("Couldn't unfollow the username {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_FOLLOWEE],
                         result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_FOLLOWERS:
        succeeded, result = twis.get_followers(auth_secret[0])
        if succeeded:
            print('Got the list of {} followers'.\
                  format(len(result[pytwis_constants.FOLLOWER_LIST_KEY])))
            print('=' * 20)
            for follower in result[pytwis_constants.FOLLOWER_LIST_KEY]:
                print('\t' + follower)
            print('=' * 20)
        else:
            print("Couldn't get the follower list with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
        succeeded, result = twis.get_following(auth_secret[0])
        if succeeded:
            print('Got the list of {} followings'.\
                  format(len(result[pytwis_constants.FOLLOWING_LIST_KEY])))
            print('=' * 60)
            for following in result[pytwis_constants.FOLLOWING_LIST_KEY]:
                print('\t' + following)
            print('=' * 60)
        else:
            print("Couldn't get the following list with error = {}".\
                  format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_TIMELINE:
        succeeded, result = twis.get_timeline(auth_secret[0],
                                              args[pytwis_clt_constants.ARG_MAX_TWEETS])
        if succeeded:
            # An empty auth secret means no user is logged in, so the general
            # timeline was returned instead of the user timeline.
            if auth_secret[0] != '':
                print('Got {} tweets in the user timeline'.\
                      format(len(result[pytwis_constants.TWEETS_KEY])))
            else:
                print('Got {} tweets in the general timeline'.\
                      format(len(result[pytwis_constants.TWEETS_KEY])))
            print_tweets(result[pytwis_constants.TWEETS_KEY])
        else:
            if auth_secret[0] != '':
                print("Couldn't get the user timeline with error = {}".\
                      format(result[pytwis_constants.ERROR_KEY]))
            else:
                print("Couldn't get the general timeline with error = {}".\
                      format(result[pytwis_constants.ERROR_KEY]))
    elif command == pytwis_clt_constants.CMD_GET_USER_TWEETS:
        # Get the username of the currently logged-in user if no username is given.
        if args[pytwis_clt_constants.ARG_USERNAME] is None:
            succeeded, result = twis.get_user_profile(auth_secret[0])
            if succeeded:
                args[pytwis_clt_constants.ARG_USERNAME] = result[pytwis_constants.USERNAME_KEY]
                print('No username is given, so use the currently logged-in user {}'.\
                      format(args[pytwis_clt_constants.ARG_USERNAME]))
            else:
                print("Couldn't get the username of the currently logged-in user with error = {}".\
                      format(result[pytwis_constants.ERROR_KEY]))
                return

        succeeded, result = twis.get_user_tweets(auth_secret[0],
                                                 args[pytwis_clt_constants.ARG_USERNAME],
                                                 args[pytwis_clt_constants.ARG_MAX_TWEETS])
        if succeeded:
            print('Got {} tweets posted by {}'.format(len(result[pytwis_constants.TWEETS_KEY]),
                                                      args[pytwis_clt_constants.ARG_USERNAME]))
            print_tweets(result[pytwis_constants.TWEETS_KEY])
        else:
            print("Couldn't get the tweets posted by {} with error = {}".\
                  format(args[pytwis_clt_constants.ARG_USERNAME],
                         result[pytwis_constants.ERROR_KEY]))
    else:
        # Unknown commands are silently ignored; validation happens upstream.
        pass
def pytwis_clt():
"""The main routine of this command-line tool."""
epilog = '''After launching `pytwis_clt.py`, you will be able to use the following commands:
* Register a new user:
127.0.0.1:6379> register {username} {password}
* Log into a user:
127.0.0.1:6379> login {username} {password}
* Log out of a user:
127.0.0.1:6379> logout
* Change the password:
127.0.0.1:6379> changepwd {old_password} {new_password} {confirmed_new_password}
* Get the profile of the current user:
127.0.0.1:6379> userprofile
* Post a tweet:
127.0.0.1:6379> post {tweet}
* Follow a user:
127.0.0.1:6379> follow {followee_username}
* Unfollow a user:
127.0.0.1:6379> unfollow {followee_username}
* Get the follower list:
127.0.0.1:6379> followers
* Get the following list:
127.0.0.1:6379> followings
* Get the timeline:
127.0.0.1:6379> timeline
127.0.0.1:6379> timeline {max_tweet_count}
Note that if a user is logged in, `timeline` will return the user timeline;
otherwise `timeline` will return the general timeline.
* Get the tweets posted by a user:
127.0.0.1:6379> tweetsby
127.0.0.1:6379> tweetsby {username}
127.0.0.1:6379> tweetsby {username} {max_tweet_count}
Note that if no username is given, `tweetsby` will return the tweets posted
by the currently logged-in user.
* Exit the program:
127.0.0.1:6379> exit
127.0.0.1:6379> quit
'''
twis, prompt = get_pytwis(epilog)
if twis is None:
return -1
auth_secret = ['']
while True:
try:
arg_dict = pytwis_command_parser(
input('Please enter a command '
'(register, login, logout, changepwd, userprofile, post, '
'follow, unfollow, followers, followings, timeline, tweetsby):\n{}> '\
.format(prompt)))
if arg_dict[pytwis_clt_constants.ARG_COMMAND] == pytwis_clt_constants.CMD_EXIT \
or arg_dict[pytwis_clt_constants.ARG_COMMAND] == pytwis_clt_constants.CMD_QUIT:
# Log out of the current user before exiting.
if auth_secret[0]:
pytwis_command_processor(twis, auth_secret,
{pytwis_clt_constants.ARG_COMMAND:
pytwis_clt_constants.CMD_LOGOUT})
print('pytwis is exiting.')
return 0
except ValueError as excep:
print('Invalid pytwis command: {}'.format(str(excep)),
file=sys.stderr)
continue
pytwis_command_processor(twis, auth_secret, arg_dict)
if __name__ == "__main__":
pytwis_clt()
|
renweizhukov/pytwis | pytwis/pytwis_clt.py | pytwis_clt | python | def pytwis_clt():
epilog = '''After launching `pytwis_clt.py`, you will be able to use the following commands:
* Register a new user:
127.0.0.1:6379> register {username} {password}
* Log into a user:
127.0.0.1:6379> login {username} {password}
* Log out of a user:
127.0.0.1:6379> logout
* Change the password:
127.0.0.1:6379> changepwd {old_password} {new_password} {confirmed_new_password}
* Get the profile of the current user:
127.0.0.1:6379> userprofile
* Post a tweet:
127.0.0.1:6379> post {tweet}
* Follow a user:
127.0.0.1:6379> follow {followee_username}
* Unfollow a user:
127.0.0.1:6379> unfollow {followee_username}
* Get the follower list:
127.0.0.1:6379> followers
* Get the following list:
127.0.0.1:6379> followings
* Get the timeline:
127.0.0.1:6379> timeline
127.0.0.1:6379> timeline {max_tweet_count}
Note that if a user is logged in, `timeline` will return the user timeline;
otherwise `timeline` will return the general timeline.
* Get the tweets posted by a user:
127.0.0.1:6379> tweetsby
127.0.0.1:6379> tweetsby {username}
127.0.0.1:6379> tweetsby {username} {max_tweet_count}
Note that if no username is given, `tweetsby` will return the tweets posted
by the currently logged-in user.
* Exit the program:
127.0.0.1:6379> exit
127.0.0.1:6379> quit
'''
twis, prompt = get_pytwis(epilog)
if twis is None:
return -1
auth_secret = ['']
while True:
try:
arg_dict = pytwis_command_parser(
input('Please enter a command '
'(register, login, logout, changepwd, userprofile, post, '
'follow, unfollow, followers, followings, timeline, tweetsby):\n{}> '\
.format(prompt)))
if arg_dict[pytwis_clt_constants.ARG_COMMAND] == pytwis_clt_constants.CMD_EXIT \
or arg_dict[pytwis_clt_constants.ARG_COMMAND] == pytwis_clt_constants.CMD_QUIT:
# Log out of the current user before exiting.
if auth_secret[0]:
pytwis_command_processor(twis, auth_secret,
{pytwis_clt_constants.ARG_COMMAND:
pytwis_clt_constants.CMD_LOGOUT})
print('pytwis is exiting.')
return 0
except ValueError as excep:
print('Invalid pytwis command: {}'.format(str(excep)),
file=sys.stderr)
continue
pytwis_command_processor(twis, auth_secret, arg_dict) | The main routine of this command-line tool. | train | https://github.com/renweizhukov/pytwis/blob/1bc45b038d7e5343824c520f89f644bbd6faab0a/pytwis/pytwis_clt.py#L606-L699 | [
"def get_pytwis(epilog):\n \"\"\"Connect to the Redis database and return the Pytwis instance.\n\n Parameters\n ----------\n epilog: str\n An epilog string which will be displayed by ArgumentParser.\n\n Returns\n -------\n pytwis: A Pytwis instance.\n prompt: str\n The prompt string which contains either the hostname and the port or the socket.\n Raises\n ------\n ValueError\n If we fail to connect to the Redis server.\n \"\"\"\n # Note that we set the conflict handler of ArgumentParser to 'resolve' because we reuse\n # the short help option '-h' for the host name.\n parser = argparse.ArgumentParser(conflict_handler=\"resolve\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\\\n 'Connect to the Redis database of a Twitter clone and '\n 'then run commands to access and update the database.',\n epilog=epilog)\n parser.add_argument('-h', '--hostname', nargs='?', default='127.0.0.1',\n help='''the Redis server hostname. If the option is not specified,\n will be defaulted to 127.0.0.1. If the option is specified but\n no value is given after the option, then the help information\n is displayed instead.\n ''')\n parser.add_argument('-p', '--port', default=6379,\n help='''the Redis server port. If the option is not specified, will\n be defaulted to 6379.\n ''')\n parser.add_argument('-s', '--socket', default='',\n help='''the Redis server socket (usually /tmp/redis.sock). If it is\n given, it will override hostname and port. Make sure that the\n unixsocket parameter is defined in your redis.conf file. It’s\n commented out by default.\n ''')\n parser.add_argument('-n', '--db', default=0,\n help='''the Redis server database. If the option is not specified,\n will be defaulted to 0.\n ''')\n parser.add_argument('-a', '--password', default='',\n help='''the Redis server password. 
If the option not specified,\n will be defaulted to an empty string.\n ''')\n\n args = parser.parse_args()\n\n # If no value is given after the option '-h', then the help information is displayed.\n if args.hostname is None:\n parser.print_help()\n return 0\n\n if args.socket:\n print('The input Redis server socket is {}'.format(args.socket))\n prompt = args.socket\n else:\n print('The input Redis server hostname is {}.'.format(args.hostname))\n print('The input Redis server port is {}.'.format(args.port))\n prompt = '{}:{}'.format(args.hostname, args.port)\n print('The input Redis server database is {}.'.format(args.db))\n if args.password != '':\n print('The input Redis server password is \"{}\".'.format(args.password))\n else:\n print('The input Redis server password is empty.')\n\n try:\n if args.socket:\n twis = pytwis.Pytwis(socket=args.socket,\n db=args.db,\n password=args.password)\n else:\n twis = pytwis.Pytwis(hostname=args.hostname,\n port=args.port,\n db=args.db,\n password=args.password)\n return twis, prompt\n\n except ValueError as excep:\n print('Failed to connect to the Redis server: {}'.format(str(excep)),\n file=sys.stderr)\n return None, None\n"
] | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""A command-line tool which uses `pytwis` to interact with the Redis database of
a Twitter toy clone.
To see the help information,
.. code:: bash
$ ./pytwis_clt.py -h
$ ./pytwis_clt.py --help
After launching `pytwis_clt.py`, you will be able to use the following commands:
* Register a new user:
.. code:: bash
127.0.0.1:6379> register {username} {password}
* Log into a user:
.. code:: bash
127.0.0.1:6379> login {username} {password}
* Log out of a user:
.. code:: bash
127.0.0.1:6379> logout
* Change the password:
.. code:: bash
127.0.0.1:6379> changepwd {old_password} {new_password} {confirmed_new_password}
* Get the profile of the current user:
.. code:: bash
127.0.0.1:6379> userprofile
* Post a tweet:
.. code:: bash
127.0.0.1:6379> post {tweet}
* Follow a user:
.. code:: bash
127.0.0.1:6379> follow {followee_username}
* Unfollow a user:
.. code:: bash
127.0.0.1:6379> unfollow {followee_username}
* Get the follower list:
.. code:: bash
127.0.0.1:6379> followers
* Get the following list:
.. code:: bash
127.0.0.1:6379> followings
* Get the timeline:
.. code:: bash
127.0.0.1:6379> timeline
127.0.0.1:6379> timeline {max_tweet_count}
Note that if a user is logged in, `timeline` will return the user timeline;
otherwise `timeline` will return the general timeline.
* Get the tweets posted by a user:
.. code:: bash
127.0.0.1:6379> tweetsby
127.0.0.1:6379> tweetsby {username}
127.0.0.1:6379> tweetsby {username} {max_tweet_count}
Note that if no username is given, `tweetsby` will return the tweets posted
by the currently logged-in user.
* Exit the program:
.. code:: bash
127.0.0.1:6379> exit
127.0.0.1:6379> quit
"""
import argparse
import datetime
import sys
import parse
if __package__:
# If this module is imported as part of the pytwis package, then use
# the relative import.
from . import pytwis_constants
from . import pytwis_clt_constants
from . import pytwis
else:
# If this module is executed locally as a script, then don't use
# the relative import.
import pytwis_constants # pylint: disable=import-error
import pytwis_clt_constants # pylint: disable=import-error
import pytwis
def validate_command(raw_command):
"""Validate the command input.
Currently we only check the number of arguments according to the command type.
Parameters
----------
raw_command: str
The raw command input, e.g., `register xxxxxx yyyyyy`.
Raises
------
ValueError
If the raw command input doesn't have the correct number of arguments.
"""
parsed_command = raw_command.split()
arg_count = len(parsed_command) - 1
if not parsed_command:
return
if parsed_command[0] == pytwis_clt_constants.CMD_REGISTER:
if arg_count < 2:
raise ValueError('{cmd} {{{arg1}}} {{{arg2}}}'.\
format(cmd=pytwis_clt_constants.CMD_REGISTER,
arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_PASSWORD))
elif parsed_command[0] == pytwis_clt_constants.CMD_LOGIN:
if arg_count < 2:
raise ValueError('{cmd} {{{arg1}}} {{{arg2}}}'.\
format(cmd=pytwis_clt_constants.CMD_LOGIN,
arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_PASSWORD))
elif parsed_command[0] == pytwis_clt_constants.CMD_LOGOUT:
pass
elif parsed_command[0] == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
if arg_count < 3:
raise ValueError('{cmd} {{{arg1}}} {{{arg2}}} {{{arg3}}}'.\
format(cmd=pytwis_clt_constants.CMD_CHANGE_PASSWORD,
arg1=pytwis_clt_constants.ARG_OLD_PASSWORD,
arg2=pytwis_clt_constants.ARG_NEW_PASSWORD,
arg3=pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD))
elif parsed_command[0] == pytwis_clt_constants.CMD_GET_USER_PROFILE:
pass
elif parsed_command[0] == pytwis_clt_constants.CMD_POST:
if arg_count < 1:
raise ValueError('{cmd} {{{arg}}}'.format(cmd=pytwis_clt_constants.CMD_POST,
arg=pytwis_clt_constants.ARG_TWEET))
elif parsed_command[0] == pytwis_clt_constants.CMD_FOLLOW:
if arg_count < 1:
raise ValueError('{cmd} {{{arg}}}'.format(cmd=pytwis_clt_constants.CMD_FOLLOW,
arg=pytwis_clt_constants.ARG_FOLLOWEE))
elif parsed_command[0] == pytwis_clt_constants.CMD_UNFOLLOW:
if arg_count < 1:
raise ValueError('{cmd} {{{arg}}}'.format(cmd=pytwis_clt_constants.CMD_UNFOLLOW,
arg=pytwis_clt_constants.ARG_FOLLOWEE))
elif parsed_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWERS:
pass
elif parsed_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
pass
elif parsed_command[0] == pytwis_clt_constants.CMD_TIMELINE:
if arg_count > 1:
raise ValueError('{cmd} {{{arg}}} or {cmd}'.\
format(cmd=pytwis_clt_constants.CMD_TIMELINE,
arg=pytwis_clt_constants.ARG_MAX_TWEETS))
elif parsed_command[0] == pytwis_clt_constants.CMD_GET_USER_TWEETS:
if arg_count > 2:
raise ValueError('{cmd} {{{arg1}}} {{{arg2}}} or {cmd} {{{arg1}}} or {cmd}'.\
format(cmd=pytwis_clt_constants.CMD_GET_USER_TWEETS,
arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_MAX_TWEETS))
elif (parsed_command[0] == pytwis_clt_constants.CMD_EXIT) or\
(parsed_command[0] == pytwis_clt_constants.CMD_QUIT):
pass
else:
raise ValueError('Invalid pytwis command')
def pytwis_command_parser(raw_command):
"""Parse the command input.
Parameters
----------
raw_command: str
The raw command input, e.g., `register xxxxxx yyyyyy`.
Returns
-------
arg_dict: dict(str, str or int)
The parsed command output.
{'command':'register', 'username': <username>, 'password': <password>} for `register`.
Raises
------
ValueError
If the raw command can't be parsed correctly, e.g., it has an incorrect number of
arguments or incorrect arguments.
"""
validate_command(raw_command)
# Some command (e.g., logout) may not have arguments.
# Separate the command from its arguments.
splited_raw_command = raw_command.split(' ', 1)
arg_dict = {}
if splited_raw_command[0] == pytwis_clt_constants.CMD_REGISTER:
# register must have two arguments: username and password.
args = splited_raw_command[1]
arg_dict = parse.parse('{{{arg1}}} {{{arg2}}}'.\
format(arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_PASSWORD),
args)
if arg_dict is None:
raise ValueError('{} has incorrect arguments'.format(pytwis_clt_constants.CMD_REGISTER))
elif ' ' in arg_dict[pytwis_clt_constants.ARG_PASSWORD]:
raise ValueError("password can't contain spaces")
print('{}: username = {}, password = {}'.\
format(pytwis_clt_constants.CMD_REGISTER,
arg_dict[pytwis_clt_constants.ARG_USERNAME],
arg_dict[pytwis_clt_constants.ARG_PASSWORD]))
elif splited_raw_command[0] == pytwis_clt_constants.CMD_LOGIN:
# login must have two arguments: username and password.
args = splited_raw_command[1]
arg_dict = parse.parse('{{{arg1}}} {{{arg2}}}'.\
format(arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_PASSWORD),
args)
if arg_dict is None:
raise ValueError('{} has incorrect arguments'.format(pytwis_clt_constants.CMD_LOGIN))
print('{}: username = {}, password = {}'.\
format(pytwis_clt_constants.CMD_LOGIN,
arg_dict[pytwis_clt_constants.ARG_USERNAME],
arg_dict[pytwis_clt_constants.ARG_PASSWORD]))
elif splited_raw_command[0] == pytwis_clt_constants.CMD_LOGOUT:
# logout doesn't have any arguments.
pass
elif splited_raw_command[0] == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
# changepwd must have three arguments: old_password, new_password, and
# confirmed_new_password.
args = splited_raw_command[1]
arg_dict = parse.parse('{{{arg1}}} {{{arg2}}} {{{arg3}}}'.\
format(arg1=pytwis_clt_constants.ARG_OLD_PASSWORD,
arg2=pytwis_clt_constants.ARG_NEW_PASSWORD,
arg3=pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD),
args)
if arg_dict is None:
raise ValueError('{} has incorrect arguments'.\
format(pytwis_clt_constants.CMD_CHANGE_PASSWORD))
elif arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD] !=\
arg_dict[pytwis_clt_constants.ARG_CONFIRMED_NEW_PASSWORD]:
raise ValueError('The confirmed new password is different from the new password')
elif arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD] ==\
arg_dict[pytwis_clt_constants.ARG_OLD_PASSWORD]:
raise ValueError('The new password is the same as the old password')
print('{}: old = {}, new = {}'.format(pytwis_clt_constants.CMD_CHANGE_PASSWORD,
arg_dict[pytwis_clt_constants.ARG_OLD_PASSWORD],
arg_dict[pytwis_clt_constants.ARG_NEW_PASSWORD]))
elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_USER_PROFILE:
# userprofile doesn't have any arguments.
pass
elif splited_raw_command[0] == pytwis_clt_constants.CMD_POST:
# post must have one argument: tweet
arg_dict = {pytwis_clt_constants.ARG_TWEET: splited_raw_command[1]}
elif splited_raw_command[0] == pytwis_clt_constants.CMD_FOLLOW:
# follow must have one argument: followee.
arg_dict = {pytwis_clt_constants.ARG_FOLLOWEE: splited_raw_command[1]}
elif splited_raw_command[0] == pytwis_clt_constants.CMD_UNFOLLOW:
# unfollow must have one argument: followee.
arg_dict = {pytwis_clt_constants.ARG_FOLLOWEE: splited_raw_command[1]}
elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWERS:
# followers doesn't have any arguments.
pass
elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
# followings doesn't have any arguments.
pass
elif splited_raw_command[0] == pytwis_clt_constants.CMD_TIMELINE:
# timeline has either zero or one argument.
max_cnt_tweets = -1
if len(splited_raw_command) >= 2:
max_cnt_tweets = int(splited_raw_command[1])
arg_dict = {pytwis_clt_constants.ARG_MAX_TWEETS: max_cnt_tweets}
elif splited_raw_command[0] == pytwis_clt_constants.CMD_GET_USER_TWEETS:
# tweetsby has either zero or one or two arguments.
arg_dict = {pytwis_clt_constants.ARG_USERNAME: None,
pytwis_clt_constants.ARG_MAX_TWEETS: -1}
if len(splited_raw_command) >= 2:
# tweetsby has either one or two arguments.
args = splited_raw_command[1]
arg_dict = parse.parse('{{{arg1}}} {{{arg2}:d}}'.\
format(arg1=pytwis_clt_constants.ARG_USERNAME,
arg2=pytwis_clt_constants.ARG_MAX_TWEETS),
args)
if arg_dict is None:
# tweetsby has only one argument.
arg_dict = {pytwis_clt_constants.ARG_USERNAME: args}
arg_dict[pytwis_clt_constants.ARG_MAX_TWEETS] = -1
elif (splited_raw_command[0] == pytwis_clt_constants.CMD_EXIT) or\
(splited_raw_command[0] == pytwis_clt_constants.CMD_QUIT):
# exit or quit doesn't have any arguments.
pass
else:
pass
if isinstance(arg_dict, parse.Result):
arg_dict = arg_dict.named
arg_dict[pytwis_clt_constants.ARG_COMMAND] = splited_raw_command[0]
return arg_dict
def print_tweets(tweets):
"""Print a list of tweets one by one separated by "="s.
Parameters
----------
tweets: list(dict)
A list of tweets. Each tweet is a dict containing the username of the tweet's author,
the post time, and the tweet body.
"""
print('=' * 60)
for index, tweet in enumerate(tweets):
print('-' * 60)
print('Tweet {}:'.format(index))
print('Username:', tweet[pytwis_constants.USERNAME_KEY])
print('Time:',
datetime.datetime.fromtimestamp(int(tweet[pytwis_constants.TWEET_UNIXTIME_KEY])).\
strftime('%Y-%m-%d %H:%M:%S'))
print('Body:\n\t', tweet[pytwis_constants.TWEET_BODY_KEY])
print('-' * 60)
print('=' * 60)
def pytwis_command_processor(twis, auth_secret, args):
"""Process the parsed command.
Parameters
----------
twis: Pytwis
A Pytwis instance which interacts with the Redis database of the Twitter toy clone.
auth_secret: str
The authentication secret of a logged-in user.
args:
The parsed command output by pytwis_command_parser().
"""
command = args[pytwis_clt_constants.ARG_COMMAND]
if command == pytwis_clt_constants.CMD_REGISTER:
succeeded, result = twis.register(args[pytwis_clt_constants.ARG_USERNAME],
args[pytwis_clt_constants.ARG_PASSWORD])
if succeeded:
print('Registered {}'.format(args[pytwis_clt_constants.ARG_USERNAME]))
else:
print("Couldn't register {} with error = {}".\
format(args[pytwis_clt_constants.ARG_USERNAME],
result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_LOGIN:
succeeded, result = twis.login(args[pytwis_clt_constants.ARG_USERNAME],
args[pytwis_clt_constants.ARG_PASSWORD])
if succeeded:
auth_secret[0] = result[pytwis_constants.AUTH_KEY]
print('Logged into username {}'.format(args[pytwis_clt_constants.ARG_USERNAME]))
else:
print("Couldn't log into username {} with error = {}".\
format(args[pytwis_clt_constants.ARG_USERNAME],
result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_LOGOUT:
succeeded, result = twis.logout(auth_secret[0])
if succeeded:
auth_secret[0] = result[pytwis_constants.AUTH_KEY]
print('Logged out of username {}'.format(result[pytwis_constants.USERNAME_KEY]))
else:
print("Couldn't log out with error = {}".format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_CHANGE_PASSWORD:
succeeded, result = twis.change_password(auth_secret[0],
args[pytwis_clt_constants.ARG_OLD_PASSWORD],
args[pytwis_clt_constants.ARG_NEW_PASSWORD])
if succeeded:
auth_secret[0] = result[pytwis_constants.AUTH_KEY]
print('Changed the password')
else:
print("Couldn't change the password with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_GET_USER_PROFILE:
succeeded, result = twis.get_user_profile(auth_secret[0])
if succeeded:
print('Got the user profile')
print('=' * 20)
for key, value in result.items():
print('{}: {}'.format(key, value))
print('=' * 20)
else:
print("Couldn't get the user profile with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_POST:
succeeded, result = twis.post_tweet(auth_secret[0], args['tweet'])
if succeeded:
print('Posted the tweet')
else:
print("Couldn't post the tweet with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_FOLLOW:
succeeded, result = twis.follow(auth_secret[0],
args[pytwis_clt_constants.ARG_FOLLOWEE])
if succeeded:
print('Followed username {}'.format(args[pytwis_clt_constants.ARG_FOLLOWEE]))
else:
print("Couldn't follow the username {} with error = {}".\
format(args[pytwis_clt_constants.ARG_FOLLOWEE],
result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_UNFOLLOW:
succeeded, result = twis.unfollow(auth_secret[0],
args[pytwis_clt_constants.ARG_FOLLOWEE])
if succeeded:
print('Unfollowed username {}'.format(args[pytwis_clt_constants.ARG_FOLLOWEE]))
else:
print("Couldn't unfollow the username {} with error = {}".\
format(args[pytwis_clt_constants.ARG_FOLLOWEE],
result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_GET_FOLLOWERS:
succeeded, result = twis.get_followers(auth_secret[0])
if succeeded:
print('Got the list of {} followers'.\
format(len(result[pytwis_constants.FOLLOWER_LIST_KEY])))
print('=' * 20)
for follower in result[pytwis_constants.FOLLOWER_LIST_KEY]:
print('\t' + follower)
print('=' * 20)
else:
print("Couldn't get the follower list with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_GET_FOLLOWINGS:
succeeded, result = twis.get_following(auth_secret[0])
if succeeded:
print('Got the list of {} followings'.\
format(len(result[pytwis_constants.FOLLOWING_LIST_KEY])))
print('=' * 60)
for following in result[pytwis_constants.FOLLOWING_LIST_KEY]:
print('\t' + following)
print('=' * 60)
else:
print("Couldn't get the following list with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_TIMELINE:
succeeded, result = twis.get_timeline(auth_secret[0],
args[pytwis_clt_constants.ARG_MAX_TWEETS])
if succeeded:
if auth_secret[0] != '':
print('Got {} tweets in the user timeline'.\
format(len(result[pytwis_constants.TWEETS_KEY])))
else:
print('Got {} tweets in the general timeline'.\
format(len(result[pytwis_constants.TWEETS_KEY])))
print_tweets(result[pytwis_constants.TWEETS_KEY])
else:
if auth_secret[0] != '':
print("Couldn't get the user timeline with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
else:
print("Couldn't get the general timeline with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
elif command == pytwis_clt_constants.CMD_GET_USER_TWEETS:
# Get the username of the currently logged-in user if no username is given.
if args[pytwis_clt_constants.ARG_USERNAME] is None:
succeeded, result = twis.get_user_profile(auth_secret[0])
if succeeded:
args[pytwis_clt_constants.ARG_USERNAME] = result[pytwis_constants.USERNAME_KEY]
print('No username is given, so use the currently logged-in user {}'.\
format(args[pytwis_clt_constants.ARG_USERNAME]))
else:
print("Couldn't get the username of the currently logged-in user with error = {}".\
format(result[pytwis_constants.ERROR_KEY]))
return
succeeded, result = twis.get_user_tweets(auth_secret[0],
args[pytwis_clt_constants.ARG_USERNAME],
args[pytwis_clt_constants.ARG_MAX_TWEETS])
if succeeded:
print('Got {} tweets posted by {}'.format(len(result[pytwis_constants.TWEETS_KEY]),
args[pytwis_clt_constants.ARG_USERNAME]))
print_tweets(result[pytwis_constants.TWEETS_KEY])
else:
print("Couldn't get the tweets posted by {} with error = {}".\
format(args[pytwis_clt_constants.ARG_USERNAME],
result[pytwis_constants.ERROR_KEY]))
else:
pass
def get_pytwis(epilog):
"""Connect to the Redis database and return the Pytwis instance.
Parameters
----------
epilog: str
An epilog string which will be displayed by ArgumentParser.
Returns
-------
pytwis: A Pytwis instance.
prompt: str
The prompt string which contains either the hostname and the port or the socket.
Raises
------
ValueError
If we fail to connect to the Redis server.
"""
# Note that we set the conflict handler of ArgumentParser to 'resolve' because we reuse
# the short help option '-h' for the host name.
parser = argparse.ArgumentParser(conflict_handler="resolve",
formatter_class=argparse.RawDescriptionHelpFormatter,
description=\
'Connect to the Redis database of a Twitter clone and '
'then run commands to access and update the database.',
epilog=epilog)
parser.add_argument('-h', '--hostname', nargs='?', default='127.0.0.1',
help='''the Redis server hostname. If the option is not specified,
will be defaulted to 127.0.0.1. If the option is specified but
no value is given after the option, then the help information
is displayed instead.
''')
parser.add_argument('-p', '--port', default=6379,
help='''the Redis server port. If the option is not specified, will
be defaulted to 6379.
''')
parser.add_argument('-s', '--socket', default='',
help='''the Redis server socket (usually /tmp/redis.sock). If it is
given, it will override hostname and port. Make sure that the
unixsocket parameter is defined in your redis.conf file. It’s
commented out by default.
''')
parser.add_argument('-n', '--db', default=0,
help='''the Redis server database. If the option is not specified,
will be defaulted to 0.
''')
parser.add_argument('-a', '--password', default='',
help='''the Redis server password. If the option not specified,
will be defaulted to an empty string.
''')
args = parser.parse_args()
# If no value is given after the option '-h', then the help information is displayed.
if args.hostname is None:
parser.print_help()
return 0
if args.socket:
print('The input Redis server socket is {}'.format(args.socket))
prompt = args.socket
else:
print('The input Redis server hostname is {}.'.format(args.hostname))
print('The input Redis server port is {}.'.format(args.port))
prompt = '{}:{}'.format(args.hostname, args.port)
print('The input Redis server database is {}.'.format(args.db))
if args.password != '':
print('The input Redis server password is "{}".'.format(args.password))
else:
print('The input Redis server password is empty.')
try:
if args.socket:
twis = pytwis.Pytwis(socket=args.socket,
db=args.db,
password=args.password)
else:
twis = pytwis.Pytwis(hostname=args.hostname,
port=args.port,
db=args.db,
password=args.password)
return twis, prompt
except ValueError as excep:
print('Failed to connect to the Redis server: {}'.format(str(excep)),
file=sys.stderr)
return None, None
if __name__ == "__main__":
pytwis_clt()
|
msztolcman/versionner | versionner/cli.py | parse_args | python | def parse_args(args, cfg):
prog = pathlib.Path(sys.argv[0]).parts[-1].replace('.py', '')
prog_version = "%%(prog)s %s" % versionner.__version__
# pylint: disable=invalid-name
p = argparse.ArgumentParser(prog=prog, description='Helps manipulating version of the project')
p.add_argument('--file', '-f', dest='version_file', type=str,
default=cfg.version_file,
help="path to file where version is saved")
p.add_argument('--version', '-v', action="version", version=prog_version)
p.add_argument('--date-format', type=str,
default=cfg.date_format,
help="Date format used in project files")
p.add_argument('--verbose', action="store_true",
help="Be more verbose if it's possible")
sub = p.add_subparsers(dest='command')
p_init = sub.add_parser('init', aliases=commands.get_aliases_for('init'),
help="Create new version file")
p_init.add_argument('value', nargs='?', type=str,
default=cfg.default_init_version,
help="Initial version")
p_init.add_argument('--vcs-engine', type=str,
default=cfg.vcs_engine,
help="Select VCS engine (only git is supported currently)", )
p_init.add_argument('--vcs-commit-message', '-m', type=str,
default=cfg.vcs_commit_message,
help="Commit message used when committing changes")
p_init.add_argument('--commit', '-c', action='store_true',
help="Commit changes done by `up` command (only if there is no changes in repo before)")
p_up = sub.add_parser('up', aliases=commands.get_aliases_for('up'),
help="Increase version")
p_up.add_argument('--vcs-engine', type=str,
default=cfg.vcs_engine,
help="Select VCS engine (only git is supported currently)", )
p_up.add_argument('--vcs-commit-message', '-m', type=str,
default=cfg.vcs_commit_message,
help="Commit message used when committing changes")
p_up.add_argument('--commit', '-c', action='store_true',
help="Commit changes done by `up` command (only if there is no changes in repo before)")
p_up.add_argument('value', nargs='?', type=int,
default=cfg.default_increase_value,
help="Increase version by this value (default: %d)" % cfg.default_increase_value)
p_up_gr = p_up.add_mutually_exclusive_group()
p_up_gr.add_argument('--major', '-j', action="store_true",
help="increase major part of version" + (" (project default)" if cfg.up_part == 'major' else ""))
p_up_gr.add_argument('--minor', '-n', action="store_true",
help="increase minor part of version" + (" (project default)" if cfg.up_part == 'minor' else ""))
p_up_gr.add_argument('--patch', '-p', action="store_true",
help="increase patch part of version" + (" (project default)" if cfg.up_part == 'patch' else ""))
p_set = sub.add_parser('set', aliases=commands.get_aliases_for('set'),
help="Set version to specified one")
p_set.add_argument('--major', '-j', type=int,
help="set major part of version to MAJOR")
p_set.add_argument('--minor', '-n', type=int,
help="set minor part of version to MINOR")
p_set.add_argument('--patch', '-p', type=int,
help="set patch part of version to PATCH")
p_set.add_argument('--prerelease', '-r', type=str,
help="set prerelease part of version to PRERELEASE")
p_set.add_argument('--build', '-b', type=str,
help="set build part of version to BUILD")
p_set.add_argument('--vcs-engine', type=str,
default=cfg.vcs_engine,
help="Select VCS engine (only git is supported currently)", )
p_set.add_argument('--vcs-commit-message', '-m', type=str,
default=cfg.vcs_commit_message,
help="Commit message used when committing changes")
p_set.add_argument('--commit', '-c', action='store_true',
help="Commit changes done by `set` command (only if there is no changes in repo before)")
p_set.add_argument('value', nargs='?', type=str,
help="set version to this value")
p_tag = sub.add_parser('tag', aliases=commands.get_aliases_for('tag'),
help="Create VCS tag with current version")
p_tag.add_argument('--vcs-tag-param', dest='vcs_tag_params', type=str, action="append",
help="Additional params for VCS for \"tag\" command")
sub.add_parser('read', aliases=commands.get_aliases_for('read'),
help="Read current version")
args = p.parse_args(args)
cfg.command = args.command
cfg.version_file = pathlib.Path(args.version_file).absolute()
cfg.date_format = args.date_format
cfg.verbose = args.verbose
version_file_requirement = 'doesn\'t matter'
if cfg.command == 'init':
version_file_requirement = 'none'
cfg.commit = args.commit
cfg.vcs_engine = args.vcs_engine
cfg.vcs_commit_message = args.vcs_commit_message
cfg.value = args.value
elif cfg.command == 'up':
version_file_requirement = 'required'
cfg.commit = args.commit
cfg.vcs_engine = args.vcs_engine
cfg.vcs_commit_message = args.vcs_commit_message
cfg.value = args.value
if args.major:
cfg.up_part = 'major'
elif args.minor:
cfg.up_part = 'minor'
elif args.patch:
cfg.up_part = 'patch'
elif cfg.command == 'set':
version_file_requirement = 'required'
cfg.commit = args.commit
cfg.vcs_engine = args.vcs_engine
cfg.vcs_commit_message = args.vcs_commit_message
if args.value:
cfg.value = args.value
else:
cfg.value = (
args.major, args.minor, args.patch,
args.prerelease, args.build
)
if all(value is None for value in cfg.value):
p.error("Version is not specified")
elif cfg.command == 'tag':
version_file_requirement = 'required'
cfg.vcs_tag_params = args.vcs_tag_params or []
elif cfg.command is None:
cfg.command = 'read'
version_file_requirement = 'required'
if version_file_requirement == 'required':
if not cfg.version_file.exists():
p.error("Version file \"%s\" doesn't exists" % cfg.version_file)
elif version_file_requirement == 'none':
if cfg.version_file.exists():
p.error("Version file \"%s\" already exists" % cfg.version_file) | Parse input arguments of script.
:param args: list of parameters
:type: argparse.Namespace
:param cfg: configuration storage
:type: Config | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/cli.py#L24-L178 | [
"def get_aliases_for(name):\n \"\"\"Find aliases for given command name\n\n :param name: str\n :return: Str[]\n \"\"\"\n return COMMAND_ALIASES[name]\n"
] | """Main CLI engine for versionner
Parse input options, and make all dirty jobs.
"""
# pylint: disable=wrong-import-position
from versionner import utils
utils.validate_python_version()
# pylint: disable=wrong-import-order
import argparse
import pathlib
import os.path
import sys
import versionner
from versionner import config
from versionner import defaults
from versionner import commands
from versionner.errors import VersionnerError
# pylint: disable=too-many-branches,too-many-statements
def _find_project_config_file(user_config_file):
"""Find path to project-wide config file
Search from current working directory, and traverse path up to
directory with .versionner.rc file or root directory
:param user_config_file: instance with user-wide config path
:type: pathlib.Path
:rtype: pathlib.Path
"""
proj_cfg_dir = pathlib.Path('.').absolute()
proj_cfg_file = None
root = pathlib.Path('/')
while proj_cfg_dir != root:
proj_cfg_file = proj_cfg_dir / defaults.RC_FILENAME
if proj_cfg_file.exists():
break
proj_cfg_file = None
# pylint: disable=redefined-variable-type
proj_cfg_dir = proj_cfg_dir.parent
if proj_cfg_file and proj_cfg_file != user_config_file:
return proj_cfg_file
def execute(prog, argv):
"""Execute whole program
:param prog: program name
:param argv: list: script arguments
:return:
"""
if pathlib.Path(prog).parts[-1] in ('versionner', 'versionner.py'):
print("versionner name is deprecated, use \"ver\" now!", file=sys.stderr)
cfg_files = [
pathlib.Path(os.path.expanduser('~')) / defaults.RC_FILENAME,
]
proj_cfg_file = _find_project_config_file(cfg_files[0])
if proj_cfg_file:
cfg_files.append(proj_cfg_file)
cfg = config.Config(cfg_files)
parse_args(argv, cfg)
cmd = commands.get(cfg.command, cfg)
try:
result = cmd.execute()
except VersionnerError as exc:
print('%s: %s' % (exc.__class__.__name__, exc), file=sys.stderr)
return exc.ret_code
print("Current version: %s" % (result.current_version, ))
if result.modified_files:
print('Changed' + (' and committed' if cfg.commit else '') + ' %(files)s files (%(changes)s changes)' % {
'files': result.modified_files,
'changes': result.modifications,
})
return 0
def main():
"""Main script
:return: exit code (for shell)
:rtype: int
"""
return execute(sys.argv[0], sys.argv[1:])
|
msztolcman/versionner | versionner/cli.py | _find_project_config_file | python | def _find_project_config_file(user_config_file):
proj_cfg_dir = pathlib.Path('.').absolute()
proj_cfg_file = None
root = pathlib.Path('/')
while proj_cfg_dir != root:
proj_cfg_file = proj_cfg_dir / defaults.RC_FILENAME
if proj_cfg_file.exists():
break
proj_cfg_file = None
# pylint: disable=redefined-variable-type
proj_cfg_dir = proj_cfg_dir.parent
if proj_cfg_file and proj_cfg_file != user_config_file:
return proj_cfg_file | Find path to project-wide config file
Search from current working directory, and traverse path up to
directory with .versionner.rc file or root directory
:param user_config_file: instance with user-wide config path
:type: pathlib.Path
:rtype: pathlib.Path | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/cli.py#L181-L203 | null | """Main CLI engine for versionner
Parse input options, and make all dirty jobs.
"""
# pylint: disable=wrong-import-position
from versionner import utils
utils.validate_python_version()
# pylint: disable=wrong-import-order
import argparse
import pathlib
import os.path
import sys
import versionner
from versionner import config
from versionner import defaults
from versionner import commands
from versionner.errors import VersionnerError
# pylint: disable=too-many-branches,too-many-statements
def parse_args(args, cfg):
"""Parse input arguments of script.
:param args: list of parameters
:type: argparse.Namespace
:param cfg: configuration storage
:type: Config
"""
prog = pathlib.Path(sys.argv[0]).parts[-1].replace('.py', '')
prog_version = "%%(prog)s %s" % versionner.__version__
# pylint: disable=invalid-name
p = argparse.ArgumentParser(prog=prog, description='Helps manipulating version of the project')
p.add_argument('--file', '-f', dest='version_file', type=str,
default=cfg.version_file,
help="path to file where version is saved")
p.add_argument('--version', '-v', action="version", version=prog_version)
p.add_argument('--date-format', type=str,
default=cfg.date_format,
help="Date format used in project files")
p.add_argument('--verbose', action="store_true",
help="Be more verbose if it's possible")
sub = p.add_subparsers(dest='command')
p_init = sub.add_parser('init', aliases=commands.get_aliases_for('init'),
help="Create new version file")
p_init.add_argument('value', nargs='?', type=str,
default=cfg.default_init_version,
help="Initial version")
p_init.add_argument('--vcs-engine', type=str,
default=cfg.vcs_engine,
help="Select VCS engine (only git is supported currently)", )
p_init.add_argument('--vcs-commit-message', '-m', type=str,
default=cfg.vcs_commit_message,
help="Commit message used when committing changes")
p_init.add_argument('--commit', '-c', action='store_true',
help="Commit changes done by `up` command (only if there is no changes in repo before)")
p_up = sub.add_parser('up', aliases=commands.get_aliases_for('up'),
help="Increase version")
p_up.add_argument('--vcs-engine', type=str,
default=cfg.vcs_engine,
help="Select VCS engine (only git is supported currently)", )
p_up.add_argument('--vcs-commit-message', '-m', type=str,
default=cfg.vcs_commit_message,
help="Commit message used when committing changes")
p_up.add_argument('--commit', '-c', action='store_true',
help="Commit changes done by `up` command (only if there is no changes in repo before)")
p_up.add_argument('value', nargs='?', type=int,
default=cfg.default_increase_value,
help="Increase version by this value (default: %d)" % cfg.default_increase_value)
p_up_gr = p_up.add_mutually_exclusive_group()
p_up_gr.add_argument('--major', '-j', action="store_true",
help="increase major part of version" + (" (project default)" if cfg.up_part == 'major' else ""))
p_up_gr.add_argument('--minor', '-n', action="store_true",
help="increase minor part of version" + (" (project default)" if cfg.up_part == 'minor' else ""))
p_up_gr.add_argument('--patch', '-p', action="store_true",
help="increase patch part of version" + (" (project default)" if cfg.up_part == 'patch' else ""))
p_set = sub.add_parser('set', aliases=commands.get_aliases_for('set'),
help="Set version to specified one")
p_set.add_argument('--major', '-j', type=int,
help="set major part of version to MAJOR")
p_set.add_argument('--minor', '-n', type=int,
help="set minor part of version to MINOR")
p_set.add_argument('--patch', '-p', type=int,
help="set patch part of version to PATCH")
p_set.add_argument('--prerelease', '-r', type=str,
help="set prerelease part of version to PRERELEASE")
p_set.add_argument('--build', '-b', type=str,
help="set build part of version to BUILD")
p_set.add_argument('--vcs-engine', type=str,
default=cfg.vcs_engine,
help="Select VCS engine (only git is supported currently)", )
p_set.add_argument('--vcs-commit-message', '-m', type=str,
default=cfg.vcs_commit_message,
help="Commit message used when committing changes")
p_set.add_argument('--commit', '-c', action='store_true',
help="Commit changes done by `set` command (only if there is no changes in repo before)")
p_set.add_argument('value', nargs='?', type=str,
help="set version to this value")
p_tag = sub.add_parser('tag', aliases=commands.get_aliases_for('tag'),
help="Create VCS tag with current version")
p_tag.add_argument('--vcs-tag-param', dest='vcs_tag_params', type=str, action="append",
help="Additional params for VCS for \"tag\" command")
sub.add_parser('read', aliases=commands.get_aliases_for('read'),
help="Read current version")
args = p.parse_args(args)
cfg.command = args.command
cfg.version_file = pathlib.Path(args.version_file).absolute()
cfg.date_format = args.date_format
cfg.verbose = args.verbose
version_file_requirement = 'doesn\'t matter'
if cfg.command == 'init':
version_file_requirement = 'none'
cfg.commit = args.commit
cfg.vcs_engine = args.vcs_engine
cfg.vcs_commit_message = args.vcs_commit_message
cfg.value = args.value
elif cfg.command == 'up':
version_file_requirement = 'required'
cfg.commit = args.commit
cfg.vcs_engine = args.vcs_engine
cfg.vcs_commit_message = args.vcs_commit_message
cfg.value = args.value
if args.major:
cfg.up_part = 'major'
elif args.minor:
cfg.up_part = 'minor'
elif args.patch:
cfg.up_part = 'patch'
elif cfg.command == 'set':
version_file_requirement = 'required'
cfg.commit = args.commit
cfg.vcs_engine = args.vcs_engine
cfg.vcs_commit_message = args.vcs_commit_message
if args.value:
cfg.value = args.value
else:
cfg.value = (
args.major, args.minor, args.patch,
args.prerelease, args.build
)
if all(value is None for value in cfg.value):
p.error("Version is not specified")
elif cfg.command == 'tag':
version_file_requirement = 'required'
cfg.vcs_tag_params = args.vcs_tag_params or []
elif cfg.command is None:
cfg.command = 'read'
version_file_requirement = 'required'
if version_file_requirement == 'required':
if not cfg.version_file.exists():
p.error("Version file \"%s\" doesn't exists" % cfg.version_file)
elif version_file_requirement == 'none':
if cfg.version_file.exists():
p.error("Version file \"%s\" already exists" % cfg.version_file)
def execute(prog, argv):
"""Execute whole program
:param prog: program name
:param argv: list: script arguments
:return:
"""
if pathlib.Path(prog).parts[-1] in ('versionner', 'versionner.py'):
print("versionner name is deprecated, use \"ver\" now!", file=sys.stderr)
cfg_files = [
pathlib.Path(os.path.expanduser('~')) / defaults.RC_FILENAME,
]
proj_cfg_file = _find_project_config_file(cfg_files[0])
if proj_cfg_file:
cfg_files.append(proj_cfg_file)
cfg = config.Config(cfg_files)
parse_args(argv, cfg)
cmd = commands.get(cfg.command, cfg)
try:
result = cmd.execute()
except VersionnerError as exc:
print('%s: %s' % (exc.__class__.__name__, exc), file=sys.stderr)
return exc.ret_code
print("Current version: %s" % (result.current_version, ))
if result.modified_files:
print('Changed' + (' and committed' if cfg.commit else '') + ' %(files)s files (%(changes)s changes)' % {
'files': result.modified_files,
'changes': result.modifications,
})
return 0
def main():
"""Main script
:return: exit code (for shell)
:rtype: int
"""
return execute(sys.argv[0], sys.argv[1:])
|
msztolcman/versionner | versionner/cli.py | execute | python | def execute(prog, argv):
if pathlib.Path(prog).parts[-1] in ('versionner', 'versionner.py'):
print("versionner name is deprecated, use \"ver\" now!", file=sys.stderr)
cfg_files = [
pathlib.Path(os.path.expanduser('~')) / defaults.RC_FILENAME,
]
proj_cfg_file = _find_project_config_file(cfg_files[0])
if proj_cfg_file:
cfg_files.append(proj_cfg_file)
cfg = config.Config(cfg_files)
parse_args(argv, cfg)
cmd = commands.get(cfg.command, cfg)
try:
result = cmd.execute()
except VersionnerError as exc:
print('%s: %s' % (exc.__class__.__name__, exc), file=sys.stderr)
return exc.ret_code
print("Current version: %s" % (result.current_version, ))
if result.modified_files:
print('Changed' + (' and committed' if cfg.commit else '') + ' %(files)s files (%(changes)s changes)' % {
'files': result.modified_files,
'changes': result.modifications,
})
return 0 | Execute whole program
:param prog: program name
:param argv: list: script arguments
:return: | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/cli.py#L206-L242 | [
"def get(name, *args, **kwargs):\n \"\"\"Find command class for given command name and return it's instance\n\n :param name: str\n :param args: additional arguments for Command\n :param kwargs: additional arguments for Command\n :return: Command\n \"\"\"\n cmd = COMMAND_MAPPER.get(name)\n return cmd(*args, **kwargs)\n",
"def parse_args(args, cfg):\n \"\"\"Parse input arguments of script.\n\n :param args: list of parameters\n :type: argparse.Namespace\n :param cfg: configuration storage\n :type: Config\n \"\"\"\n prog = pathlib.Path(sys.argv[0]).parts[-1].replace('.py', '')\n prog_version = \"%%(prog)s %s\" % versionner.__version__\n\n # pylint: disable=invalid-name\n p = argparse.ArgumentParser(prog=prog, description='Helps manipulating version of the project')\n p.add_argument('--file', '-f', dest='version_file', type=str,\n default=cfg.version_file,\n help=\"path to file where version is saved\")\n p.add_argument('--version', '-v', action=\"version\", version=prog_version)\n p.add_argument('--date-format', type=str,\n default=cfg.date_format,\n help=\"Date format used in project files\")\n p.add_argument('--verbose', action=\"store_true\",\n help=\"Be more verbose if it's possible\")\n\n sub = p.add_subparsers(dest='command')\n\n p_init = sub.add_parser('init', aliases=commands.get_aliases_for('init'),\n help=\"Create new version file\")\n p_init.add_argument('value', nargs='?', type=str,\n default=cfg.default_init_version,\n help=\"Initial version\")\n p_init.add_argument('--vcs-engine', type=str,\n default=cfg.vcs_engine,\n help=\"Select VCS engine (only git is supported currently)\", )\n p_init.add_argument('--vcs-commit-message', '-m', type=str,\n default=cfg.vcs_commit_message,\n help=\"Commit message used when committing changes\")\n p_init.add_argument('--commit', '-c', action='store_true',\n help=\"Commit changes done by `up` command (only if there is no changes in repo before)\")\n\n p_up = sub.add_parser('up', aliases=commands.get_aliases_for('up'),\n help=\"Increase version\")\n p_up.add_argument('--vcs-engine', type=str,\n default=cfg.vcs_engine,\n help=\"Select VCS engine (only git is supported currently)\", )\n p_up.add_argument('--vcs-commit-message', '-m', type=str,\n default=cfg.vcs_commit_message,\n help=\"Commit message used when committing changes\")\n 
p_up.add_argument('--commit', '-c', action='store_true',\n help=\"Commit changes done by `up` command (only if there is no changes in repo before)\")\n p_up.add_argument('value', nargs='?', type=int,\n default=cfg.default_increase_value,\n help=\"Increase version by this value (default: %d)\" % cfg.default_increase_value)\n\n p_up_gr = p_up.add_mutually_exclusive_group()\n p_up_gr.add_argument('--major', '-j', action=\"store_true\",\n help=\"increase major part of version\" + (\" (project default)\" if cfg.up_part == 'major' else \"\"))\n p_up_gr.add_argument('--minor', '-n', action=\"store_true\",\n help=\"increase minor part of version\" + (\" (project default)\" if cfg.up_part == 'minor' else \"\"))\n p_up_gr.add_argument('--patch', '-p', action=\"store_true\",\n help=\"increase patch part of version\" + (\" (project default)\" if cfg.up_part == 'patch' else \"\"))\n\n p_set = sub.add_parser('set', aliases=commands.get_aliases_for('set'),\n help=\"Set version to specified one\")\n p_set.add_argument('--major', '-j', type=int,\n help=\"set major part of version to MAJOR\")\n p_set.add_argument('--minor', '-n', type=int,\n help=\"set minor part of version to MINOR\")\n p_set.add_argument('--patch', '-p', type=int,\n help=\"set patch part of version to PATCH\")\n p_set.add_argument('--prerelease', '-r', type=str,\n help=\"set prerelease part of version to PRERELEASE\")\n p_set.add_argument('--build', '-b', type=str,\n help=\"set build part of version to BUILD\")\n p_set.add_argument('--vcs-engine', type=str,\n default=cfg.vcs_engine,\n help=\"Select VCS engine (only git is supported currently)\", )\n p_set.add_argument('--vcs-commit-message', '-m', type=str,\n default=cfg.vcs_commit_message,\n help=\"Commit message used when committing changes\")\n p_set.add_argument('--commit', '-c', action='store_true',\n help=\"Commit changes done by `set` command (only if there is no changes in repo before)\")\n p_set.add_argument('value', nargs='?', type=str,\n help=\"set 
version to this value\")\n\n p_tag = sub.add_parser('tag', aliases=commands.get_aliases_for('tag'),\n help=\"Create VCS tag with current version\")\n p_tag.add_argument('--vcs-tag-param', dest='vcs_tag_params', type=str, action=\"append\",\n help=\"Additional params for VCS for \\\"tag\\\" command\")\n\n sub.add_parser('read', aliases=commands.get_aliases_for('read'),\n help=\"Read current version\")\n\n args = p.parse_args(args)\n\n cfg.command = args.command\n cfg.version_file = pathlib.Path(args.version_file).absolute()\n cfg.date_format = args.date_format\n cfg.verbose = args.verbose\n\n version_file_requirement = 'doesn\\'t matter'\n if cfg.command == 'init':\n version_file_requirement = 'none'\n\n cfg.commit = args.commit\n cfg.vcs_engine = args.vcs_engine\n cfg.vcs_commit_message = args.vcs_commit_message\n cfg.value = args.value\n\n elif cfg.command == 'up':\n version_file_requirement = 'required'\n\n cfg.commit = args.commit\n cfg.vcs_engine = args.vcs_engine\n cfg.vcs_commit_message = args.vcs_commit_message\n cfg.value = args.value\n if args.major:\n cfg.up_part = 'major'\n elif args.minor:\n cfg.up_part = 'minor'\n elif args.patch:\n cfg.up_part = 'patch'\n\n elif cfg.command == 'set':\n version_file_requirement = 'required'\n\n cfg.commit = args.commit\n cfg.vcs_engine = args.vcs_engine\n cfg.vcs_commit_message = args.vcs_commit_message\n\n if args.value:\n cfg.value = args.value\n else:\n cfg.value = (\n args.major, args.minor, args.patch,\n args.prerelease, args.build\n )\n\n if all(value is None for value in cfg.value):\n p.error(\"Version is not specified\")\n\n elif cfg.command == 'tag':\n version_file_requirement = 'required'\n\n cfg.vcs_tag_params = args.vcs_tag_params or []\n\n elif cfg.command is None:\n cfg.command = 'read'\n version_file_requirement = 'required'\n\n if version_file_requirement == 'required':\n if not cfg.version_file.exists():\n p.error(\"Version file \\\"%s\\\" doesn't exists\" % cfg.version_file)\n elif 
version_file_requirement == 'none':\n if cfg.version_file.exists():\n p.error(\"Version file \\\"%s\\\" already exists\" % cfg.version_file)\n",
"def _find_project_config_file(user_config_file):\n \"\"\"Find path to project-wide config file\n Search from current working directory, and traverse path up to\n directory with .versionner.rc file or root directory\n\n :param user_config_file: instance with user-wide config path\n :type: pathlib.Path\n :rtype: pathlib.Path\n \"\"\"\n proj_cfg_dir = pathlib.Path('.').absolute()\n proj_cfg_file = None\n root = pathlib.Path('/')\n while proj_cfg_dir != root:\n proj_cfg_file = proj_cfg_dir / defaults.RC_FILENAME\n if proj_cfg_file.exists():\n break\n\n proj_cfg_file = None\n # pylint: disable=redefined-variable-type\n proj_cfg_dir = proj_cfg_dir.parent\n\n if proj_cfg_file and proj_cfg_file != user_config_file:\n return proj_cfg_file\n"
] | """Main CLI engine for versionner
Parse input options, and make all dirty jobs.
"""
# pylint: disable=wrong-import-position
from versionner import utils
utils.validate_python_version()
# pylint: disable=wrong-import-order
import argparse
import pathlib
import os.path
import sys
import versionner
from versionner import config
from versionner import defaults
from versionner import commands
from versionner.errors import VersionnerError
# pylint: disable=too-many-branches,too-many-statements
def parse_args(args, cfg):
"""Parse input arguments of script.
:param args: list of parameters
:type: argparse.Namespace
:param cfg: configuration storage
:type: Config
"""
prog = pathlib.Path(sys.argv[0]).parts[-1].replace('.py', '')
prog_version = "%%(prog)s %s" % versionner.__version__
# pylint: disable=invalid-name
p = argparse.ArgumentParser(prog=prog, description='Helps manipulating version of the project')
p.add_argument('--file', '-f', dest='version_file', type=str,
default=cfg.version_file,
help="path to file where version is saved")
p.add_argument('--version', '-v', action="version", version=prog_version)
p.add_argument('--date-format', type=str,
default=cfg.date_format,
help="Date format used in project files")
p.add_argument('--verbose', action="store_true",
help="Be more verbose if it's possible")
sub = p.add_subparsers(dest='command')
p_init = sub.add_parser('init', aliases=commands.get_aliases_for('init'),
help="Create new version file")
p_init.add_argument('value', nargs='?', type=str,
default=cfg.default_init_version,
help="Initial version")
p_init.add_argument('--vcs-engine', type=str,
default=cfg.vcs_engine,
help="Select VCS engine (only git is supported currently)", )
p_init.add_argument('--vcs-commit-message', '-m', type=str,
default=cfg.vcs_commit_message,
help="Commit message used when committing changes")
p_init.add_argument('--commit', '-c', action='store_true',
help="Commit changes done by `up` command (only if there is no changes in repo before)")
p_up = sub.add_parser('up', aliases=commands.get_aliases_for('up'),
help="Increase version")
p_up.add_argument('--vcs-engine', type=str,
default=cfg.vcs_engine,
help="Select VCS engine (only git is supported currently)", )
p_up.add_argument('--vcs-commit-message', '-m', type=str,
default=cfg.vcs_commit_message,
help="Commit message used when committing changes")
p_up.add_argument('--commit', '-c', action='store_true',
help="Commit changes done by `up` command (only if there is no changes in repo before)")
p_up.add_argument('value', nargs='?', type=int,
default=cfg.default_increase_value,
help="Increase version by this value (default: %d)" % cfg.default_increase_value)
p_up_gr = p_up.add_mutually_exclusive_group()
p_up_gr.add_argument('--major', '-j', action="store_true",
help="increase major part of version" + (" (project default)" if cfg.up_part == 'major' else ""))
p_up_gr.add_argument('--minor', '-n', action="store_true",
help="increase minor part of version" + (" (project default)" if cfg.up_part == 'minor' else ""))
p_up_gr.add_argument('--patch', '-p', action="store_true",
help="increase patch part of version" + (" (project default)" if cfg.up_part == 'patch' else ""))
p_set = sub.add_parser('set', aliases=commands.get_aliases_for('set'),
help="Set version to specified one")
p_set.add_argument('--major', '-j', type=int,
help="set major part of version to MAJOR")
p_set.add_argument('--minor', '-n', type=int,
help="set minor part of version to MINOR")
p_set.add_argument('--patch', '-p', type=int,
help="set patch part of version to PATCH")
p_set.add_argument('--prerelease', '-r', type=str,
help="set prerelease part of version to PRERELEASE")
p_set.add_argument('--build', '-b', type=str,
help="set build part of version to BUILD")
p_set.add_argument('--vcs-engine', type=str,
default=cfg.vcs_engine,
help="Select VCS engine (only git is supported currently)", )
p_set.add_argument('--vcs-commit-message', '-m', type=str,
default=cfg.vcs_commit_message,
help="Commit message used when committing changes")
p_set.add_argument('--commit', '-c', action='store_true',
help="Commit changes done by `set` command (only if there is no changes in repo before)")
p_set.add_argument('value', nargs='?', type=str,
help="set version to this value")
p_tag = sub.add_parser('tag', aliases=commands.get_aliases_for('tag'),
help="Create VCS tag with current version")
p_tag.add_argument('--vcs-tag-param', dest='vcs_tag_params', type=str, action="append",
help="Additional params for VCS for \"tag\" command")
sub.add_parser('read', aliases=commands.get_aliases_for('read'),
help="Read current version")
args = p.parse_args(args)
cfg.command = args.command
cfg.version_file = pathlib.Path(args.version_file).absolute()
cfg.date_format = args.date_format
cfg.verbose = args.verbose
version_file_requirement = 'doesn\'t matter'
if cfg.command == 'init':
version_file_requirement = 'none'
cfg.commit = args.commit
cfg.vcs_engine = args.vcs_engine
cfg.vcs_commit_message = args.vcs_commit_message
cfg.value = args.value
elif cfg.command == 'up':
version_file_requirement = 'required'
cfg.commit = args.commit
cfg.vcs_engine = args.vcs_engine
cfg.vcs_commit_message = args.vcs_commit_message
cfg.value = args.value
if args.major:
cfg.up_part = 'major'
elif args.minor:
cfg.up_part = 'minor'
elif args.patch:
cfg.up_part = 'patch'
elif cfg.command == 'set':
version_file_requirement = 'required'
cfg.commit = args.commit
cfg.vcs_engine = args.vcs_engine
cfg.vcs_commit_message = args.vcs_commit_message
if args.value:
cfg.value = args.value
else:
cfg.value = (
args.major, args.minor, args.patch,
args.prerelease, args.build
)
if all(value is None for value in cfg.value):
p.error("Version is not specified")
elif cfg.command == 'tag':
version_file_requirement = 'required'
cfg.vcs_tag_params = args.vcs_tag_params or []
elif cfg.command is None:
cfg.command = 'read'
version_file_requirement = 'required'
if version_file_requirement == 'required':
if not cfg.version_file.exists():
p.error("Version file \"%s\" doesn't exists" % cfg.version_file)
elif version_file_requirement == 'none':
if cfg.version_file.exists():
p.error("Version file \"%s\" already exists" % cfg.version_file)
def _find_project_config_file(user_config_file):
"""Find path to project-wide config file
Search from current working directory, and traverse path up to
directory with .versionner.rc file or root directory
:param user_config_file: instance with user-wide config path
:type: pathlib.Path
:rtype: pathlib.Path
"""
proj_cfg_dir = pathlib.Path('.').absolute()
proj_cfg_file = None
root = pathlib.Path('/')
while proj_cfg_dir != root:
proj_cfg_file = proj_cfg_dir / defaults.RC_FILENAME
if proj_cfg_file.exists():
break
proj_cfg_file = None
# pylint: disable=redefined-variable-type
proj_cfg_dir = proj_cfg_dir.parent
if proj_cfg_file and proj_cfg_file != user_config_file:
return proj_cfg_file
def main():
"""Main script
:return: exit code (for shell)
:rtype: int
"""
return execute(sys.argv[0], sys.argv[1:])
|
msztolcman/versionner | versionner/config.py | FileConfig.validate | python | def validate(self):
if not self.file.exists():
raise ValueError("File \"%s\" doesn't exists")
if not self.search:
raise ValueError("Search cannot be empty")
if not self.replace:
raise ValueError("Replace cannot be empty")
if self.match not in ('file', 'line'):
raise ValueError("Match must be one of: file, line")
try:
codecs.lookup(self.encoding)
except LookupError:
raise ValueError("Unknown encoding: \"%s\"" % self.encoding) | Validate current file configuration
:raise ValueError: | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/config.py#L39-L59 | null | class FileConfig:
"""Single project file configuration"""
def __init__(self, filename, cfg):
"""
Evaluate single file configuration
:param filename:
:param cfg:
"""
self.filename = filename
self.file = pathlib.Path(filename)
self.enabled = cfg.getboolean('enabled', True)
self.search = cfg['search']
self.replace = cfg['replace']
self.date_format = cfg.get('date_format', None)
self.match = cfg.get('match', 'line')
self.search_flags = 0
self.encoding = cfg.get('encoding', 'utf-8')
search_flags = cfg.get('search_flags', '')
if search_flags:
search_flags = re.split(r'\s*,\s*', search_flags)
for search_flag in search_flags:
self.search_flags |= getattr(re, search_flag.upper())
def __repr__(self):
return '<FileConfig(%s)>' % self.filename
|
msztolcman/versionner | versionner/config.py | Config._parse_config_file | python | def _parse_config_file(self, cfg_files):
cfg_handler = configparser.ConfigParser(interpolation=None)
if not cfg_handler.read(map(str, cfg_files)):
return
self._parse_global_section(cfg_handler)
self._parse_vcs_section(cfg_handler)
self._parse_file_section(cfg_handler) | Parse config file (ini) and set properties
:return: | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/config.py#L106-L118 | [
"def _parse_global_section(self, cfg_handler):\n \"\"\"Parse global ([versionner]) section\n\n :param cfg_handler:\n :return:\n \"\"\"\n # global configuration\n if 'versionner' in cfg_handler:\n cfg = cfg_handler['versionner']\n if 'file' in cfg:\n self.version_file = cfg['file']\n if 'date_format' in cfg:\n self.date_format = cfg['date_format']\n if 'up_part' in cfg:\n self.up_part = cfg['up_part']\n if 'default_init_version' in cfg:\n self.default_init_version = cfg['default_init_version']\n if 'default_increase_value' in cfg:\n self.default_increase_value = cfg.getint('default_increase_value')\n",
"def _parse_vcs_section(self, cfg_handler):\n \"\"\"Parse [vcs] section\n\n :param cfg_handler:\n :return:\n \"\"\"\n if 'vcs' in cfg_handler:\n cfg = cfg_handler['vcs']\n if 'engine' in cfg:\n self.vcs_engine = cfg['engine']\n if 'tag_params' in cfg:\n self.vcs_tag_params = list(filter(None, cfg['tag_params'].split(\"\\n\")))\n if 'commit_message' in cfg:\n self.vcs_commit_message = cfg['commit_message']\n",
"def _parse_file_section(self, cfg_handler):\n \"\"\"Parse [file:*] sections\n\n :param cfg_handler:\n :return:\n \"\"\"\n\n _number_rxp = re.compile(r'^\\d+:(.)')\n # project files configuration\n for section in cfg_handler.sections():\n if section.startswith('file:'):\n path = section[5:]\n path = _number_rxp.sub(r'\\1', path)\n\n project_file = FileConfig(path, cfg_handler[section])\n\n if not project_file.date_format:\n project_file.date_format = self.date_format\n\n if project_file.enabled:\n try:\n project_file.validate()\n except ValueError as exc:\n print(\"Incorrect configuration for file \\\"%s\\\": %s\" % (project_file.filename, exc.args[0]), file=sys.stderr)\n else:\n self.files.append(project_file)\n"
] | class Config:
"""Configuration"""
__slots__ = (
'command',
'commit',
'date_format',
'default_init_version',
'default_increase_value',
'files',
'value',
'up_part',
'vcs_commit_message',
'vcs_engine',
'vcs_tag_params',
'verbose',
'version_file',
)
def __init__(self, files=None):
"""Evaluate configuration
:return:
"""
self.command = None
self.commit = False
self.date_format = defaults.DEFAULT_DATE_FORMAT
self.default_init_version = defaults.DEFAULT_INIT_VERSION
self.default_increase_value = defaults.DEFAULT_INCREASE_VALUE
self.files = []
self.value = None
self.up_part = defaults.DEFAULT_UP_PART
self.vcs_commit_message = defaults.DEFAULT_VCS_COMMIT_MESSAGE
self.vcs_engine = 'git'
self.vcs_tag_params = []
self.verbose = False
self.version_file = defaults.DEFAULT_VERSION_FILE
if files:
self._parse_config_file(files)
def _parse_global_section(self, cfg_handler):
"""Parse global ([versionner]) section
:param cfg_handler:
:return:
"""
# global configuration
if 'versionner' in cfg_handler:
cfg = cfg_handler['versionner']
if 'file' in cfg:
self.version_file = cfg['file']
if 'date_format' in cfg:
self.date_format = cfg['date_format']
if 'up_part' in cfg:
self.up_part = cfg['up_part']
if 'default_init_version' in cfg:
self.default_init_version = cfg['default_init_version']
if 'default_increase_value' in cfg:
self.default_increase_value = cfg.getint('default_increase_value')
def _parse_vcs_section(self, cfg_handler):
"""Parse [vcs] section
:param cfg_handler:
:return:
"""
if 'vcs' in cfg_handler:
cfg = cfg_handler['vcs']
if 'engine' in cfg:
self.vcs_engine = cfg['engine']
if 'tag_params' in cfg:
self.vcs_tag_params = list(filter(None, cfg['tag_params'].split("\n")))
if 'commit_message' in cfg:
self.vcs_commit_message = cfg['commit_message']
def _parse_file_section(self, cfg_handler):
"""Parse [file:*] sections
:param cfg_handler:
:return:
"""
_number_rxp = re.compile(r'^\d+:(.)')
# project files configuration
for section in cfg_handler.sections():
if section.startswith('file:'):
path = section[5:]
path = _number_rxp.sub(r'\1', path)
project_file = FileConfig(path, cfg_handler[section])
if not project_file.date_format:
project_file.date_format = self.date_format
if project_file.enabled:
try:
project_file.validate()
except ValueError as exc:
print("Incorrect configuration for file \"%s\": %s" % (project_file.filename, exc.args[0]), file=sys.stderr)
else:
self.files.append(project_file)
def __repr__(self):
ret = '<' + self.__class__.__name__ + ': '
ret += ', '.join('%s=%r' % (name, getattr(self, name)) for name in self.__slots__)
return ret
|
msztolcman/versionner | versionner/config.py | Config._parse_global_section | python | def _parse_global_section(self, cfg_handler):
# global configuration
if 'versionner' in cfg_handler:
cfg = cfg_handler['versionner']
if 'file' in cfg:
self.version_file = cfg['file']
if 'date_format' in cfg:
self.date_format = cfg['date_format']
if 'up_part' in cfg:
self.up_part = cfg['up_part']
if 'default_init_version' in cfg:
self.default_init_version = cfg['default_init_version']
if 'default_increase_value' in cfg:
self.default_increase_value = cfg.getint('default_increase_value') | Parse global ([versionner]) section
:param cfg_handler:
:return: | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/config.py#L120-L138 | null | class Config:
"""Configuration"""
__slots__ = (
'command',
'commit',
'date_format',
'default_init_version',
'default_increase_value',
'files',
'value',
'up_part',
'vcs_commit_message',
'vcs_engine',
'vcs_tag_params',
'verbose',
'version_file',
)
def __init__(self, files=None):
"""Evaluate configuration
:return:
"""
self.command = None
self.commit = False
self.date_format = defaults.DEFAULT_DATE_FORMAT
self.default_init_version = defaults.DEFAULT_INIT_VERSION
self.default_increase_value = defaults.DEFAULT_INCREASE_VALUE
self.files = []
self.value = None
self.up_part = defaults.DEFAULT_UP_PART
self.vcs_commit_message = defaults.DEFAULT_VCS_COMMIT_MESSAGE
self.vcs_engine = 'git'
self.vcs_tag_params = []
self.verbose = False
self.version_file = defaults.DEFAULT_VERSION_FILE
if files:
self._parse_config_file(files)
def _parse_config_file(self, cfg_files):
"""Parse config file (ini) and set properties
:return:
"""
cfg_handler = configparser.ConfigParser(interpolation=None)
if not cfg_handler.read(map(str, cfg_files)):
return
self._parse_global_section(cfg_handler)
self._parse_vcs_section(cfg_handler)
self._parse_file_section(cfg_handler)
def _parse_vcs_section(self, cfg_handler):
"""Parse [vcs] section
:param cfg_handler:
:return:
"""
if 'vcs' in cfg_handler:
cfg = cfg_handler['vcs']
if 'engine' in cfg:
self.vcs_engine = cfg['engine']
if 'tag_params' in cfg:
self.vcs_tag_params = list(filter(None, cfg['tag_params'].split("\n")))
if 'commit_message' in cfg:
self.vcs_commit_message = cfg['commit_message']
def _parse_file_section(self, cfg_handler):
"""Parse [file:*] sections
:param cfg_handler:
:return:
"""
_number_rxp = re.compile(r'^\d+:(.)')
# project files configuration
for section in cfg_handler.sections():
if section.startswith('file:'):
path = section[5:]
path = _number_rxp.sub(r'\1', path)
project_file = FileConfig(path, cfg_handler[section])
if not project_file.date_format:
project_file.date_format = self.date_format
if project_file.enabled:
try:
project_file.validate()
except ValueError as exc:
print("Incorrect configuration for file \"%s\": %s" % (project_file.filename, exc.args[0]), file=sys.stderr)
else:
self.files.append(project_file)
def __repr__(self):
ret = '<' + self.__class__.__name__ + ': '
ret += ', '.join('%s=%r' % (name, getattr(self, name)) for name in self.__slots__)
return ret
|
msztolcman/versionner | versionner/config.py | Config._parse_vcs_section | python | def _parse_vcs_section(self, cfg_handler):
if 'vcs' in cfg_handler:
cfg = cfg_handler['vcs']
if 'engine' in cfg:
self.vcs_engine = cfg['engine']
if 'tag_params' in cfg:
self.vcs_tag_params = list(filter(None, cfg['tag_params'].split("\n")))
if 'commit_message' in cfg:
self.vcs_commit_message = cfg['commit_message'] | Parse [vcs] section
:param cfg_handler:
:return: | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/config.py#L140-L153 | null | class Config:
"""Configuration"""
__slots__ = (
'command',
'commit',
'date_format',
'default_init_version',
'default_increase_value',
'files',
'value',
'up_part',
'vcs_commit_message',
'vcs_engine',
'vcs_tag_params',
'verbose',
'version_file',
)
def __init__(self, files=None):
"""Evaluate configuration
:return:
"""
self.command = None
self.commit = False
self.date_format = defaults.DEFAULT_DATE_FORMAT
self.default_init_version = defaults.DEFAULT_INIT_VERSION
self.default_increase_value = defaults.DEFAULT_INCREASE_VALUE
self.files = []
self.value = None
self.up_part = defaults.DEFAULT_UP_PART
self.vcs_commit_message = defaults.DEFAULT_VCS_COMMIT_MESSAGE
self.vcs_engine = 'git'
self.vcs_tag_params = []
self.verbose = False
self.version_file = defaults.DEFAULT_VERSION_FILE
if files:
self._parse_config_file(files)
def _parse_config_file(self, cfg_files):
"""Parse config file (ini) and set properties
:return:
"""
cfg_handler = configparser.ConfigParser(interpolation=None)
if not cfg_handler.read(map(str, cfg_files)):
return
self._parse_global_section(cfg_handler)
self._parse_vcs_section(cfg_handler)
self._parse_file_section(cfg_handler)
def _parse_global_section(self, cfg_handler):
"""Parse global ([versionner]) section
:param cfg_handler:
:return:
"""
# global configuration
if 'versionner' in cfg_handler:
cfg = cfg_handler['versionner']
if 'file' in cfg:
self.version_file = cfg['file']
if 'date_format' in cfg:
self.date_format = cfg['date_format']
if 'up_part' in cfg:
self.up_part = cfg['up_part']
if 'default_init_version' in cfg:
self.default_init_version = cfg['default_init_version']
if 'default_increase_value' in cfg:
self.default_increase_value = cfg.getint('default_increase_value')
def _parse_file_section(self, cfg_handler):
"""Parse [file:*] sections
:param cfg_handler:
:return:
"""
_number_rxp = re.compile(r'^\d+:(.)')
# project files configuration
for section in cfg_handler.sections():
if section.startswith('file:'):
path = section[5:]
path = _number_rxp.sub(r'\1', path)
project_file = FileConfig(path, cfg_handler[section])
if not project_file.date_format:
project_file.date_format = self.date_format
if project_file.enabled:
try:
project_file.validate()
except ValueError as exc:
print("Incorrect configuration for file \"%s\": %s" % (project_file.filename, exc.args[0]), file=sys.stderr)
else:
self.files.append(project_file)
def __repr__(self):
ret = '<' + self.__class__.__name__ + ': '
ret += ', '.join('%s=%r' % (name, getattr(self, name)) for name in self.__slots__)
return ret
|
msztolcman/versionner | versionner/config.py | Config._parse_file_section | python | def _parse_file_section(self, cfg_handler):
_number_rxp = re.compile(r'^\d+:(.)')
# project files configuration
for section in cfg_handler.sections():
if section.startswith('file:'):
path = section[5:]
path = _number_rxp.sub(r'\1', path)
project_file = FileConfig(path, cfg_handler[section])
if not project_file.date_format:
project_file.date_format = self.date_format
if project_file.enabled:
try:
project_file.validate()
except ValueError as exc:
print("Incorrect configuration for file \"%s\": %s" % (project_file.filename, exc.args[0]), file=sys.stderr)
else:
self.files.append(project_file) | Parse [file:*] sections
:param cfg_handler:
:return: | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/config.py#L155-L180 | null | class Config:
"""Configuration"""
__slots__ = (
'command',
'commit',
'date_format',
'default_init_version',
'default_increase_value',
'files',
'value',
'up_part',
'vcs_commit_message',
'vcs_engine',
'vcs_tag_params',
'verbose',
'version_file',
)
def __init__(self, files=None):
"""Evaluate configuration
:return:
"""
self.command = None
self.commit = False
self.date_format = defaults.DEFAULT_DATE_FORMAT
self.default_init_version = defaults.DEFAULT_INIT_VERSION
self.default_increase_value = defaults.DEFAULT_INCREASE_VALUE
self.files = []
self.value = None
self.up_part = defaults.DEFAULT_UP_PART
self.vcs_commit_message = defaults.DEFAULT_VCS_COMMIT_MESSAGE
self.vcs_engine = 'git'
self.vcs_tag_params = []
self.verbose = False
self.version_file = defaults.DEFAULT_VERSION_FILE
if files:
self._parse_config_file(files)
def _parse_config_file(self, cfg_files):
"""Parse config file (ini) and set properties
:return:
"""
cfg_handler = configparser.ConfigParser(interpolation=None)
if not cfg_handler.read(map(str, cfg_files)):
return
self._parse_global_section(cfg_handler)
self._parse_vcs_section(cfg_handler)
self._parse_file_section(cfg_handler)
def _parse_global_section(self, cfg_handler):
"""Parse global ([versionner]) section
:param cfg_handler:
:return:
"""
# global configuration
if 'versionner' in cfg_handler:
cfg = cfg_handler['versionner']
if 'file' in cfg:
self.version_file = cfg['file']
if 'date_format' in cfg:
self.date_format = cfg['date_format']
if 'up_part' in cfg:
self.up_part = cfg['up_part']
if 'default_init_version' in cfg:
self.default_init_version = cfg['default_init_version']
if 'default_increase_value' in cfg:
self.default_increase_value = cfg.getint('default_increase_value')
def _parse_vcs_section(self, cfg_handler):
"""Parse [vcs] section
:param cfg_handler:
:return:
"""
if 'vcs' in cfg_handler:
cfg = cfg_handler['vcs']
if 'engine' in cfg:
self.vcs_engine = cfg['engine']
if 'tag_params' in cfg:
self.vcs_tag_params = list(filter(None, cfg['tag_params'].split("\n")))
if 'commit_message' in cfg:
self.vcs_commit_message = cfg['commit_message']
def __repr__(self):
ret = '<' + self.__class__.__name__ + ': '
ret += ', '.join('%s=%r' % (name, getattr(self, name)) for name in self.__slots__)
return ret
|
msztolcman/versionner | versionner/version.py | VersionFile.read | python | def read(self):
with self._path.open(mode='r') as fh:
version = fh.read().strip()
return Version(version) | Read version from version file
:rtype : Version
:return: | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/version.py#L205-L213 | null | class VersionFile():
"""Manipulate project version file"""
__slots__ = ('_path', )
def __init__(self, path):
"""Initialisation
:param path:pathlib.Path
"""
self._path = path
def write(self, version):
"""Save new version into self._path in safe way (using temporary file)
:param version:Version
"""
with tempfile.NamedTemporaryFile(mode='w', delete=False) as fh:
fh.write(str(version))
if self._path.exists():
shutil.copystat(str(self._path), fh.name)
try:
pathlib.Path(fh.name).rename(self._path)
except OSError as exc:
# handling situation with tmp file on another device
if exc.errno == 18 and 'Invalid cross-device link' in exc.strerror:
with self._path.open(mode='w') as fh:
fh.write(str(version))
else:
raise
def __str__(self):
return str(self._path)
|
msztolcman/versionner | versionner/version.py | VersionFile.write | python | def write(self, version):
with tempfile.NamedTemporaryFile(mode='w', delete=False) as fh:
fh.write(str(version))
if self._path.exists():
shutil.copystat(str(self._path), fh.name)
try:
pathlib.Path(fh.name).rename(self._path)
except OSError as exc:
# handling situation with tmp file on another device
if exc.errno == 18 and 'Invalid cross-device link' in exc.strerror:
with self._path.open(mode='w') as fh:
fh.write(str(version))
else:
raise | Save new version into self._path in safe way (using temporary file)
:param version:Version | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/version.py#L215-L234 | null | class VersionFile():
"""Manipulate project version file"""
__slots__ = ('_path', )
def __init__(self, path):
"""Initialisation
:param path:pathlib.Path
"""
self._path = path
def read(self):
"""Read version from version file
:rtype : Version
:return:
"""
with self._path.open(mode='r') as fh:
version = fh.read().strip()
return Version(version)
def __str__(self):
return str(self._path)
|
msztolcman/versionner | versionner/commands/__init__.py | _manage_commands | python | def _manage_commands():
for name, (command, *aliases) in COMMANDS.items():
COMMAND_MAPPER[name] = command
for alias in aliases:
COMMAND_MAPPER[alias] = command
COMMAND_ALIASES[name] = aliases | Build COMMAND_MAPPER and COMMAND_ALIASES dictionaries using COMMANDS
@return:None | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/commands/__init__.py#L24-L33 | null | """Commands for versionner
Module defines helpers for searching for commands and aliases for them.
"""
from .command import Command, CommandOutput
from .up import Up
from .tag import Tag
from .set import Set
from .init import Init
from .read import Read
COMMANDS = {
'up': (Up, 'u'),
'init': (Init, 'i'),
'set': (Set, 's'),
'tag': (Tag, 't'),
'read': (Read, 'r'),
}
COMMAND_MAPPER = {}
COMMAND_ALIASES = {}
_manage_commands()
del _manage_commands
def get(name, *args, **kwargs):
"""Find command class for given command name and return it's instance
:param name: str
:param args: additional arguments for Command
:param kwargs: additional arguments for Command
:return: Command
"""
cmd = COMMAND_MAPPER.get(name)
return cmd(*args, **kwargs)
def get_aliases_for(name):
"""Find aliases for given command name
:param name: str
:return: Str[]
"""
return COMMAND_ALIASES[name]
|
msztolcman/versionner | versionner/commands/__init__.py | get | python | def get(name, *args, **kwargs):
cmd = COMMAND_MAPPER.get(name)
return cmd(*args, **kwargs) | Find command class for given command name and return it's instance
:param name: str
:param args: additional arguments for Command
:param kwargs: additional arguments for Command
:return: Command | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/commands/__init__.py#L38-L47 | null | """Commands for versionner
Module defines helpers for searching for commands and aliases for them.
"""
from .command import Command, CommandOutput
from .up import Up
from .tag import Tag
from .set import Set
from .init import Init
from .read import Read
COMMANDS = {
'up': (Up, 'u'),
'init': (Init, 'i'),
'set': (Set, 's'),
'tag': (Tag, 't'),
'read': (Read, 'r'),
}
COMMAND_MAPPER = {}
COMMAND_ALIASES = {}
def _manage_commands():
"""Build COMMAND_MAPPER and COMMAND_ALIASES dictionaries using COMMANDS
@return:None
"""
for name, (command, *aliases) in COMMANDS.items():
COMMAND_MAPPER[name] = command
for alias in aliases:
COMMAND_MAPPER[alias] = command
COMMAND_ALIASES[name] = aliases
_manage_commands()
del _manage_commands
def get_aliases_for(name):
"""Find aliases for given command name
:param name: str
:return: Str[]
"""
return COMMAND_ALIASES[name]
|
msztolcman/versionner | versionner/commands/files_management.py | update_project_files | python | def update_project_files(cfg, proj_version):
counters = {'files': 0, 'changes': 0}
for project_file in cfg.files:
if not project_file.file.exists():
print("File \"%s\" not found" % project_file.filename, file=sys.stderr)
continue
# prepare data
date_format = project_file.date_format or cfg.date_format
rxp = re.compile(project_file.search, project_file.search_flags)
replace = project_file.replace % {
"date": time.strftime(date_format),
"major": proj_version.major,
"minor": proj_version.minor,
"patch": proj_version.patch,
"prerelease": proj_version.prerelease,
"version": str(proj_version),
"build": proj_version.build,
}
# update project files
with \
project_file.file.open(mode="r", encoding=project_file.encoding) as fh_in, \
tempfile.NamedTemporaryFile(mode="w", encoding=project_file.encoding, delete=False) as fh_out:
if project_file.match == 'line':
changes = 0
for line in fh_in:
(line, cnt) = rxp.subn(replace, line)
if cnt:
changes += cnt
fh_out.write(line)
if changes:
counters['files'] += 1
counters['changes'] += changes
elif project_file.match == 'file':
data = fh_in.read()
(data, cnt) = rxp.subn(replace, data)
if cnt:
counters['files'] += 1
counters['changes'] += cnt
fh_out.write(data)
else:
raise ConfigError("Unknown match type: \"%s\"" % project_file.match)
fh_out.close()
shutil.copystat(project_file.filename, fh_out.name)
pathlib.Path(fh_out.name).rename(project_file.filename)
return counters | Update version string in project files
:rtype : dict
:param cfg:project configuration
:param proj_version:current version
:return:dict :raise ValueError: | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/commands/files_management.py#L14-L76 | null | """Helpers for commands related to manipulating files"""
import pathlib
import re
import shutil
import sys
import tempfile
import time
from versionner import vcs
from versionner.errors import ConfigError
def save_version_and_update_files(cfg, version_file, version_to_save):
"""Save version to version_file and commit changes if required
:param cfg:
:param version_file:
:param version_to_save:
:return:
"""
with vcs.VCS(cfg.vcs_engine) as vcs_handler:
if cfg.commit:
vcs_handler.raise_if_cant_commit()
version_file.write(version_to_save)
quant = update_project_files(cfg, version_to_save)
if cfg.commit:
files = {str(file.file) for file in cfg.files}
files.add(str(cfg.version_file))
vcs_handler.add_to_stage(files)
vcs_handler.create_commit(cfg.vcs_commit_message % {'version': version_to_save})
return quant
|
msztolcman/versionner | versionner/commands/files_management.py | save_version_and_update_files | python | def save_version_and_update_files(cfg, version_file, version_to_save):
with vcs.VCS(cfg.vcs_engine) as vcs_handler:
if cfg.commit:
vcs_handler.raise_if_cant_commit()
version_file.write(version_to_save)
quant = update_project_files(cfg, version_to_save)
if cfg.commit:
files = {str(file.file) for file in cfg.files}
files.add(str(cfg.version_file))
vcs_handler.add_to_stage(files)
vcs_handler.create_commit(cfg.vcs_commit_message % {'version': version_to_save})
return quant | Save version to version_file and commit changes if required
:param cfg:
:param version_file:
:param version_to_save:
:return: | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/commands/files_management.py#L79-L101 | [
"def update_project_files(cfg, proj_version):\n \"\"\"\n Update version string in project files\n\n :rtype : dict\n :param cfg:project configuration\n :param proj_version:current version\n :return:dict :raise ValueError:\n \"\"\"\n counters = {'files': 0, 'changes': 0}\n\n for project_file in cfg.files:\n if not project_file.file.exists():\n print(\"File \\\"%s\\\" not found\" % project_file.filename, file=sys.stderr)\n continue\n\n # prepare data\n date_format = project_file.date_format or cfg.date_format\n\n rxp = re.compile(project_file.search, project_file.search_flags)\n replace = project_file.replace % {\n \"date\": time.strftime(date_format),\n \"major\": proj_version.major,\n \"minor\": proj_version.minor,\n \"patch\": proj_version.patch,\n \"prerelease\": proj_version.prerelease,\n \"version\": str(proj_version),\n \"build\": proj_version.build,\n }\n\n # update project files\n with \\\n project_file.file.open(mode=\"r\", encoding=project_file.encoding) as fh_in, \\\n tempfile.NamedTemporaryFile(mode=\"w\", encoding=project_file.encoding, delete=False) as fh_out:\n if project_file.match == 'line':\n changes = 0\n for line in fh_in:\n (line, cnt) = rxp.subn(replace, line)\n if cnt:\n changes += cnt\n fh_out.write(line)\n\n if changes:\n counters['files'] += 1\n counters['changes'] += changes\n\n elif project_file.match == 'file':\n data = fh_in.read()\n (data, cnt) = rxp.subn(replace, data)\n if cnt:\n counters['files'] += 1\n counters['changes'] += cnt\n fh_out.write(data)\n\n else:\n raise ConfigError(\"Unknown match type: \\\"%s\\\"\" % project_file.match)\n\n fh_out.close()\n\n shutil.copystat(project_file.filename, fh_out.name)\n pathlib.Path(fh_out.name).rename(project_file.filename)\n\n return counters\n",
"def write(self, version):\n \"\"\"Save new version into self._path in safe way (using temporary file)\n\n :param version:Version\n \"\"\"\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as fh:\n fh.write(str(version))\n\n if self._path.exists():\n shutil.copystat(str(self._path), fh.name)\n\n try:\n pathlib.Path(fh.name).rename(self._path)\n except OSError as exc:\n # handling situation with tmp file on another device\n if exc.errno == 18 and 'Invalid cross-device link' in exc.strerror:\n with self._path.open(mode='w') as fh:\n fh.write(str(version))\n else:\n raise\n"
] | """Helpers for commands related to manipulating files"""
import pathlib
import re
import shutil
import sys
import tempfile
import time
from versionner import vcs
from versionner.errors import ConfigError
def update_project_files(cfg, proj_version):
"""
Update version string in project files
:rtype : dict
:param cfg:project configuration
:param proj_version:current version
:return:dict :raise ValueError:
"""
counters = {'files': 0, 'changes': 0}
for project_file in cfg.files:
if not project_file.file.exists():
print("File \"%s\" not found" % project_file.filename, file=sys.stderr)
continue
# prepare data
date_format = project_file.date_format or cfg.date_format
rxp = re.compile(project_file.search, project_file.search_flags)
replace = project_file.replace % {
"date": time.strftime(date_format),
"major": proj_version.major,
"minor": proj_version.minor,
"patch": proj_version.patch,
"prerelease": proj_version.prerelease,
"version": str(proj_version),
"build": proj_version.build,
}
# update project files
with \
project_file.file.open(mode="r", encoding=project_file.encoding) as fh_in, \
tempfile.NamedTemporaryFile(mode="w", encoding=project_file.encoding, delete=False) as fh_out:
if project_file.match == 'line':
changes = 0
for line in fh_in:
(line, cnt) = rxp.subn(replace, line)
if cnt:
changes += cnt
fh_out.write(line)
if changes:
counters['files'] += 1
counters['changes'] += changes
elif project_file.match == 'file':
data = fh_in.read()
(data, cnt) = rxp.subn(replace, data)
if cnt:
counters['files'] += 1
counters['changes'] += cnt
fh_out.write(data)
else:
raise ConfigError("Unknown match type: \"%s\"" % project_file.match)
fh_out.close()
shutil.copystat(project_file.filename, fh_out.name)
pathlib.Path(fh_out.name).rename(project_file.filename)
return counters
|
msztolcman/versionner | versionner/utils.py | validate_python_version | python | def validate_python_version():
python_version = LooseVersion(platform.python_version())
minimal_version = LooseVersion('3.3.0')
if python_version < minimal_version:
print("Sorry, Python 3.3+ is required")
sys.exit(1) | Validate python interpreter version. Only 3.3+ allowed. | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/utils.py#L9-L15 | null | """Additional utilities"""
import platform
import sys
from distutils.version import LooseVersion
|
msztolcman/versionner | versionner/vcs/git.py | VCSCommandsBuilder.tag | python | def tag(version, params):
cmd = ['git', 'tag', '-a', '-m', 'v%s' % version, str(version)]
if params:
cmd.extend(params)
return cmd | Build and return full command to use with subprocess.Popen for 'git tag' command
:param version:
:param params:
:return: list | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/vcs/git.py#L13-L24 | null | class VCSCommandsBuilder:
""" Build shell VCS command"""
@staticmethod
@staticmethod
def status():
"""Build and return full command to use with subprocess.Popen for 'git status' command
:return: list
"""
cmd = ['git', 'status', '--porcelain']
return cmd
@staticmethod
def commit(message):
"""Build and return full command to use with subprocess.Popen for 'git commit' command
:param message:
:return: list
"""
cmd = ['git', 'commit', '-m', message]
return cmd
@staticmethod
def add(paths):
"""Build and return full command to use with subprocess.Popen for 'git add' command
:param paths:
:return: list
"""
cmd = ['git', 'add'] + list(paths)
return cmd
|
msztolcman/versionner | versionner/vcs/git.py | VCSEngine._exec | python | def _exec(cmd):
process = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# pylint: disable=unexpected-keyword-arg
(stdout, stderr) = process.communicate(timeout=defaults.DEFAULT_VCS_TIMEOUT)
return process.returncode, stdout.decode(), stderr.decode() | Execute command using subprocess.Popen
:param cmd:
:return: (code, stdout, stderr) | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/vcs/git.py#L66-L76 | null | class VCSEngine:
"""Main class for working with VCS"""
def __init__(self):
self._command = VCSCommandsBuilder()
@staticmethod
def create_tag(self, version, params):
"""Create VCS tag
:param version:
:param params:
:return:
"""
cmd = self._command.tag(version, params)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t create VCS tag %s. Process exited with code %d and message: %s' % (
version, code, stderr or stdout))
def raise_if_cant_commit(self):
"""Verify VCS status and raise an error if commit is disallowed
:return:
"""
cmd = self._command.status()
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t verify VCS status. Process exited with code %d and message: %s' % (
code, stderr or stdout))
for line in stdout.splitlines():
if line.startswith(('??', '!!')):
continue
raise errors.VCSStateError("VCS status doesn't allow to commit. Please commit or stash your changes and try again")
def create_commit(self, message):
"""Create commit
:param message:
:return:
"""
cmd = self._command.commit(message)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Commit failed. Process exited with code %d and message: %s' % (
code, stderr or stdout))
def add_to_stage(self, paths):
"""Stage given files
:param paths:
:return:
"""
cmd = self._command.add(paths)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t add paths to VCS. Process exited with code %d and message: %s' % (
code, stderr + stdout))
|
msztolcman/versionner | versionner/vcs/git.py | VCSEngine.create_tag | python | def create_tag(self, version, params):
cmd = self._command.tag(version, params)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t create VCS tag %s. Process exited with code %d and message: %s' % (
version, code, stderr or stdout)) | Create VCS tag
:param version:
:param params:
:return: | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/vcs/git.py#L78-L91 | [
"def _exec(cmd):\n \"\"\"Execute command using subprocess.Popen\n :param cmd:\n :return: (code, stdout, stderr)\n \"\"\"\n process = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n\n # pylint: disable=unexpected-keyword-arg\n (stdout, stderr) = process.communicate(timeout=defaults.DEFAULT_VCS_TIMEOUT)\n\n return process.returncode, stdout.decode(), stderr.decode()\n"
] | class VCSEngine:
"""Main class for working with VCS"""
def __init__(self):
self._command = VCSCommandsBuilder()
@staticmethod
def _exec(cmd):
"""Execute command using subprocess.Popen
:param cmd:
:return: (code, stdout, stderr)
"""
process = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# pylint: disable=unexpected-keyword-arg
(stdout, stderr) = process.communicate(timeout=defaults.DEFAULT_VCS_TIMEOUT)
return process.returncode, stdout.decode(), stderr.decode()
def raise_if_cant_commit(self):
"""Verify VCS status and raise an error if commit is disallowed
:return:
"""
cmd = self._command.status()
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t verify VCS status. Process exited with code %d and message: %s' % (
code, stderr or stdout))
for line in stdout.splitlines():
if line.startswith(('??', '!!')):
continue
raise errors.VCSStateError("VCS status doesn't allow to commit. Please commit or stash your changes and try again")
def create_commit(self, message):
"""Create commit
:param message:
:return:
"""
cmd = self._command.commit(message)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Commit failed. Process exited with code %d and message: %s' % (
code, stderr or stdout))
def add_to_stage(self, paths):
"""Stage given files
:param paths:
:return:
"""
cmd = self._command.add(paths)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t add paths to VCS. Process exited with code %d and message: %s' % (
code, stderr + stdout))
|
msztolcman/versionner | versionner/vcs/git.py | VCSEngine.raise_if_cant_commit | python | def raise_if_cant_commit(self):
cmd = self._command.status()
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t verify VCS status. Process exited with code %d and message: %s' % (
code, stderr or stdout))
for line in stdout.splitlines():
if line.startswith(('??', '!!')):
continue
raise errors.VCSStateError("VCS status doesn't allow to commit. Please commit or stash your changes and try again") | Verify VCS status and raise an error if commit is disallowed
:return: | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/vcs/git.py#L93-L109 | [
"def _exec(cmd):\n \"\"\"Execute command using subprocess.Popen\n :param cmd:\n :return: (code, stdout, stderr)\n \"\"\"\n process = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n\n # pylint: disable=unexpected-keyword-arg\n (stdout, stderr) = process.communicate(timeout=defaults.DEFAULT_VCS_TIMEOUT)\n\n return process.returncode, stdout.decode(), stderr.decode()\n"
] | class VCSEngine:
"""Main class for working with VCS"""
def __init__(self):
self._command = VCSCommandsBuilder()
@staticmethod
def _exec(cmd):
"""Execute command using subprocess.Popen
:param cmd:
:return: (code, stdout, stderr)
"""
process = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# pylint: disable=unexpected-keyword-arg
(stdout, stderr) = process.communicate(timeout=defaults.DEFAULT_VCS_TIMEOUT)
return process.returncode, stdout.decode(), stderr.decode()
def create_tag(self, version, params):
"""Create VCS tag
:param version:
:param params:
:return:
"""
cmd = self._command.tag(version, params)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t create VCS tag %s. Process exited with code %d and message: %s' % (
version, code, stderr or stdout))
def create_commit(self, message):
"""Create commit
:param message:
:return:
"""
cmd = self._command.commit(message)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Commit failed. Process exited with code %d and message: %s' % (
code, stderr or stdout))
def add_to_stage(self, paths):
"""Stage given files
:param paths:
:return:
"""
cmd = self._command.add(paths)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t add paths to VCS. Process exited with code %d and message: %s' % (
code, stderr + stdout))
|
msztolcman/versionner | versionner/vcs/git.py | VCSEngine.create_commit | python | def create_commit(self, message):
cmd = self._command.commit(message)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Commit failed. Process exited with code %d and message: %s' % (
code, stderr or stdout)) | Create commit
:param message:
:return: | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/vcs/git.py#L111-L123 | [
"def _exec(cmd):\n \"\"\"Execute command using subprocess.Popen\n :param cmd:\n :return: (code, stdout, stderr)\n \"\"\"\n process = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n\n # pylint: disable=unexpected-keyword-arg\n (stdout, stderr) = process.communicate(timeout=defaults.DEFAULT_VCS_TIMEOUT)\n\n return process.returncode, stdout.decode(), stderr.decode()\n"
] | class VCSEngine:
"""Main class for working with VCS"""
def __init__(self):
self._command = VCSCommandsBuilder()
@staticmethod
def _exec(cmd):
"""Execute command using subprocess.Popen
:param cmd:
:return: (code, stdout, stderr)
"""
process = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# pylint: disable=unexpected-keyword-arg
(stdout, stderr) = process.communicate(timeout=defaults.DEFAULT_VCS_TIMEOUT)
return process.returncode, stdout.decode(), stderr.decode()
def create_tag(self, version, params):
"""Create VCS tag
:param version:
:param params:
:return:
"""
cmd = self._command.tag(version, params)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t create VCS tag %s. Process exited with code %d and message: %s' % (
version, code, stderr or stdout))
def raise_if_cant_commit(self):
"""Verify VCS status and raise an error if commit is disallowed
:return:
"""
cmd = self._command.status()
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t verify VCS status. Process exited with code %d and message: %s' % (
code, stderr or stdout))
for line in stdout.splitlines():
if line.startswith(('??', '!!')):
continue
raise errors.VCSStateError("VCS status doesn't allow to commit. Please commit or stash your changes and try again")
def add_to_stage(self, paths):
"""Stage given files
:param paths:
:return:
"""
cmd = self._command.add(paths)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t add paths to VCS. Process exited with code %d and message: %s' % (
code, stderr + stdout))
|
msztolcman/versionner | versionner/vcs/git.py | VCSEngine.add_to_stage | python | def add_to_stage(self, paths):
cmd = self._command.add(paths)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t add paths to VCS. Process exited with code %d and message: %s' % (
code, stderr + stdout)) | Stage given files
:param paths:
:return: | train | https://github.com/msztolcman/versionner/blob/78fca02859e3e3eb71c9eb7ea230758944177c54/versionner/vcs/git.py#L125-L137 | [
"def _exec(cmd):\n \"\"\"Execute command using subprocess.Popen\n :param cmd:\n :return: (code, stdout, stderr)\n \"\"\"\n process = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n\n # pylint: disable=unexpected-keyword-arg\n (stdout, stderr) = process.communicate(timeout=defaults.DEFAULT_VCS_TIMEOUT)\n\n return process.returncode, stdout.decode(), stderr.decode()\n"
] | class VCSEngine:
"""Main class for working with VCS"""
def __init__(self):
self._command = VCSCommandsBuilder()
@staticmethod
def _exec(cmd):
"""Execute command using subprocess.Popen
:param cmd:
:return: (code, stdout, stderr)
"""
process = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# pylint: disable=unexpected-keyword-arg
(stdout, stderr) = process.communicate(timeout=defaults.DEFAULT_VCS_TIMEOUT)
return process.returncode, stdout.decode(), stderr.decode()
def create_tag(self, version, params):
"""Create VCS tag
:param version:
:param params:
:return:
"""
cmd = self._command.tag(version, params)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t create VCS tag %s. Process exited with code %d and message: %s' % (
version, code, stderr or stdout))
def raise_if_cant_commit(self):
"""Verify VCS status and raise an error if commit is disallowed
:return:
"""
cmd = self._command.status()
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Can\'t verify VCS status. Process exited with code %d and message: %s' % (
code, stderr or stdout))
for line in stdout.splitlines():
if line.startswith(('??', '!!')):
continue
raise errors.VCSStateError("VCS status doesn't allow to commit. Please commit or stash your changes and try again")
def create_commit(self, message):
"""Create commit
:param message:
:return:
"""
cmd = self._command.commit(message)
(code, stdout, stderr) = self._exec(cmd)
if code:
raise errors.VCSError('Commit failed. Process exited with code %d and message: %s' % (
code, stderr or stdout))
|
sentinelsat/sentinelsat | sentinelsat/scripts/cli.py | cli | python | def cli(user, password, geometry, start, end, uuid, name, download, sentinel, producttype,
instrument, cloud, footprints, path, query, url, order_by, limit):
_set_logger_handler()
if user is None or password is None:
try:
user, password = requests.utils.get_netrc_auth(url)
except TypeError:
pass
if user is None or password is None:
raise click.UsageError('Missing --user and --password. Please see docs '
'for environment variables and .netrc support.')
api = SentinelAPI(user, password, url)
search_kwargs = {}
if sentinel and not (producttype or instrument):
search_kwargs["platformname"] = "Sentinel-" + sentinel
if instrument and not producttype:
search_kwargs["instrumentshortname"] = instrument
if producttype:
search_kwargs["producttype"] = producttype
if cloud:
if sentinel not in ['2', '3']:
logger.error('Cloud cover is only supported for Sentinel 2 and 3.')
exit(1)
search_kwargs["cloudcoverpercentage"] = (0, cloud)
if query is not None:
search_kwargs.update((x.split('=') for x in query))
if geometry is not None:
search_kwargs['area'] = geojson_to_wkt(read_geojson(geometry))
if uuid is not None:
uuid_list = [x.strip() for x in uuid]
products = {}
for productid in uuid_list:
try:
products[productid] = api.get_product_odata(productid)
except SentinelAPIError as e:
if 'Invalid key' in e.msg:
logger.error('No product with ID \'%s\' exists on server', productid)
exit(1)
else:
raise
elif name is not None:
search_kwargs["identifier"] = name[0] if len(name) == 1 else '(' + ' OR '.join(name) + ')'
products = api.query(order_by=order_by, limit=limit, **search_kwargs)
else:
start = start or "19000101"
end = end or "NOW"
products = api.query(date=(start, end),
order_by=order_by, limit=limit, **search_kwargs)
if footprints is True:
footprints_geojson = api.to_geojson(products)
with open(os.path.join(path, "search_footprints.geojson"), "w") as outfile:
outfile.write(gj.dumps(footprints_geojson))
if download is True:
product_infos, triggered, failed_downloads = api.download_all(products, path)
if len(failed_downloads) > 0:
with open(os.path.join(path, "corrupt_scenes.txt"), "w") as outfile:
for failed_id in failed_downloads:
outfile.write("%s : %s\n" % (failed_id, products[failed_id]['title']))
else:
for product_id, props in products.items():
if uuid is None:
logger.info('Product %s - %s', product_id, props['summary'])
else: # querying uuids has no summary key
logger.info('Product %s - %s - %s MB', product_id, props['title'],
round(int(props['size']) / (1024. * 1024.), 2))
if uuid is None:
logger.info('---')
logger.info('%s scenes found with a total size of %.2f GB',
len(products), api.get_products_size(products)) | Search for Sentinel products and, optionally, download all the results
and/or create a geojson file with the search result footprints.
Beyond your Copernicus Open Access Hub user and password, you must pass a geojson file
containing the geometry of the area you want to search for or the UUIDs of the products. If you
don't specify the start and end dates, it will search in the last 24 hours. | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/scripts/cli.py#L96-L183 | [
"def geojson_to_wkt(geojson_obj, feature_number=0, decimals=4):\n \"\"\"Convert a GeoJSON object to Well-Known Text. Intended for use with OpenSearch queries.\n\n In case of FeatureCollection, only one of the features is used (the first by default).\n 3D points are converted to 2D.\n\n Parameters\n ----------\n geojson_obj : dict\n a GeoJSON object\n feature_number : int, optional\n Feature to extract polygon from (in case of MultiPolygon\n FeatureCollection), defaults to first Feature\n decimals : int, optional\n Number of decimal figures after point to round coordinate to. Defaults to 4 (about 10\n meters).\n\n Returns\n -------\n polygon coordinates\n string of comma separated coordinate tuples (lon, lat) to be used by SentinelAPI\n \"\"\"\n if 'coordinates' in geojson_obj:\n geometry = geojson_obj\n elif 'geometry' in geojson_obj:\n geometry = geojson_obj['geometry']\n else:\n geometry = geojson_obj['features'][feature_number]['geometry']\n\n def ensure_2d(geometry):\n if isinstance(geometry[0], (list, tuple)):\n return list(map(ensure_2d, geometry))\n else:\n return geometry[:2]\n\n def check_bounds(geometry):\n if isinstance(geometry[0], (list, tuple)):\n return list(map(check_bounds, geometry))\n else:\n if geometry[0] > 180 or geometry[0] < -180:\n raise ValueError('Longitude is out of bounds, check your JSON format or data')\n if geometry[1] > 90 or geometry[1] < -90:\n raise ValueError('Latitude is out of bounds, check your JSON format or data')\n\n # Discard z-coordinate, if it exists\n geometry['coordinates'] = ensure_2d(geometry['coordinates'])\n check_bounds(geometry['coordinates'])\n\n wkt = geomet.wkt.dumps(geometry, decimals=decimals)\n # Strip unnecessary spaces\n wkt = re.sub(r'(?<!\\d) ', '', wkt)\n return wkt\n",
"def read_geojson(geojson_file):\n \"\"\"Read a GeoJSON file into a GeoJSON object.\n \"\"\"\n with open(geojson_file) as f:\n return geojson.load(f)\n",
"def _set_logger_handler(level='INFO'):\n logger.setLevel(level)\n h = logging.StreamHandler()\n h.setLevel(level)\n fmt = logging.Formatter('%(message)s')\n h.setFormatter(fmt)\n logger.addHandler(h)\n",
"def query(self, area=None, date=None, raw=None, area_relation='Intersects',\n order_by=None, limit=None, offset=0, **keywords):\n \"\"\"Query the OpenSearch API with the coordinates of an area, a date interval\n and any other search keywords accepted by the API.\n\n Parameters\n ----------\n area : str, optional\n The area of interest formatted as a Well-Known Text string.\n date : tuple of (str or datetime) or str, optional\n A time interval filter based on the Sensing Start Time of the products.\n Expects a tuple of (start, end), e.g. (\"NOW-1DAY\", \"NOW\").\n The timestamps can be either a Python datetime or a string in one of the\n following formats:\n\n - yyyyMMdd\n - yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)\n - yyyy-MM-ddThh:mm:ssZ\n - NOW\n - NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)\n - NOW+<n>DAY(S)\n - yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)\n - NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit\n\n Alternatively, an already fully formatted string such as \"[NOW-1DAY TO NOW]\" can be\n used as well.\n raw : str, optional\n Additional query text that will be appended to the query.\n area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional\n What relation to use for testing the AOI. Case insensitive.\n\n - Intersects: true if the AOI and the footprint intersect (default)\n - Contains: true if the AOI is inside the footprint\n - IsWithin: true if the footprint is inside the AOI\n\n order_by: str, optional\n A comma-separated list of fields to order by (on server side).\n Prefix the field name by '+' or '-' to sort in ascending or descending order,\n respectively. Ascending order is used if prefix is omitted.\n Example: \"cloudcoverpercentage, -beginposition\".\n limit: int, optional\n Maximum number of products returned. Defaults to no limit.\n offset: int, optional\n The number of results to skip. Defaults to 0.\n **keywords\n Additional keywords can be used to specify other query parameters,\n e.g. 
`relativeorbitnumber=70`.\n See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch\n for a full list.\n\n\n Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.\n `None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.\n Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.\n\n The time interval formats accepted by the `date` parameter can also be used with\n any other parameters that expect time intervals (that is: 'beginposition', 'endposition',\n 'date', 'creationdate', and 'ingestiondate').\n\n Returns\n -------\n dict[string, dict]\n Products returned by the query as a dictionary with the product ID as the key and\n the product's attributes (a dictionary) as the value.\n \"\"\"\n query = self.format_query(area, date, raw, area_relation, **keywords)\n\n self.logger.debug(\"Running query: order_by=%s, limit=%s, offset=%s, query=%s\",\n order_by, limit, offset, query)\n formatted_order_by = _format_order_by(order_by)\n response, count = self._load_query(query, formatted_order_by, limit, offset)\n self.logger.info(\"Found %s products\", count)\n return _parse_opensearch_response(response)\n",
"def to_geojson(products):\n \"\"\"Return the products from a query response as a GeoJSON with the values in their\n appropriate Python types.\n \"\"\"\n feature_list = []\n for i, (product_id, props) in enumerate(products.items()):\n props = props.copy()\n props['id'] = product_id\n poly = geomet.wkt.loads(props['footprint'])\n del props['footprint']\n del props['gmlfootprint']\n # Fix \"'datetime' is not JSON serializable\"\n for k, v in props.items():\n if isinstance(v, (date, datetime)):\n props[k] = v.strftime('%Y-%m-%dT%H:%M:%S.%fZ')\n feature_list.append(\n geojson.Feature(geometry=poly, id=i, properties=props)\n )\n return geojson.FeatureCollection(feature_list)\n",
"def get_product_odata(self, id, full=False):\n \"\"\"Access OData API to get info about a product.\n\n Returns a dict containing the id, title, size, md5sum, date, footprint and download url\n of the product. The date field corresponds to the Start ContentDate value.\n\n If `full` is set to True, then the full, detailed metadata of the product is returned\n in addition to the above.\n\n Parameters\n ----------\n id : string\n The UUID of the product to query\n full : bool\n Whether to get the full metadata for the Product. False by default.\n\n Returns\n -------\n dict[str, Any]\n A dictionary with an item for each metadata attribute\n\n Notes\n -----\n For a full list of mappings between the OpenSearch (Solr) and OData attribute names\n see the following definition files:\n https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl\n https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl\n https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl\n \"\"\"\n url = urljoin(self.api_url, u\"odata/v1/Products('{}')?$format=json\".format(id))\n if full:\n url += '&$expand=Attributes'\n response = self.session.get(url, auth=self.session.auth,\n timeout=self.timeout)\n _check_scihub_response(response)\n values = _parse_odata_response(response.json()['d'])\n return values\n",
"def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):\n \"\"\"Download a list of products.\n\n Takes a list of product IDs as input. This means that the return value of query() can be\n passed directly to this method.\n\n File names on the server are used for the downloaded files, e.g.\n \"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip\".\n\n In case of interruptions or other exceptions, downloading will restart from where it left\n off. Downloading is attempted at most max_attempts times to avoid getting stuck with\n unrecoverable errors.\n\n Parameters\n ----------\n products : list\n List of product IDs\n directory_path : string\n Directory where the downloaded files will be downloaded\n max_attempts : int, optional\n Number of allowed retries before giving up downloading a product. Defaults to 10.\n checksum : bool, optional\n If True, verify the downloaded files' integrity by checking its MD5 checksum.\n Throws InvalidChecksumError if the checksum does not match.\n Defaults to True.\n\n Raises\n ------\n Raises the most recent downloading exception if all downloads failed.\n\n Returns\n -------\n dict[string, dict]\n A dictionary containing the return value from download() for each successfully\n downloaded product.\n dict[string, dict]\n A dictionary containing the product information for products whose retrieval\n from the long term archive was successfully triggered.\n set[string]\n The list of products that failed to download.\n \"\"\"\n product_ids = list(products)\n self.logger.info(\"Will download %d products\", len(product_ids))\n return_values = OrderedDict()\n last_exception = None\n for i, product_id in enumerate(products):\n for attempt_num in range(max_attempts):\n try:\n product_info = self.download(product_id, directory_path, checksum)\n return_values[product_id] = product_info\n break\n except (KeyboardInterrupt, SystemExit):\n raise\n except InvalidChecksumError as e:\n last_exception = 
e\n self.logger.warning(\n \"Invalid checksum. The downloaded file for '%s' is corrupted.\", product_id)\n except SentinelAPILTAError as e:\n last_exception = e\n self.logger.exception(\"There was an error retrieving %s from the LTA\", product_id)\n break\n except Exception as e:\n last_exception = e\n self.logger.exception(\"There was an error downloading %s\", product_id)\n self.logger.info(\"%s/%s products downloaded\", i + 1, len(product_ids))\n failed = set(products) - set(return_values)\n\n # split up sucessfully processed products into downloaded and only triggered retrieval from the LTA\n triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])\n downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])\n\n if len(failed) == len(product_ids) and last_exception is not None:\n raise last_exception\n return downloaded, triggered, failed\n",
"def get_products_size(products):\n \"\"\"Return the total file size in GB of all products in the OpenSearch response.\"\"\"\n size_total = 0\n for title, props in products.items():\n size_product = props[\"size\"]\n size_value = float(size_product.split(\" \")[0])\n size_unit = str(size_product.split(\" \")[1])\n if size_unit == \"MB\":\n size_value /= 1024.\n if size_unit == \"KB\":\n size_value /= 1024. * 1024.\n size_total += size_value\n return round(size_total, 2)\n"
] | import logging
import os
import click
import geojson as gj
import requests.utils
from sentinelsat import __version__ as sentinelsat_version
from sentinelsat.sentinel import SentinelAPI, SentinelAPIError, geojson_to_wkt, read_geojson
logger = logging.getLogger('sentinelsat')
def _set_logger_handler(level='INFO'):
logger.setLevel(level)
h = logging.StreamHandler()
h.setLevel(level)
fmt = logging.Formatter('%(message)s')
h.setFormatter(fmt)
logger.addHandler(h)
class CommaSeparatedString(click.ParamType):
name = 'comma-string'
def convert(self, value, param, ctx):
if value:
return value.split(',')
else:
return value
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option(
'--user', '-u', type=str, envvar='DHUS_USER', default=None,
help='Username (or environment variable DHUS_USER is set)')
@click.option(
'--password', '-p', type=str, envvar='DHUS_PASSWORD', default=None,
help='Password (or environment variable DHUS_PASSWORD is set)')
@click.option(
'--url', type=str, default='https://scihub.copernicus.eu/apihub/', envvar='DHUS_URL',
help="""Define API URL. Default URL is
'https://scihub.copernicus.eu/apihub/' (or environment variable DHUS_URL).
""")
@click.option(
'--start', '-s', type=str, default='NOW-1DAY',
help='Start date of the query in the format YYYYMMDD.')
@click.option(
'--end', '-e', type=str, default='NOW',
help='End date of the query in the format YYYYMMDD.')
@click.option(
'--geometry', '-g', type=click.Path(exists=True),
help='Search area geometry as GeoJSON file.')
@click.option(
'--uuid', type=CommaSeparatedString(), default=None,
help='Select a specific product UUID instead of a query. Multiple UUIDs can separated by comma.')
@click.option(
'--name', type=CommaSeparatedString(), default=None,
help='Select specific product(s) by filename. Supports wildcards.')
@click.option(
'--sentinel', type=click.Choice(['1', '2', '3', '5']),
help='Limit search to a Sentinel satellite (constellation)')
@click.option(
'--instrument', type=click.Choice(['MSI', 'SAR-C SAR', 'SLSTR', 'OLCI', 'SRAL']),
help='Limit search to a specific instrument on a Sentinel satellite.')
@click.option(
'--producttype', type=str, default=None,
help='Limit search to a Sentinel product type.')
@click.option(
'-c', '--cloud', type=int,
help='Maximum cloud cover in percent. (requires --sentinel to be 2 or 3)')
@click.option(
'-o', '--order-by', type=str,
help="Comma-separated list of keywords to order the result by. "
"Prefix keywords with '-' for descending order.")
@click.option(
'-l', '--limit', type=int,
help='Maximum number of results to return. Defaults to no limit.')
@click.option(
'--download', '-d', is_flag=True,
help='Download all results of the query.')
@click.option(
'--path', type=click.Path(exists=True), default='.',
help='Set the path where the files will be saved.')
@click.option(
'--query', '-q', type=CommaSeparatedString(), default=None,
help="""Extra search keywords you want to use in the query. Separate
keywords with comma. Example: 'producttype=GRD,polarisationmode=HH'.
""")
@click.option(
'--footprints', is_flag=True,
help="""Create a geojson file search_footprints.geojson with footprints
and metadata of the returned products.
""")
@click.version_option(version=sentinelsat_version, prog_name="sentinelsat")
|
sentinelsat/sentinelsat | sentinelsat/sentinel.py | geojson_to_wkt | python | def geojson_to_wkt(geojson_obj, feature_number=0, decimals=4):
if 'coordinates' in geojson_obj:
geometry = geojson_obj
elif 'geometry' in geojson_obj:
geometry = geojson_obj['geometry']
else:
geometry = geojson_obj['features'][feature_number]['geometry']
def ensure_2d(geometry):
if isinstance(geometry[0], (list, tuple)):
return list(map(ensure_2d, geometry))
else:
return geometry[:2]
def check_bounds(geometry):
if isinstance(geometry[0], (list, tuple)):
return list(map(check_bounds, geometry))
else:
if geometry[0] > 180 or geometry[0] < -180:
raise ValueError('Longitude is out of bounds, check your JSON format or data')
if geometry[1] > 90 or geometry[1] < -90:
raise ValueError('Latitude is out of bounds, check your JSON format or data')
# Discard z-coordinate, if it exists
geometry['coordinates'] = ensure_2d(geometry['coordinates'])
check_bounds(geometry['coordinates'])
wkt = geomet.wkt.dumps(geometry, decimals=decimals)
# Strip unnecessary spaces
wkt = re.sub(r'(?<!\d) ', '', wkt)
return wkt | Convert a GeoJSON object to Well-Known Text. Intended for use with OpenSearch queries.
In case of FeatureCollection, only one of the features is used (the first by default).
3D points are converted to 2D.
Parameters
----------
geojson_obj : dict
a GeoJSON object
feature_number : int, optional
Feature to extract polygon from (in case of MultiPolygon
FeatureCollection), defaults to first Feature
decimals : int, optional
Number of decimal figures after point to round coordinate to. Defaults to 4 (about 10
meters).
Returns
-------
polygon coordinates
string of comma separated coordinate tuples (lon, lat) to be used by SentinelAPI | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L889-L940 | [
"def ensure_2d(geometry):\n if isinstance(geometry[0], (list, tuple)):\n return list(map(ensure_2d, geometry))\n else:\n return geometry[:2]\n",
"def check_bounds(geometry):\n if isinstance(geometry[0], (list, tuple)):\n return list(map(check_bounds, geometry))\n else:\n if geometry[0] > 180 or geometry[0] < -180:\n raise ValueError('Longitude is out of bounds, check your JSON format or data')\n if geometry[1] > 90 or geometry[1] < -90:\n raise ValueError('Latitude is out of bounds, check your JSON format or data')\n"
] | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import hashlib
import logging
import re
import shutil
import warnings
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict
from contextlib import closing
from datetime import date, datetime, timedelta
from os import remove
from os.path import basename, exists, getsize, join, splitext
import geojson
import geomet.wkt
import html2text
import requests
from six import string_types
from six.moves.urllib.parse import urljoin, quote_plus
from tqdm import tqdm
from . import __version__ as sentinelsat_version
class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
show_progressbars=True, timeout=None):
self.session = requests.Session()
if user and password:
self.session.auth = (user, password)
self.api_url = api_url if api_url.endswith('/') else api_url + '/'
self.page_size = 100
self.user_agent = 'sentinelsat/' + sentinelsat_version
self.session.headers['User-Agent'] = self.user_agent
self.show_progressbars = show_progressbars
self.timeout = timeout
# For unit tests
self._last_query = None
self._last_response = None
def query(self, area=None, date=None, raw=None, area_relation='Intersects',
order_by=None, limit=None, offset=0, **keywords):
"""Query the OpenSearch API with the coordinates of an area, a date interval
and any other search keywords accepted by the API.
Parameters
----------
area : str, optional
The area of interest formatted as a Well-Known Text string.
date : tuple of (str or datetime) or str, optional
A time interval filter based on the Sensing Start Time of the products.
Expects a tuple of (start, end), e.g. ("NOW-1DAY", "NOW").
The timestamps can be either a Python datetime or a string in one of the
following formats:
- yyyyMMdd
- yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)
- yyyy-MM-ddThh:mm:ssZ
- NOW
- NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)
- NOW+<n>DAY(S)
- yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)
- NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit
Alternatively, an already fully formatted string such as "[NOW-1DAY TO NOW]" can be
used as well.
raw : str, optional
Additional query text that will be appended to the query.
area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
What relation to use for testing the AOI. Case insensitive.
- Intersects: true if the AOI and the footprint intersect (default)
- Contains: true if the AOI is inside the footprint
- IsWithin: true if the footprint is inside the AOI
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order,
respectively. Ascending order is used if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
**keywords
Additional keywords can be used to specify other query parameters,
e.g. `relativeorbitnumber=70`.
See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
for a full list.
Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.
`None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.
Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.
The time interval formats accepted by the `date` parameter can also be used with
any other parameters that expect time intervals (that is: 'beginposition', 'endposition',
'date', 'creationdate', and 'ingestiondate').
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
"""
query = self.format_query(area, date, raw, area_relation, **keywords)
self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
order_by, limit, offset, query)
formatted_order_by = _format_order_by(order_by)
response, count = self._load_query(query, formatted_order_by, limit, offset)
self.logger.info("Found %s products", count)
return _parse_opensearch_response(response)
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
"""Create a OpenSearch API query string.
"""
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts)
def query_raw(self, query, order_by=None, limit=None, offset=0):
"""
Do a full-text query on the OpenSearch API using the format specified in
https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release.
Parameters
----------
query : str
The query string.
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively.
Ascending order is used, if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
"""
warnings.warn(
"query_raw() has been merged with query(). use query(raw=...) instead.",
PendingDeprecationWarning
)
return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
    """Get the number of products matching a query.

    Accepted parameters are identical to :meth:`SentinelAPI.query()`.

    This is a significantly more efficient alternative to doing
    ``len(api.query())``, which can take minutes to run for queries matching
    thousands of products: only the result count is requested (limit=0),
    never the products themselves.

    Returns
    -------
    int
        The number of products matching a query.
    """
    # Tolerate (and silently drop) the query()-only arguments so the two
    # methods stay call-compatible.
    for ignored_kw in ('order_by', 'limit', 'offset'):
        keywords.pop(ignored_kw, None)
    query = self.format_query(area, date, raw, area_relation, **keywords)
    _, total_count = self._load_query(query, limit=0)
    return total_count
def _load_query(self, query, order_by=None, limit=None, offset=0):
    # Run `query` and page through the results until either all matches or
    # `limit` products (starting at `offset`) have been fetched.
    # Returns (products, total_count_on_server).
    products, count = self._load_subquery(query, order_by, limit, offset)

    # repeat query until all results have been loaded
    max_offset = count
    if limit is not None:
        max_offset = min(count, offset + limit)
    if max_offset > offset + self.page_size:
        # More than one page is needed; show progress while fetching the rest.
        # The first page (page_size products) is already in hand.
        progress = self._tqdm(desc="Querying products",
                              initial=self.page_size,
                              total=max_offset - offset,
                              unit=' products')
        for new_offset in range(offset + self.page_size, max_offset, self.page_size):
            # Shrink the per-page limit so the total never exceeds `limit`.
            new_limit = limit
            if limit is not None:
                new_limit = limit - new_offset + offset
            ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
            progress.update(len(ret))
            products += ret
        progress.close()
    return products, count
def _load_subquery(self, query, order_by=None, limit=None, offset=0):
    # Fetch a single page of query results (at most page_size products).
    # Returns (products_on_this_page, total_result_count_on_server).
    # store last query (for testing)
    self._last_query = query
    self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)

    # load query results
    url = self._format_url(order_by, limit, offset)
    # The query text goes in the POST body; paging/ordering live in the URL.
    response = self.session.post(url, {'q': query}, auth=self.session.auth,
                                 headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
                                 timeout=self.timeout)
    _check_scihub_response(response)

    # store last status code (for testing)
    self._last_response = response

    # parse response content
    try:
        json_feed = response.json()['feed']
        if json_feed['opensearch:totalResults'] is None:
            # We are using some unintended behavior of the server that a null is
            # returned as the total results value when the query string was incorrect.
            raise SentinelAPIError(
                'Invalid query string. Check the parameters and format.', response)
        total_results = int(json_feed['opensearch:totalResults'])
    except (ValueError, KeyError):
        raise SentinelAPIError('API response not valid. JSON decoding failed.', response)

    products = json_feed.get('entry', [])
    # this verification is necessary because if the query returns only
    # one product, self.products will be a dict not a list
    if isinstance(products, dict):
        products = [products]
    return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
@staticmethod
def to_geojson(products):
    """Return the products from a query response as a GeoJSON FeatureCollection
    with the values in their appropriate Python types.
    """
    features = []
    for index, (product_id, attributes) in enumerate(products.items()):
        # Work on a copy so the caller's dictionaries are untouched.
        attributes = attributes.copy()
        attributes['id'] = product_id
        geometry = geomet.wkt.loads(attributes.pop('footprint'))
        del attributes['gmlfootprint']
        # Fix "'datetime' is not JSON serializable" by rendering ISO strings.
        for key, val in attributes.items():
            if isinstance(val, (date, datetime)):
                attributes[key] = val.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        features.append(
            geojson.Feature(geometry=geometry, id=index, properties=attributes)
        )
    return geojson.FeatureCollection(features)
@staticmethod
def to_dataframe(products):
"""Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index')
@staticmethod
def to_geodataframe(products):
    """Return the products from a query response as a GeoPandas GeoDataFrame
    with the values in their appropriate Python types.
    """
    # GeoPandas/Shapely are optional dependencies -- import lazily.
    try:
        import geopandas as gpd
        import shapely.wkt
    except ImportError:
        raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")

    crs = {'init': 'epsg:4326'}  # WGS84
    if len(products) == 0:
        return gpd.GeoDataFrame(crs=crs)

    frame = SentinelAPI.to_dataframe(products)
    footprints = [shapely.wkt.loads(wkt_str) for wkt_str in frame['footprint']]
    # remove useless columns
    frame.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)
    return gpd.GeoDataFrame(frame, crs=crs, geometry=footprints)
def get_product_odata(self, id, full=False):
    """Access OData API to get info about a product.

    Returns a dict containing the id, title, size, md5sum, date, footprint and
    download url of the product. The date field corresponds to the Start
    ContentDate value.

    If `full` is set to True, then the full, detailed metadata of the product
    is returned in addition to the above.

    Parameters
    ----------
    id : string
        The UUID of the product to query
    full : bool
        Whether to get the full metadata for the Product. False by default.

    Returns
    -------
    dict[str, Any]
        A dictionary with an item for each metadata attribute

    Notes
    -----
    For a full list of mappings between the OpenSearch (Solr) and OData
    attribute names see the following definition files:
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
    """
    url = urljoin(self.api_url, u"odata/v1/Products('{}')?$format=json".format(id))
    if full:
        # Ask the server to inline the extended attribute list.
        url += '&$expand=Attributes'
    response = self.session.get(url, auth=self.session.auth, timeout=self.timeout)
    _check_scihub_response(response)
    return _parse_odata_response(response.json()['d'])
def _trigger_offline_retrieval(self, url):
    """Trigger retrieval of an offline product from the long term archive.

    Trying to download an offline product triggers its retrieval from the
    LTA; the returned HTTP status code conveys whether this was successful.

    Parameters
    ----------
    url : string
        URL for downloading the product

    Notes
    -----
    https://scihub.copernicus.eu/userguide/LongTermArchive
    """
    with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
        status = r.status_code
        # check https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
        if status == 202:
            self.logger.info("Accepted for retrieval")
        elif status == 503:
            self.logger.error("Request not accepted")
            raise SentinelAPILTAError('Request for retrieval from LTA not accepted', r)
        elif status == 403:
            self.logger.error("Requests exceed user quota")
            raise SentinelAPILTAError('Requests for retrieval from LTA exceed user quota', r)
        elif status == 500:
            # should not happen
            self.logger.error("Trying to download an offline product")
            raise SentinelAPILTAError('Trying to download an offline product', r)
        return status
def download(self, id, directory_path='.', checksum=True):
    """Download a product.

    Uses the filename on the server for the downloaded file, e.g.
    "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

    Incomplete downloads are continued and complete files are skipped.

    Parameters
    ----------
    id : string
        UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
    directory_path : string, optional
        Where the file will be downloaded
    checksum : bool, optional
        If True, verify the downloaded file's integrity by checking its MD5 checksum.
        Throws InvalidChecksumError if the checksum does not match.
        Defaults to True.

    Returns
    -------
    product_info : dict
        Dictionary containing the product's info from get_product_info() as well as
        the path on disk.

    Raises
    ------
    InvalidChecksumError
        If the MD5 checksum does not match the checksum on the server.
    """
    product_info = self.get_product_odata(id)
    path = join(directory_path, product_info['title'] + '.zip')
    product_info['path'] = path
    product_info['downloaded_bytes'] = 0
    self.logger.info('Downloading %s to %s', id, path)
    if exists(path):
        # We assume that the product has been downloaded and is complete
        return product_info
    # An incomplete download triggers the retrieval from the LTA if the product is not online
    if not product_info['Online']:
        self.logger.warning(
            'Product %s is not online. Triggering retrieval from long term archive.',
            product_info['id'])
        self._trigger_offline_retrieval(product_info['url'])
        # Nothing to download yet -- the caller can retry once it is online.
        return product_info
    # Use a temporary file for downloading
    temp_path = path + '.incomplete'
    skip_download = False
    if exists(temp_path):
        if getsize(temp_path) > product_info['size']:
            # Cannot be a partial download of this product -- discard it.
            self.logger.warning(
                "Existing incomplete file %s is larger than the expected final size"
                " (%s vs %s bytes). Deleting it.",
                str(temp_path), getsize(temp_path), product_info['size'])
            remove(temp_path)
        elif getsize(temp_path) == product_info['size']:
            if self._md5_compare(temp_path, product_info['md5']):
                # Fully downloaded already -- only the final rename remains.
                skip_download = True
            else:
                # Log a warning since this should never happen
                self.logger.warning(
                    "Existing incomplete file %s appears to be fully downloaded but "
                    "its checksum is incorrect. Deleting it.",
                    str(temp_path))
                remove(temp_path)
        else:
            # continue downloading
            self.logger.info(
                "Download will resume from existing incomplete file %s.", temp_path)
            pass
    if not skip_download:
        # Store the number of downloaded bytes for unit tests
        product_info['downloaded_bytes'] = self._download(
            product_info['url'], temp_path, self.session, product_info['size'])
    # Check integrity with MD5 checksum
    if checksum is True:
        if not self._md5_compare(temp_path, product_info['md5']):
            remove(temp_path)
            raise InvalidChecksumError('File corrupt: checksums do not match')
    # Download successful, rename the temporary file to its proper name
    shutil.move(temp_path, path)
    return product_info
def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
    """Download a list of products.

    Takes a list of product IDs as input. This means that the return value of query() can be
    passed directly to this method.

    File names on the server are used for the downloaded files, e.g.
    "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

    In case of interruptions or other exceptions, downloading will restart from where it left
    off. Downloading is attempted at most max_attempts times to avoid getting stuck with
    unrecoverable errors.

    Parameters
    ----------
    products : list
        List of product IDs
    directory_path : string
        Directory where the downloaded files will be downloaded
    max_attempts : int, optional
        Number of allowed retries before giving up downloading a product. Defaults to 10.
    checksum : bool, optional
        If True, verify the downloaded files' integrity by checking its MD5 checksum.
        Throws InvalidChecksumError if the checksum does not match.
        Defaults to True.

    Raises
    ------
    Raises the most recent downloading exception if all downloads failed.

    Returns
    -------
    dict[string, dict]
        A dictionary containing the return value from download() for each successfully
        downloaded product.
    dict[string, dict]
        A dictionary containing the product information for products whose retrieval
        from the long term archive was successfully triggered.
    set[string]
        The list of products that failed to download.
    """
    product_ids = list(products)
    self.logger.info("Will download %d products", len(product_ids))
    return_values = OrderedDict()
    last_exception = None
    for i, product_id in enumerate(products):
        for attempt_num in range(max_attempts):
            try:
                product_info = self.download(product_id, directory_path, checksum)
                return_values[product_id] = product_info
                break
            except (KeyboardInterrupt, SystemExit):
                # Never retry on user interruption -- propagate immediately.
                raise
            except InvalidChecksumError as e:
                last_exception = e
                self.logger.warning(
                    "Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
            except SentinelAPILTAError as e:
                last_exception = e
                self.logger.exception("There was an error retrieving %s from the LTA", product_id)
                # Retrying is pointless once the LTA request itself failed.
                break
            except Exception as e:
                last_exception = e
                self.logger.exception("There was an error downloading %s", product_id)
        self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
    failed = set(products) - set(return_values)
    # split up sucessfully processed products into downloaded and only triggered retrieval from the LTA
    triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
    downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
    if len(failed) == len(product_ids) and last_exception is not None:
        # Everything failed -- surface the underlying cause to the caller.
        raise last_exception
    return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
def _query_names(self, names):
    """Find products by their names, e.g.
    S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.

    Note that duplicates exist on server, so multiple products can be returned for each name.

    Parameters
    ----------
    names : list[string]
        List of product names.

    Returns
    -------
    dict[string, dict[str, dict]]
        A dictionary mapping each name to a dictionary which contains the products with
        that name (with ID as the key).
    """
    def chunks(l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]
    products = {}
    # 40 names per query fits reasonably well inside the query limit
    for chunk in chunks(names, 40):
        # OR-ing the names matches any product whose identifier is one of them.
        query = " OR ".join(chunk)
        products.update(self.query(raw=query))
    # Group the products
    output = OrderedDict((name, dict()) for name in names)
    for id, metadata in products.items():
        name = metadata['identifier']
        output[name][id] = metadata
    return output
def check_files(self, paths=None, ids=None, directory=None, delete=False):
    """Verify the integrity of product files on disk.

    Integrity is checked by comparing the size and checksum of the file with the respective
    values on the server.

    The input can be a list of products to check or a list of IDs and a directory.

    In cases where multiple products with different IDs exist on the server for given product
    name, the file is considered to be correct if any of them matches the file size and
    checksum. A warning is logged in such situations.

    The corrupt products' OData info is included in the return value to make it easier to
    re-download the products, if necessary.

    Parameters
    ----------
    paths : list[string]
        List of product file paths.
    ids : list[string]
        List of product IDs.
    directory : string
        Directory where the files are located, if checking based on product IDs.
    delete : bool
        Whether to delete corrupt products. Defaults to False.

    Returns
    -------
    dict[str, list[dict]]
        A dictionary listing the invalid or missing files. The dictionary maps the corrupt
        file paths to a list of OData dictionaries of matching products on the server (as
        returned by :meth:`SentinelAPI.get_product_odata()`).
    """
    if not ids and not paths:
        raise ValueError("Must provide either file paths or product IDs and a directory")
    if ids and not directory:
        raise ValueError("Directory value missing")
    paths = paths or []
    ids = ids or []
    def name_from_path(path):
        # Product name is the file name stripped of directory and extension.
        return splitext(basename(path))[0]
    # Get product IDs corresponding to the files on disk
    names = []
    if paths:
        names = list(map(name_from_path, paths))
        result = self._query_names(names)
        for product_dicts in result.values():
            ids += list(product_dicts)
    names_from_paths = set(names)
    ids = set(ids)
    # Collect the OData information for each product
    # Product name -> list of matching odata dicts
    product_infos = defaultdict(list)
    for id in ids:
        odata = self.get_product_odata(id)
        name = odata['title']
        product_infos[name].append(odata)
        # Collect
        # ID-only inputs get a synthesized expected path inside `directory`.
        if name not in names_from_paths:
            paths.append(join(directory, name + '.zip'))
    # Now go over the list of products and check them
    corrupt = {}
    for path in paths:
        name = name_from_path(path)
        if len(product_infos[name]) > 1:
            self.logger.warning("{} matches multiple products on server".format(path))
        if not exists(path):
            # We will consider missing files as corrupt also
            self.logger.info("{} does not exist on disk".format(path))
            corrupt[path] = product_infos[name]
            continue
        is_fine = False
        for product_info in product_infos[name]:
            # A file is valid if it matches ANY of the same-named server products.
            if (getsize(path) == product_info['size'] and
                    self._md5_compare(path, product_info['md5'])):
                is_fine = True
                break
        if not is_fine:
            self.logger.info("{} is corrupt".format(path))
            corrupt[path] = product_infos[name]
            if delete:
                remove(path)
    return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
"""Compare a given MD5 checksum with one calculated from a file."""
with closing(self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B",
unit_scale=True)) as progress:
md5 = hashlib.md5()
with open(file_path, "rb") as f:
while True:
block_data = f.read(block_size)
if not block_data:
break
md5.update(block_data)
progress.update(len(block_data))
return md5.hexdigest().lower() == checksum.lower()
def _download(self, url, path, session, file_size):
    # Stream `url` to `path`, resuming from an existing partial file via an
    # HTTP Range request. Returns the number of bytes downloaded in THIS call
    # (excluding any previously downloaded prefix).
    headers = {}
    continuing = exists(path)
    if continuing:
        already_downloaded_bytes = getsize(path)
        # Ask the server for only the missing tail of the file.
        headers = {'Range': 'bytes={}-'.format(already_downloaded_bytes)}
    else:
        already_downloaded_bytes = 0
    downloaded_bytes = 0
    with closing(session.get(url, stream=True, auth=session.auth,
                             headers=headers, timeout=self.timeout)) as r, \
            closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
                               unit_scale=True, initial=already_downloaded_bytes)) as progress:
        _check_scihub_response(r, test_json=False)
        chunk_size = 2 ** 20  # download in 1 MB chunks
        # Append when resuming so previously fetched bytes are kept.
        mode = 'ab' if continuing else 'wb'
        with open(path, mode) as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    progress.update(len(chunk))
                    downloaded_bytes += len(chunk)
    # Return the number of bytes downloaded
    return downloaded_bytes
def _tqdm(self, **kwargs):
    """tqdm progressbar wrapper. May be overridden to customize progressbar behavior"""
    # Progress bars are globally switched on/off via show_progressbars.
    kwargs['disable'] = not self.show_progressbars
    return tqdm(**kwargs)
class SentinelAPIError(Exception):
    """Invalid responses from DataHub.

    Attributes
    ----------
    msg: str
        The error message.
    response: requests.Response
        The response from the server as a `requests.Response` object.
    """

    def __init__(self, msg=None, response=None):
        self.msg = msg
        self.response = response

    def __str__(self):
        # Both attributes default to None; guard against them being unset.
        # (Previously str() raised TypeError on msg=None and AttributeError
        # on response=None.)
        msg = self.msg or ''
        if self.response is not None:
            status = '{0} {1}'.format(self.response.status_code, self.response.reason)
        else:
            status = 'unknown'
        # Multi-line messages start on a fresh line for readability.
        return 'HTTP status {0}: {1}'.format(
            status, ('\n' if '\n' in msg else '') + msg)
class SentinelAPILTAError(SentinelAPIError):
    """Error when retrieving a product from the Long Term Archive.

    Attributes
    ----------
    msg: str
        The error message.
    response: requests.Response
        The response from the server as a `requests.Response` object.
    """
    # The constructor is inherited from SentinelAPIError; the previous
    # byte-identical __init__ override here was redundant.
class InvalidChecksumError(Exception):
    """MD5 checksum of a local file does not match the one from the server."""
def read_geojson(geojson_file):
    """Read a GeoJSON file into a GeoJSON object."""
    # Let the geojson package handle parsing/validation of the file contents.
    with open(geojson_file) as fp:
        return geojson.load(fp)
def format_query_date(in_date):
    r"""
    Format a date, datetime or a YYYYMMDD string input as YYYY-MM-DDThh:mm:ssZ
    or validate a date string as suitable for the full text search interface and return it.

    `None` will be converted to '\*', meaning an unlimited date bound in date ranges.

    Parameters
    ----------
    in_date : str or datetime or date or None
        Date to be formatted

    Returns
    -------
    str
        Formatted string

    Raises
    ------
    ValueError
        If the input date type is incorrect or passed date string is invalid
    """
    if in_date is None:
        return '*'
    if isinstance(in_date, (datetime, date)):
        return in_date.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif not isinstance(in_date, string_types):
        raise ValueError('Expected a string or a datetime object. Received {}.'.format(in_date))

    in_date = in_date.strip()
    if in_date == '*':
        # '*' can be used for one-sided range queries e.g. ingestiondate:[* TO NOW-1YEAR]
        return in_date

    # Reference: https://cwiki.apache.org/confluence/display/solr/Working+with+Dates
    # ISO-8601 date or NOW
    valid_date_pattern = r'^(?:\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(?:\.\d+)?Z|NOW)'
    # date arithmetic suffix is allowed
    units = r'(?:YEAR|MONTH|DAY|HOUR|MINUTE|SECOND)'
    valid_date_pattern += r'(?:[-+]\d+{}S?)*'.format(units)
    # dates can be rounded to a unit of time
    # e.g. "NOW/DAY" for dates since 00:00 today
    valid_date_pattern += r'(?:/{}S?)*$'.format(units)
    # NOTE: the input was stripped once above; a second redundant strip() that
    # used to be here has been removed.
    if re.match(valid_date_pattern, in_date):
        return in_date
    try:
        # Fall back to compact YYYYMMDD input.
        return datetime.strptime(in_date, '%Y%m%d').strftime('%Y-%m-%dT%H:%M:%SZ')
    except ValueError:
        raise ValueError('Unsupported date value {}'.format(in_date))
def _check_scihub_response(response, test_json=True):
    """Check that the response from server has status code 2xx and that the response is valid JSON.

    Raises SentinelAPIError with the best error message that can be extracted
    from the response (cause-message header, JSON error body, or HTML text).
    """
    # Prevent requests from needing to guess the encoding
    # SciHub appears to be using UTF-8 in all of their responses
    response.encoding = 'utf-8'
    try:
        response.raise_for_status()
        if test_json:
            response.json()
    except (requests.HTTPError, ValueError):
        msg = "Invalid API response."
        # Try progressively less structured sources for an error message.
        # NOTE: these used to be bare `except:` clauses, which also swallowed
        # KeyboardInterrupt/SystemExit -- narrowed to Exception.
        try:
            msg = response.headers['cause-message']
        except Exception:
            try:
                msg = response.json()['error']['message']['value']
            except Exception:
                if not response.text.strip().startswith('{'):
                    # Not JSON -- assume an HTML error page and render it as text.
                    try:
                        h = html2text.HTML2Text()
                        h.ignore_images = True
                        h.ignore_anchors = True
                        msg = h.handle(response.text).strip()
                    except Exception:
                        pass
        api_error = SentinelAPIError(msg, response)
        # Suppress "During handling of the above exception..." message
        # See PEP 409
        api_error.__cause__ = None
        raise api_error
def _format_order_by(order_by):
if not order_by or not order_by.strip():
return None
output = []
for part in order_by.split(','):
part = part.strip()
dir = " asc"
if part[0] == '+':
part = part[1:]
elif part[0] == '-':
dir = " desc"
part = part[1:]
if not part or not part.isalnum():
raise ValueError("Invalid order by value ({})".format(order_by))
output.append(part + dir)
return ",".join(output)
def _parse_gml_footprint(geometry_str):
geometry_xml = ET.fromstring(geometry_str)
poly_coords_str = geometry_xml \
.find('{http://www.opengis.net/gml}outerBoundaryIs') \
.find('{http://www.opengis.net/gml}LinearRing') \
.findtext('{http://www.opengis.net/gml}coordinates')
poly_coords = (coord.split(",")[::-1] for coord in poly_coords_str.split(" "))
coord_string = ",".join(" ".join(coord) for coord in poly_coords)
return "POLYGON(({}))".format(coord_string)
def _parse_iso_date(content):
if '.' in content:
return datetime.strptime(content, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
return datetime.strptime(content, '%Y-%m-%dT%H:%M:%SZ')
def _parse_odata_timestamp(in_date):
"""Convert the timestamp received from OData JSON API to a datetime object.
"""
timestamp = int(in_date.replace('/Date(', '').replace(')/', ''))
seconds = timestamp // 1000
ms = timestamp % 1000
return datetime.utcfromtimestamp(seconds) + timedelta(milliseconds=ms)
def _parse_opensearch_response(products):
    """Convert a query response to a dictionary.

    The resulting dictionary structure is {<product id>: {<property>: <value>}}.
    The property values are converted to their respective Python types unless `parse_values`
    is set to `False`.
    """
    # Map OpenSearch value-type tags to Python converters.
    converters = {'date': _parse_iso_date, 'int': int, 'long': int, 'float': float, 'double': float}
    # Keep the string type by default
    default_converter = lambda x: x
    output = OrderedDict()
    for prod in products:
        product_dict = {}
        prod_id = prod['id']
        output[prod_id] = product_dict
        for key in prod:
            if key == 'id':
                continue
            if isinstance(prod[key], string_types):
                # Plain string attribute: store as-is.
                product_dict[key] = prod[key]
            else:
                properties = prod[key]
                if isinstance(properties, dict):
                    # A single typed property arrives as a dict, not a list.
                    properties = [properties]
                if key == 'link':
                    # Links are keyed by their 'rel' attribute, e.g. 'link_icon'.
                    for p in properties:
                        name = 'link'
                        if 'rel' in p:
                            name = 'link_' + p['rel']
                        product_dict[name] = p['href']
                else:
                    f = converters.get(key, default_converter)
                    for p in properties:
                        try:
                            product_dict[p['name']] = f(p['content'])
                        except KeyError:
                            # Sentinel-3 has one element 'arr'
                            # which violates the name:content convention
                            product_dict[p['name']] = f(p['str'])
    return output
def _parse_odata_response(product):
    # Flatten one product's OData JSON entry into a plain dict with
    # Python-typed values.
    output = {
        'id': product['Id'],
        'title': product['Name'],
        'size': int(product['ContentLength']),
        # Keyed by the algorithm name reported by the server, e.g. 'md5'.
        product['Checksum']['Algorithm'].lower(): product['Checksum']['Value'],
        'date': _parse_odata_timestamp(product['ContentDate']['Start']),
        'footprint': _parse_gml_footprint(product["ContentGeometry"]),
        'url': product['__metadata']['media_src'],
        # A missing 'Online' attribute is treated as the product being online.
        'Online': product.get('Online', True),
        'Creation Date': _parse_odata_timestamp(product['CreationDate']),
        'Ingestion Date': _parse_odata_timestamp(product['IngestionDate']),
    }
    # Parse the extended metadata, if provided
    # Try each converter in turn and keep the first that succeeds;
    # values that fit none of them stay strings.
    converters = [int, float, _parse_iso_date]
    for attr in product['Attributes'].get('results', []):
        value = attr['Value']
        for f in converters:
            try:
                value = f(attr['Value'])
                break
            except ValueError:
                pass
        output[attr['Name']] = value
    return output
|
def format_query_date(in_date):
    r"""Format a date/datetime (or validate a YYYYMMDD / Solr date string) for
    the full-text search interface.

    `None` maps to '\*', an unbounded endpoint in date ranges. Raises
    ValueError for unsupported types or unparseable date strings.
    """
    if in_date is None:
        return '*'
    if isinstance(in_date, (datetime, date)):
        return in_date.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif not isinstance(in_date, string_types):
        raise ValueError('Expected a string or a datetime object. Received {}.'.format(in_date))

    in_date = in_date.strip()
    if in_date == '*':
        # '*' can be used for one-sided range queries e.g. ingestiondate:[* TO NOW-1YEAR]
        return in_date

    # Reference: https://cwiki.apache.org/confluence/display/solr/Working+with+Dates
    # ISO-8601 date or NOW
    valid_date_pattern = r'^(?:\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(?:\.\d+)?Z|NOW)'
    # date arithmetic suffix is allowed
    units = r'(?:YEAR|MONTH|DAY|HOUR|MINUTE|SECOND)'
    valid_date_pattern += r'(?:[-+]\d+{}S?)*'.format(units)
    # dates can be rounded to a unit of time, e.g. "NOW/DAY" for 00:00 today
    valid_date_pattern += r'(?:/{}S?)*$'.format(units)
    # The input was stripped once above; the duplicated strip() was removed.
    if re.match(valid_date_pattern, in_date):
        return in_date
    try:
        return datetime.strptime(in_date, '%Y%m%d').strftime('%Y-%m-%dT%H:%M:%SZ')
    except ValueError:
        raise ValueError('Unsupported date value {}'.format(in_date))
or validate a date string as suitable for the full text search interface and return it.
`None` will be converted to '\*', meaning an unlimited date bound in date ranges.
Parameters
----------
in_date : str or datetime or date or None
Date to be formatted
Returns
-------
str
Formatted string
Raises
------
ValueError
If the input date type is incorrect or passed date string is invalid | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L943-L994 | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import hashlib
import logging
import re
import shutil
import warnings
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict
from contextlib import closing
from datetime import date, datetime, timedelta
from os import remove
from os.path import basename, exists, getsize, join, splitext
import geojson
import geomet.wkt
import html2text
import requests
from six import string_types
from six.moves.urllib.parse import urljoin, quote_plus
from tqdm import tqdm
from . import __version__ as sentinelsat_version
class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
             show_progressbars=True, timeout=None):
    # One shared HTTP session for all API calls (keeps auth + keep-alive).
    self.session = requests.Session()
    # Credentials may be omitted to fall back on ~/.netrc (handled by requests).
    if user and password:
        self.session.auth = (user, password)
    # Normalize with a trailing slash so later urljoin() calls resolve
    # relative paths against api_url instead of replacing its last segment.
    self.api_url = api_url if api_url.endswith('/') else api_url + '/'
    # Maximum page size allowed on ApiHub.
    self.page_size = 100
    self.user_agent = 'sentinelsat/' + sentinelsat_version
    self.session.headers['User-Agent'] = self.user_agent
    self.show_progressbars = show_progressbars
    self.timeout = timeout
    # For unit tests
    self._last_query = None
    self._last_response = None
def query(self, area=None, date=None, raw=None, area_relation='Intersects',
          order_by=None, limit=None, offset=0, **keywords):
    """Query the OpenSearch API with the coordinates of an area, a date interval
    and any other search keywords accepted by the API.

    Parameters
    ----------
    area : str, optional
        The area of interest formatted as a Well-Known Text string.
    date : tuple of (str or datetime) or str, optional
        A time interval filter based on the Sensing Start Time of the products.
        Expects a tuple of (start, end), e.g. ("NOW-1DAY", "NOW").
        The timestamps can be either a Python datetime or a string in one of the
        following formats:

            - yyyyMMdd
            - yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)
            - yyyy-MM-ddThh:mm:ssZ
            - NOW
            - NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)
            - NOW+<n>DAY(S)
            - yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)
            - NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit

        Alternatively, an already fully formatted string such as "[NOW-1DAY TO NOW]" can be
        used as well.
    raw : str, optional
        Additional query text that will be appended to the query.
    area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
        What relation to use for testing the AOI. Case insensitive.

            - Intersects: true if the AOI and the footprint intersect (default)
            - Contains: true if the AOI is inside the footprint
            - IsWithin: true if the footprint is inside the AOI

    order_by: str, optional
        A comma-separated list of fields to order by (on server side).
        Prefix the field name by '+' or '-' to sort in ascending or descending order,
        respectively. Ascending order is used if prefix is omitted.
        Example: "cloudcoverpercentage, -beginposition".
    limit: int, optional
        Maximum number of products returned. Defaults to no limit.
    offset: int, optional
        The number of results to skip. Defaults to 0.
    **keywords
        Additional keywords can be used to specify other query parameters,
        e.g. `relativeorbitnumber=70`.
        See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
        for a full list.

        Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.
        `None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.
        Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.

        The time interval formats accepted by the `date` parameter can also be used with
        any other parameters that expect time intervals (that is: 'beginposition', 'endposition',
        'date', 'creationdate', and 'ingestiondate').

    Returns
    -------
    dict[string, dict]
        Products returned by the query as a dictionary with the product ID as the key and
        the product's attributes (a dictionary) as the value.
    """
    # Build the Solr query string from all the structured inputs.
    query = self.format_query(area, date, raw, area_relation, **keywords)
    self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
                      order_by, limit, offset, query)
    # Translate the '+'/'-' prefixed field list into the server's asc/desc syntax.
    formatted_order_by = _format_order_by(order_by)
    response, count = self._load_query(query, formatted_order_by, limit, offset)
    self.logger.info("Found %s products", count)
    return _parse_opensearch_response(response)
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
"""Create a OpenSearch API query string.
"""
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts)
def query_raw(self, query, order_by=None, limit=None, offset=0):
"""
Do a full-text query on the OpenSearch API using the format specified in
https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release.
Parameters
----------
query : str
The query string.
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively.
Ascending order is used, if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
"""
warnings.warn(
"query_raw() has been merged with query(). use query(raw=...) instead.",
PendingDeprecationWarning
)
return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
"""Get the number of products matching a query.
Accepted parameters are identical to :meth:`SentinelAPI.query()`.
This is a significantly more efficient alternative to doing `len(api.query())`,
which can take minutes to run for queries matching thousands of products.
Returns
-------
int
The number of products matching a query.
"""
for kw in ['order_by', 'limit', 'offset']:
# Allow these function arguments to be included for compatibility with query(),
# but ignore them.
if kw in keywords:
del keywords[kw]
query = self.format_query(area, date, raw, area_relation, **keywords)
_, total_count = self._load_query(query, limit=0)
return total_count
    def _load_query(self, query, order_by=None, limit=None, offset=0):
        """Run a query and page through the results until ``limit`` products
        (or all matches) have been collected.

        Returns a tuple of (list of raw product entry dicts, total match count).
        """
        # First page; also yields the server-reported total count
        products, count = self._load_subquery(query, order_by, limit, offset)

        # repeat query until all results have been loaded
        max_offset = count
        if limit is not None:
            max_offset = min(count, offset + limit)
        if max_offset > offset + self.page_size:
            # initial=self.page_size because the first page was fetched above
            progress = self._tqdm(desc="Querying products",
                                  initial=self.page_size,
                                  total=max_offset - offset,
                                  unit=' products')
            for new_offset in range(offset + self.page_size, max_offset, self.page_size):
                # Shrink the per-page limit by the number of products already fetched
                new_limit = limit
                if limit is not None:
                    new_limit = limit - new_offset + offset
                ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
                progress.update(len(ret))
                products += ret
            progress.close()
        return products, count
    def _load_subquery(self, query, order_by=None, limit=None, offset=0):
        """Fetch a single page of query results from the OpenSearch API.

        Returns a tuple of (list of product entry dicts, total result count).

        Raises
        ------
        SentinelAPIError
            If the response is not valid JSON or the query string was invalid.
        """
        # store last query (for testing)
        self._last_query = query
        self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)

        # load query results
        url = self._format_url(order_by, limit, offset)
        # The query is POSTed as form data, avoiding URL length limits
        response = self.session.post(url, {'q': query}, auth=self.session.auth,
                                     headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
                                     timeout=self.timeout)
        _check_scihub_response(response)

        # store last status code (for testing)
        self._last_response = response

        # parse response content
        try:
            json_feed = response.json()['feed']
            if json_feed['opensearch:totalResults'] is None:
                # We are using some unintended behavior of the server that a null is
                # returned as the total results value when the query string was incorrect.
                raise SentinelAPIError(
                    'Invalid query string. Check the parameters and format.', response)
            total_results = int(json_feed['opensearch:totalResults'])
        except (ValueError, KeyError):
            raise SentinelAPIError('API response not valid. JSON decoding failed.', response)

        products = json_feed.get('entry', [])
        # this verification is necessary because if the query returns only
        # one product, self.products will be a dict not a list
        if isinstance(products, dict):
            products = [products]

        return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
@staticmethod
def to_geojson(products):
"""Return the products from a query response as a GeoJSON with the values in their
appropriate Python types.
"""
feature_list = []
for i, (product_id, props) in enumerate(products.items()):
props = props.copy()
props['id'] = product_id
poly = geomet.wkt.loads(props['footprint'])
del props['footprint']
del props['gmlfootprint']
# Fix "'datetime' is not JSON serializable"
for k, v in props.items():
if isinstance(v, (date, datetime)):
props[k] = v.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
feature_list.append(
geojson.Feature(geometry=poly, id=i, properties=props)
)
return geojson.FeatureCollection(feature_list)
@staticmethod
def to_dataframe(products):
"""Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index')
@staticmethod
def to_geodataframe(products):
"""Return the products from a query response as a GeoPandas GeoDataFrame
with the values in their appropriate Python types.
"""
try:
import geopandas as gpd
import shapely.wkt
except ImportError:
raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")
crs = {'init': 'epsg:4326'} # WGS84
if len(products) == 0:
return gpd.GeoDataFrame(crs=crs)
df = SentinelAPI.to_dataframe(products)
geometry = [shapely.wkt.loads(fp) for fp in df['footprint']]
# remove useless columns
df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)
return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
    def get_product_odata(self, id, full=False):
        """Access OData API to get info about a product.

        Returns a dict containing the id, title, size, md5sum, date, footprint and download url
        of the product. The date field corresponds to the Start ContentDate value.

        If `full` is set to True, then the full, detailed metadata of the product is returned
        in addition to the above.

        Parameters
        ----------
        id : string
            The UUID of the product to query
        full : bool
            Whether to get the full metadata for the Product. False by default.

        Returns
        -------
        dict[str, Any]
            A dictionary with an item for each metadata attribute

        Raises
        ------
        SentinelAPIError
            If the server responds with an error or invalid JSON.

        Notes
        -----
        For a full list of mappings between the OpenSearch (Solr) and OData attribute names
        see the following definition files:
        https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
        https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
        https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
        """
        url = urljoin(self.api_url, u"odata/v1/Products('{}')?$format=json".format(id))
        if full:
            # $expand=Attributes makes the server include the extended metadata
            url += '&$expand=Attributes'
        response = self.session.get(url, auth=self.session.auth,
                                    timeout=self.timeout)
        _check_scihub_response(response)
        values = _parse_odata_response(response.json()['d'])
        return values
    def _trigger_offline_retrieval(self, url):
        """ Triggers retrieval of an offline product

        Trying to download an offline product triggers its retrieval from the long term archive.
        The returned HTTP status code conveys whether this was successful.

        Parameters
        ----------
        url : string
            URL for downloading the product

        Returns
        -------
        int
            The HTTP status code of the server's response (202 when the
            retrieval request was accepted).

        Raises
        ------
        SentinelAPILTAError
            On the documented error status codes 503, 403 and 500.

        Notes
        -----
        https://scihub.copernicus.eu/userguide/LongTermArchive
        """
        with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
            # check https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
            if r.status_code == 202:
                self.logger.info("Accepted for retrieval")
            elif r.status_code == 503:
                self.logger.error("Request not accepted")
                raise SentinelAPILTAError('Request for retrieval from LTA not accepted', r)
            elif r.status_code == 403:
                self.logger.error("Requests exceed user quota")
                raise SentinelAPILTAError('Requests for retrieval from LTA exceed user quota', r)
            elif r.status_code == 500:
                # should not happen
                self.logger.error("Trying to download an offline product")
                raise SentinelAPILTAError('Trying to download an offline product', r)
            return r.status_code
    def download(self, id, directory_path='.', checksum=True):
        """Download a product.

        Uses the filename on the server for the downloaded file, e.g.
        "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

        Incomplete downloads are continued and complete files are skipped.
        If the product is offline, this only triggers its retrieval from the
        long term archive and returns without downloading.

        Parameters
        ----------
        id : string
            UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
        directory_path : string, optional
            Where the file will be downloaded
        checksum : bool, optional
            If True, verify the downloaded file's integrity by checking its MD5 checksum.
            Throws InvalidChecksumError if the checksum does not match.
            Defaults to True.

        Returns
        -------
        product_info : dict
            Dictionary containing the product's info from get_product_info() as well as
            the path on disk.

        Raises
        ------
        InvalidChecksumError
            If the MD5 checksum does not match the checksum on the server.
        """
        product_info = self.get_product_odata(id)
        path = join(directory_path, product_info['title'] + '.zip')
        product_info['path'] = path
        product_info['downloaded_bytes'] = 0

        self.logger.info('Downloading %s to %s', id, path)

        if exists(path):
            # We assume that the product has been downloaded and is complete
            return product_info

        # An incomplete download triggers the retrieval from the LTA if the product is not online
        if not product_info['Online']:
            self.logger.warning(
                'Product %s is not online. Triggering retrieval from long term archive.',
                product_info['id'])
            self._trigger_offline_retrieval(product_info['url'])
            return product_info

        # Use a temporary file for downloading
        temp_path = path + '.incomplete'

        skip_download = False
        if exists(temp_path):
            # Decide, based on the partial file's size, whether to delete,
            # accept or resume it.
            if getsize(temp_path) > product_info['size']:
                self.logger.warning(
                    "Existing incomplete file %s is larger than the expected final size"
                    " (%s vs %s bytes). Deleting it.",
                    str(temp_path), getsize(temp_path), product_info['size'])
                remove(temp_path)
            elif getsize(temp_path) == product_info['size']:
                if self._md5_compare(temp_path, product_info['md5']):
                    skip_download = True
                else:
                    # Log a warning since this should never happen
                    self.logger.warning(
                        "Existing incomplete file %s appears to be fully downloaded but "
                        "its checksum is incorrect. Deleting it.",
                        str(temp_path))
                    remove(temp_path)
            else:
                # continue downloading
                self.logger.info(
                    "Download will resume from existing incomplete file %s.", temp_path)
                pass

        if not skip_download:
            # Store the number of downloaded bytes for unit tests
            product_info['downloaded_bytes'] = self._download(
                product_info['url'], temp_path, self.session, product_info['size'])

        # Check integrity with MD5 checksum
        if checksum is True:
            if not self._md5_compare(temp_path, product_info['md5']):
                remove(temp_path)
                raise InvalidChecksumError('File corrupt: checksums do not match')

        # Download successful, rename the temporary file to its proper name
        shutil.move(temp_path, path)
        return product_info
    def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
        """Download a list of products.

        Takes a list of product IDs as input. This means that the return value of query() can be
        passed directly to this method.

        File names on the server are used for the downloaded files, e.g.
        "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

        In case of interruptions or other exceptions, downloading will restart from where it left
        off. Downloading is attempted at most max_attempts times to avoid getting stuck with
        unrecoverable errors.

        Parameters
        ----------
        products : list
            List of product IDs
        directory_path : string
            Directory where the downloaded files will be downloaded
        max_attempts : int, optional
            Number of allowed retries before giving up downloading a product. Defaults to 10.
        checksum : bool, optional
            If True, verify the downloaded files' integrity by checking its MD5 checksum.
            Throws InvalidChecksumError if the checksum does not match.
            Defaults to True.

        Raises
        ------
        Raises the most recent downloading exception if all downloads failed.

        Returns
        -------
        dict[string, dict]
            A dictionary containing the return value from download() for each successfully
            downloaded product.
        dict[string, dict]
            A dictionary containing the product information for products whose retrieval
            from the long term archive was successfully triggered.
        set[string]
            The list of products that failed to download.
        """
        product_ids = list(products)
        self.logger.info("Will download %d products", len(product_ids))
        return_values = OrderedDict()
        last_exception = None
        for i, product_id in enumerate(products):
            for attempt_num in range(max_attempts):
                try:
                    product_info = self.download(product_id, directory_path, checksum)
                    return_values[product_id] = product_info
                    break
                except (KeyboardInterrupt, SystemExit):
                    # Never retry or swallow user interruption
                    raise
                except InvalidChecksumError as e:
                    last_exception = e
                    self.logger.warning(
                        "Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
                except SentinelAPILTAError as e:
                    last_exception = e
                    self.logger.exception("There was an error retrieving %s from the LTA", product_id)
                    # LTA errors are not retried; move on to the next product
                    break
                except Exception as e:
                    last_exception = e
                    self.logger.exception("There was an error downloading %s", product_id)
            self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
        failed = set(products) - set(return_values)
        # split up successfully processed products into downloaded and only triggered retrieval from the LTA
        triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
        downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
        if len(failed) == len(product_ids) and last_exception is not None:
            # Every single download failed: surface the most recent error
            raise last_exception
        return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
def _query_names(self, names):
"""Find products by their names, e.g.
S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.
Note that duplicates exist on server, so multiple products can be returned for each name.
Parameters
----------
names : list[string]
List of product names.
Returns
-------
dict[string, dict[str, dict]]
A dictionary mapping each name to a dictionary which contains the products with
that name (with ID as the key).
"""
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
products = {}
# 40 names per query fits reasonably well inside the query limit
for chunk in chunks(names, 40):
query = " OR ".join(chunk)
products.update(self.query(raw=query))
# Group the products
output = OrderedDict((name, dict()) for name in names)
for id, metadata in products.items():
name = metadata['identifier']
output[name][id] = metadata
return output
    def check_files(self, paths=None, ids=None, directory=None, delete=False):
        """Verify the integrity of product files on disk.

        Integrity is checked by comparing the size and checksum of the file with the respective
        values on the server.

        The input can be a list of products to check or a list of IDs and a directory.

        In cases where multiple products with different IDs exist on the server for given product
        name, the file is considered to be correct if any of them matches the file size and
        checksum. A warning is logged in such situations.

        The corrupt products' OData info is included in the return value to make it easier to
        re-download the products, if necessary.

        Parameters
        ----------
        paths : list[string]
            List of product file paths.
        ids : list[string]
            List of product IDs.
        directory : string
            Directory where the files are located, if checking based on product IDs.
        delete : bool
            Whether to delete corrupt products. Defaults to False.

        Returns
        -------
        dict[str, list[dict]]
            A dictionary listing the invalid or missing files. The dictionary maps the corrupt
            file paths to a list of OData dictionaries of matching products on the server (as
            returned by :meth:`SentinelAPI.get_product_odata()`).
        """
        if not ids and not paths:
            raise ValueError("Must provide either file paths or product IDs and a directory")
        if ids and not directory:
            raise ValueError("Directory value missing")
        paths = paths or []
        ids = ids or []

        def name_from_path(path):
            # Product name is the file name without its extension
            return splitext(basename(path))[0]

        # Get product IDs corresponding to the files on disk
        names = []
        if paths:
            names = list(map(name_from_path, paths))
            result = self._query_names(names)
            for product_dicts in result.values():
                ids += list(product_dicts)
        names_from_paths = set(names)
        ids = set(ids)

        # Collect the OData information for each product
        # Product name -> list of matching odata dicts
        product_infos = defaultdict(list)
        for id in ids:
            odata = self.get_product_odata(id)
            name = odata['title']
            product_infos[name].append(odata)

            # Collect a path for ID-only inputs so they get checked below
            if name not in names_from_paths:
                paths.append(join(directory, name + '.zip'))

        # Now go over the list of products and check them
        corrupt = {}
        for path in paths:
            name = name_from_path(path)

            if len(product_infos[name]) > 1:
                self.logger.warning("{} matches multiple products on server".format(path))

            if not exists(path):
                # We will consider missing files as corrupt also
                self.logger.info("{} does not exist on disk".format(path))
                corrupt[path] = product_infos[name]
                continue

            # The file is fine if ANY matching server product agrees on size and MD5
            is_fine = False
            for product_info in product_infos[name]:
                if (getsize(path) == product_info['size'] and
                        self._md5_compare(path, product_info['md5'])):
                    is_fine = True
                    break
            if not is_fine:
                self.logger.info("{} is corrupt".format(path))
                corrupt[path] = product_infos[name]
                if delete:
                    remove(path)

        return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
"""Compare a given MD5 checksum with one calculated from a file."""
with closing(self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B",
unit_scale=True)) as progress:
md5 = hashlib.md5()
with open(file_path, "rb") as f:
while True:
block_data = f.read(block_size)
if not block_data:
break
md5.update(block_data)
progress.update(len(block_data))
return md5.hexdigest().lower() == checksum.lower()
    def _download(self, url, path, session, file_size):
        """Stream ``url`` to ``path``, resuming an existing partial file via an
        HTTP Range request. Returns the number of bytes downloaded during this
        call (excluding any previously downloaded portion).
        """
        headers = {}
        continuing = exists(path)
        if continuing:
            already_downloaded_bytes = getsize(path)
            # Ask the server for only the remainder of the file
            headers = {'Range': 'bytes={}-'.format(already_downloaded_bytes)}
        else:
            already_downloaded_bytes = 0
        downloaded_bytes = 0
        with closing(session.get(url, stream=True, auth=session.auth,
                                 headers=headers, timeout=self.timeout)) as r, \
                closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
                                   unit_scale=True, initial=already_downloaded_bytes)) as progress:
            _check_scihub_response(r, test_json=False)
            chunk_size = 2 ** 20  # download in 1 MB chunks
            # Append when resuming so the existing prefix is kept
            mode = 'ab' if continuing else 'wb'
            with open(path, mode) as f:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
                        progress.update(len(chunk))
                        downloaded_bytes += len(chunk)
            # Return the number of bytes downloaded
            return downloaded_bytes
def _tqdm(self, **kwargs):
"""tqdm progressbar wrapper. May be overridden to customize progressbar behavior"""
kwargs.update({'disable': not self.show_progressbars})
return tqdm(**kwargs)
class SentinelAPIError(Exception):
    """Invalid responses from DataHub.

    Attributes
    ----------
    msg: str
        The error message.
    response: requests.Response
        The response from the server as a `requests.Response` object.
    """

    def __init__(self, msg=None, response=None):
        self.msg = msg
        self.response = response

    def __str__(self):
        # Guard against missing msg/response so that printing or logging the
        # exception can never raise a secondary AttributeError/TypeError.
        msg = self.msg or ''
        if self.response is None:
            return msg
        return 'HTTP status {0} {1}: {2}'.format(
            self.response.status_code, self.response.reason,
            ('\n' if '\n' in msg else '') + msg)
class SentinelAPILTAError(SentinelAPIError):
    """ Error when retrieving a product from the Long Term Archive

    Attributes
    ----------
    msg: str
        The error message.
    response: requests.Response
        The response from the server as a `requests.Response` object.
    """

    def __init__(self, msg=None, response=None):
        # Delegate to SentinelAPIError instead of duplicating the attribute
        # setup; keeps the two classes from drifting apart.
        super(SentinelAPILTAError, self).__init__(msg, response)
class InvalidChecksumError(Exception):
    """MD5 checksum of a local file does not match the one from the server.

    Raised by :meth:`SentinelAPI.download` when ``checksum=True`` and the
    downloaded file fails verification.
    """
    pass
def read_geojson(geojson_file):
    """Read a GeoJSON file into a GeoJSON object."""
    with open(geojson_file) as fp:
        return geojson.load(fp)
def geojson_to_wkt(geojson_obj, feature_number=0, decimals=4):
    """Convert a GeoJSON object to Well-Known Text. Intended for use with OpenSearch queries.

    In case of FeatureCollection, only one of the features is used (the first by default).
    3D points are converted to 2D.

    Parameters
    ----------
    geojson_obj : dict
        a GeoJSON object
    feature_number : int, optional
        Feature to extract polygon from (in case of MultiPolygon
        FeatureCollection), defaults to first Feature
    decimals : int, optional
        Number of decimal figures after point to round coordinate to. Defaults to 4 (about 10
        meters).

    Returns
    -------
    polygon coordinates
        string of comma separated coordinate tuples (lon, lat) to be used by SentinelAPI
    """
    # Accept a bare geometry, a Feature, or a FeatureCollection
    if 'coordinates' in geojson_obj:
        geometry = geojson_obj
    elif 'geometry' in geojson_obj:
        geometry = geojson_obj['geometry']
    else:
        geometry = geojson_obj['features'][feature_number]['geometry']

    def strip_to_2d(coords):
        """Drop z-coordinates, recursing through nested coordinate lists."""
        if isinstance(coords[0], (list, tuple)):
            return [strip_to_2d(c) for c in coords]
        return coords[:2]

    def validate_bounds(coords):
        """Raise ValueError for longitudes/latitudes outside their valid ranges."""
        if isinstance(coords[0], (list, tuple)):
            for c in coords:
                validate_bounds(c)
        else:
            if coords[0] > 180 or coords[0] < -180:
                raise ValueError('Longitude is out of bounds, check your JSON format or data')
            if coords[1] > 90 or coords[1] < -90:
                raise ValueError('Latitude is out of bounds, check your JSON format or data')

    # Discard z-coordinate, if it exists
    geometry['coordinates'] = strip_to_2d(geometry['coordinates'])
    validate_bounds(geometry['coordinates'])

    wkt = geomet.wkt.dumps(geometry, decimals=decimals)
    # Remove spaces not preceded by a digit (e.g. after the geometry keyword),
    # keeping the single space inside each "lon lat" pair
    return re.sub(r'(?<!\d) ', '', wkt)
def _check_scihub_response(response, test_json=True):
"""Check that the response from server has status code 2xx and that the response is valid JSON.
"""
# Prevent requests from needing to guess the encoding
# SciHub appears to be using UTF-8 in all of their responses
response.encoding = 'utf-8'
try:
response.raise_for_status()
if test_json:
response.json()
except (requests.HTTPError, ValueError):
msg = "Invalid API response."
try:
msg = response.headers['cause-message']
except:
try:
msg = response.json()['error']['message']['value']
except:
if not response.text.strip().startswith('{'):
try:
h = html2text.HTML2Text()
h.ignore_images = True
h.ignore_anchors = True
msg = h.handle(response.text).strip()
except:
pass
api_error = SentinelAPIError(msg, response)
# Suppress "During handling of the above exception..." message
# See PEP 409
api_error.__cause__ = None
raise api_error
def _format_order_by(order_by):
if not order_by or not order_by.strip():
return None
output = []
for part in order_by.split(','):
part = part.strip()
dir = " asc"
if part[0] == '+':
part = part[1:]
elif part[0] == '-':
dir = " desc"
part = part[1:]
if not part or not part.isalnum():
raise ValueError("Invalid order by value ({})".format(order_by))
output.append(part + dir)
return ",".join(output)
def _parse_gml_footprint(geometry_str):
geometry_xml = ET.fromstring(geometry_str)
poly_coords_str = geometry_xml \
.find('{http://www.opengis.net/gml}outerBoundaryIs') \
.find('{http://www.opengis.net/gml}LinearRing') \
.findtext('{http://www.opengis.net/gml}coordinates')
poly_coords = (coord.split(",")[::-1] for coord in poly_coords_str.split(" "))
coord_string = ",".join(" ".join(coord) for coord in poly_coords)
return "POLYGON(({}))".format(coord_string)
def _parse_iso_date(content):
if '.' in content:
return datetime.strptime(content, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
return datetime.strptime(content, '%Y-%m-%dT%H:%M:%SZ')
def _parse_odata_timestamp(in_date):
"""Convert the timestamp received from OData JSON API to a datetime object.
"""
timestamp = int(in_date.replace('/Date(', '').replace(')/', ''))
seconds = timestamp // 1000
ms = timestamp % 1000
return datetime.utcfromtimestamp(seconds) + timedelta(milliseconds=ms)
def _parse_opensearch_response(products):
    """Convert a query response to a dictionary.

    The resulting dictionary structure is {<product id>: {<property>: <value>}}.
    Property values are converted to their Python types where a converter is
    registered for the entry type; otherwise they are kept as strings.
    """
    converters = {'date': _parse_iso_date, 'int': int, 'long': int,
                  'float': float, 'double': float}

    output = OrderedDict()
    for prod in products:
        attrs = {}
        output[prod['id']] = attrs
        for key, raw in prod.items():
            if key == 'id':
                continue
            if isinstance(raw, string_types):
                attrs[key] = raw
                continue
            # A single entry arrives as a dict, multiple as a list of dicts
            entries = [raw] if isinstance(raw, dict) else raw
            if key == 'link':
                for entry in entries:
                    name = 'link_' + entry['rel'] if 'rel' in entry else 'link'
                    attrs[name] = entry['href']
            else:
                # Keep the string type when no converter is registered
                convert = converters.get(key, lambda x: x)
                for entry in entries:
                    try:
                        attrs[entry['name']] = convert(entry['content'])
                    except KeyError:
                        # Sentinel-3 has one element 'arr'
                        # which violates the name:content convention
                        attrs[entry['name']] = convert(entry['str'])
    return output
def _parse_odata_response(product):
    """Flatten a single OData product entry (the 'd' payload) into a dict.

    The checksum key is named after the server-reported algorithm
    (lower-cased, e.g. 'md5').
    """
    output = {
        'id': product['Id'],
        'title': product['Name'],
        'size': int(product['ContentLength']),
        product['Checksum']['Algorithm'].lower(): product['Checksum']['Value'],
        'date': _parse_odata_timestamp(product['ContentDate']['Start']),
        'footprint': _parse_gml_footprint(product["ContentGeometry"]),
        'url': product['__metadata']['media_src'],
        # Default to Online=True when the field is absent from the response
        'Online': product.get('Online', True),
        'Creation Date': _parse_odata_timestamp(product['CreationDate']),
        'Ingestion Date': _parse_odata_timestamp(product['IngestionDate']),
    }
    # Parse the extended metadata, if provided.
    # NOTE(review): assumes an 'Attributes' key is always present (with
    # 'results' only when $expand=Attributes was used) -- confirm for
    # non-expanded responses.
    converters = [int, float, _parse_iso_date]
    for attr in product['Attributes'].get('results', []):
        value = attr['Value']
        # Try increasingly permissive conversions; fall back to the raw value.
        # NOTE(review): only ValueError is caught -- a non-string Value
        # (e.g. None) would raise TypeError here; confirm against the API.
        for f in converters:
            try:
                value = f(attr['Value'])
                break
            except ValueError:
                pass
        output[attr['Name']] = value
    return output
|
sentinelsat/sentinelsat | sentinelsat/sentinel.py | _check_scihub_response | python | def _check_scihub_response(response, test_json=True):
# Prevent requests from needing to guess the encoding
# SciHub appears to be using UTF-8 in all of their responses
response.encoding = 'utf-8'
try:
response.raise_for_status()
if test_json:
response.json()
except (requests.HTTPError, ValueError):
msg = "Invalid API response."
try:
msg = response.headers['cause-message']
except:
try:
msg = response.json()['error']['message']['value']
except:
if not response.text.strip().startswith('{'):
try:
h = html2text.HTML2Text()
h.ignore_images = True
h.ignore_anchors = True
msg = h.handle(response.text).strip()
except:
pass
api_error = SentinelAPIError(msg, response)
# Suppress "During handling of the above exception..." message
# See PEP 409
api_error.__cause__ = None
raise api_error | Check that the response from server has status code 2xx and that the response is valid JSON. | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L997-L1027 | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import hashlib
import logging
import re
import shutil
import warnings
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict
from contextlib import closing
from datetime import date, datetime, timedelta
from os import remove
from os.path import basename, exists, getsize, join, splitext
import geojson
import geomet.wkt
import html2text
import requests
from six import string_types
from six.moves.urllib.parse import urljoin, quote_plus
from tqdm import tqdm
from . import __version__ as sentinelsat_version
class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
show_progressbars=True, timeout=None):
self.session = requests.Session()
if user and password:
self.session.auth = (user, password)
self.api_url = api_url if api_url.endswith('/') else api_url + '/'
self.page_size = 100
self.user_agent = 'sentinelsat/' + sentinelsat_version
self.session.headers['User-Agent'] = self.user_agent
self.show_progressbars = show_progressbars
self.timeout = timeout
# For unit tests
self._last_query = None
self._last_response = None
    def query(self, area=None, date=None, raw=None, area_relation='Intersects',
              order_by=None, limit=None, offset=0, **keywords):
        """Query the OpenSearch API with the coordinates of an area, a date interval
        and any other search keywords accepted by the API.

        Parameters
        ----------
        area : str, optional
            The area of interest formatted as a Well-Known Text string.
        date : tuple of (str or datetime) or str, optional
            A time interval filter based on the Sensing Start Time of the products.
            Expects a tuple of (start, end), e.g. ("NOW-1DAY", "NOW").
            The timestamps can be either a Python datetime or a string in one of the
            following formats:

                - yyyyMMdd
                - yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)
                - yyyy-MM-ddThh:mm:ssZ
                - NOW
                - NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)
                - NOW+<n>DAY(S)
                - yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)
                - NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit

            Alternatively, an already fully formatted string such as "[NOW-1DAY TO NOW]" can be
            used as well.
        raw : str, optional
            Additional query text that will be appended to the query.
        area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
            What relation to use for testing the AOI. Case insensitive.

                - Intersects: true if the AOI and the footprint intersect (default)
                - Contains: true if the AOI is inside the footprint
                - IsWithin: true if the footprint is inside the AOI

        order_by: str, optional
            A comma-separated list of fields to order by (on server side).
            Prefix the field name by '+' or '-' to sort in ascending or descending order,
            respectively. Ascending order is used if prefix is omitted.
            Example: "cloudcoverpercentage, -beginposition".
        limit: int, optional
            Maximum number of products returned. Defaults to no limit.
        offset: int, optional
            The number of results to skip. Defaults to 0.
        **keywords
            Additional keywords can be used to specify other query parameters,
            e.g. `relativeorbitnumber=70`.
            See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
            for a full list.

            Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.
            `None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.
            Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.

            The time interval formats accepted by the `date` parameter can also be used with
            any other parameters that expect time intervals (that is: 'beginposition', 'endposition',
            'date', 'creationdate', and 'ingestiondate').

        Returns
        -------
        dict[string, dict]
            Products returned by the query as a dictionary with the product ID as the key and
            the product's attributes (a dictionary) as the value.
        """
        # Assemble the single full-text query string from all filter arguments.
        query = self.format_query(area, date, raw, area_relation, **keywords)
        self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
                          order_by, limit, offset, query)
        # Translate e.g. "-beginposition" into the server-side "beginposition desc" form.
        formatted_order_by = _format_order_by(order_by)
        # _load_query transparently follows server-side paging up to `limit`.
        response, count = self._load_query(query, formatted_order_by, limit, offset)
        self.logger.info("Found %s products", count)
        # Convert raw JSON entries into {product_id: {attribute: value}}.
        return _parse_opensearch_response(response)
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
"""Create a OpenSearch API query string.
"""
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts)
    def query_raw(self, query, order_by=None, limit=None, offset=0):
        """
        Do a full-text query on the OpenSearch API using the format specified in
        https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch

        DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release.

        Parameters
        ----------
        query : str
            The query string.
        order_by: str, optional
            A comma-separated list of fields to order by (on server side).
            Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively.
            Ascending order is used, if prefix is omitted.
            Example: "cloudcoverpercentage, -beginposition".
        limit: int, optional
            Maximum number of products returned. Defaults to no limit.
        offset: int, optional
            The number of results to skip. Defaults to 0.

        Returns
        -------
        dict[string, dict]
            Products returned by the query as a dictionary with the product ID as the key and
            the product's attributes (a dictionary) as the value.
        """
        # Kept only for backward compatibility: warn, then delegate to query().
        warnings.warn(
            "query_raw() has been merged with query(). use query(raw=...) instead.",
            PendingDeprecationWarning
        )
        return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
"""Get the number of products matching a query.
Accepted parameters are identical to :meth:`SentinelAPI.query()`.
This is a significantly more efficient alternative to doing `len(api.query())`,
which can take minutes to run for queries matching thousands of products.
Returns
-------
int
The number of products matching a query.
"""
for kw in ['order_by', 'limit', 'offset']:
# Allow these function arguments to be included for compatibility with query(),
# but ignore them.
if kw in keywords:
del keywords[kw]
query = self.format_query(area, date, raw, area_relation, **keywords)
_, total_count = self._load_query(query, limit=0)
return total_count
    def _load_query(self, query, order_by=None, limit=None, offset=0):
        """Fetch all pages of an OpenSearch query.

        Returns a tuple of (entries, total_count), where entries is a list of
        raw JSON product entries and total_count is the server-reported number
        of matches (which may exceed len(entries) when `limit` is set).
        """
        # The first page also tells us the total number of matching products.
        products, count = self._load_subquery(query, order_by, limit, offset)

        # repeat query until all results have been loaded
        max_offset = count
        if limit is not None:
            max_offset = min(count, offset + limit)
        if max_offset > offset + self.page_size:
            # `initial` accounts for the first page fetched above.
            progress = self._tqdm(desc="Querying products",
                                  initial=self.page_size,
                                  total=max_offset - offset,
                                  unit=' products')
            for new_offset in range(offset + self.page_size, max_offset, self.page_size):
                # Shrink the remaining per-page limit so we never fetch past
                # the requested total of `limit` products.
                new_limit = limit
                if limit is not None:
                    new_limit = limit - new_offset + offset
                ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
                progress.update(len(ret))
                products += ret
            progress.close()
        return products, count
    def _load_subquery(self, query, order_by=None, limit=None, offset=0):
        """Fetch a single page of OpenSearch results.

        Returns a tuple of (entries, total_count) for that page.
        Raises SentinelAPIError on invalid queries or malformed responses.
        """
        # store last query (for testing)
        self._last_query = query
        self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)

        # load query results
        url = self._format_url(order_by, limit, offset)
        # The query is POSTed as a form field to avoid URL length limits.
        response = self.session.post(url, {'q': query}, auth=self.session.auth,
                                     headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
                                     timeout=self.timeout)
        _check_scihub_response(response)

        # store last status code (for testing)
        self._last_response = response

        # parse response content
        try:
            json_feed = response.json()['feed']
            if json_feed['opensearch:totalResults'] is None:
                # We are using some unintended behavior of the server that a null is
                # returned as the total results value when the query string was incorrect.
                raise SentinelAPIError(
                    'Invalid query string. Check the parameters and format.', response)
            total_results = int(json_feed['opensearch:totalResults'])
        except (ValueError, KeyError):
            raise SentinelAPIError('API response not valid. JSON decoding failed.', response)

        products = json_feed.get('entry', [])
        # this verification is necessary because if the query returns only
        # one product, self.products will be a dict not a list
        if isinstance(products, dict):
            products = [products]

        return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
@staticmethod
def to_geojson(products):
"""Return the products from a query response as a GeoJSON with the values in their
appropriate Python types.
"""
feature_list = []
for i, (product_id, props) in enumerate(products.items()):
props = props.copy()
props['id'] = product_id
poly = geomet.wkt.loads(props['footprint'])
del props['footprint']
del props['gmlfootprint']
# Fix "'datetime' is not JSON serializable"
for k, v in props.items():
if isinstance(v, (date, datetime)):
props[k] = v.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
feature_list.append(
geojson.Feature(geometry=poly, id=i, properties=props)
)
return geojson.FeatureCollection(feature_list)
@staticmethod
def to_dataframe(products):
"""Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index')
@staticmethod
def to_geodataframe(products):
"""Return the products from a query response as a GeoPandas GeoDataFrame
with the values in their appropriate Python types.
"""
try:
import geopandas as gpd
import shapely.wkt
except ImportError:
raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")
crs = {'init': 'epsg:4326'} # WGS84
if len(products) == 0:
return gpd.GeoDataFrame(crs=crs)
df = SentinelAPI.to_dataframe(products)
geometry = [shapely.wkt.loads(fp) for fp in df['footprint']]
# remove useless columns
df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)
return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
    def get_product_odata(self, id, full=False):
        """Access OData API to get info about a product.

        Returns a dict containing the id, title, size, md5sum, date, footprint and download url
        of the product. The date field corresponds to the Start ContentDate value.

        If `full` is set to True, then the full, detailed metadata of the product is returned
        in addition to the above.

        Parameters
        ----------
        id : string
            The UUID of the product to query
        full : bool
            Whether to get the full metadata for the Product. False by default.

        Returns
        -------
        dict[str, Any]
            A dictionary with an item for each metadata attribute

        Notes
        -----
        For a full list of mappings between the OpenSearch (Solr) and OData attribute names
        see the following definition files:
        https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
        https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
        https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
        """
        url = urljoin(self.api_url, u"odata/v1/Products('{}')?$format=json".format(id))
        if full:
            # $expand=Attributes makes the server inline the complete
            # extended-metadata set in the same response.
            url += '&$expand=Attributes'
        response = self.session.get(url, auth=self.session.auth,
                                    timeout=self.timeout)
        _check_scihub_response(response)
        # The payload of interest is nested under the OData 'd' wrapper.
        values = _parse_odata_response(response.json()['d'])
        return values
def _trigger_offline_retrieval(self, url):
""" Triggers retrieval of an offline product
Trying to download an offline product triggers its retrieval from the long term archive.
The returned HTTP status code conveys whether this was successful.
Parameters
----------
url : string
URL for downloading the product
Notes
-----
https://scihub.copernicus.eu/userguide/LongTermArchive
"""
with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
# check https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
if r.status_code == 202:
self.logger.info("Accepted for retrieval")
elif r.status_code == 503:
self.logger.error("Request not accepted")
raise SentinelAPILTAError('Request for retrieval from LTA not accepted', r)
elif r.status_code == 403:
self.logger.error("Requests exceed user quota")
raise SentinelAPILTAError('Requests for retrieval from LTA exceed user quota', r)
elif r.status_code == 500:
# should not happen
self.logger.error("Trying to download an offline product")
raise SentinelAPILTAError('Trying to download an offline product', r)
return r.status_code
    def download(self, id, directory_path='.', checksum=True):
        """Download a product.

        Uses the filename on the server for the downloaded file, e.g.
        "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

        Incomplete downloads are continued and complete files are skipped.

        Parameters
        ----------
        id : string
            UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
        directory_path : string, optional
            Where the file will be downloaded
        checksum : bool, optional
            If True, verify the downloaded file's integrity by checking its MD5 checksum.
            Throws InvalidChecksumError if the checksum does not match.
            Defaults to True.

        Returns
        -------
        product_info : dict
            Dictionary containing the product's info from get_product_info() as well as
            the path on disk.

        Raises
        ------
        InvalidChecksumError
            If the MD5 checksum does not match the checksum on the server.
        """
        product_info = self.get_product_odata(id)
        path = join(directory_path, product_info['title'] + '.zip')
        product_info['path'] = path
        product_info['downloaded_bytes'] = 0

        self.logger.info('Downloading %s to %s', id, path)

        if exists(path):
            # We assume that the product has been downloaded and is complete
            # (only in-progress files carry the '.incomplete' suffix).
            return product_info

        # An incomplete download triggers the retrieval from the LTA if the product is not online
        if not product_info['Online']:
            self.logger.warning(
                'Product %s is not online. Triggering retrieval from long term archive.',
                product_info['id'])
            self._trigger_offline_retrieval(product_info['url'])
            # Nothing was downloaded - the caller can retry once the product is online.
            return product_info

        # Use a temporary file for downloading
        temp_path = path + '.incomplete'

        skip_download = False
        if exists(temp_path):
            if getsize(temp_path) > product_info['size']:
                # A too-large partial file cannot be resumed from; start over.
                self.logger.warning(
                    "Existing incomplete file %s is larger than the expected final size"
                    " (%s vs %s bytes). Deleting it.",
                    str(temp_path), getsize(temp_path), product_info['size'])
                remove(temp_path)
            elif getsize(temp_path) == product_info['size']:
                if self._md5_compare(temp_path, product_info['md5']):
                    # Full, valid download already on disk - only the rename remains.
                    skip_download = True
                else:
                    # Log a warning since this should never happen
                    self.logger.warning(
                        "Existing incomplete file %s appears to be fully downloaded but "
                        "its checksum is incorrect. Deleting it.",
                        str(temp_path))
                    remove(temp_path)
            else:
                # continue downloading
                self.logger.info(
                    "Download will resume from existing incomplete file %s.", temp_path)
                pass
        if not skip_download:
            # Store the number of downloaded bytes for unit tests
            product_info['downloaded_bytes'] = self._download(
                product_info['url'], temp_path, self.session, product_info['size'])

        # Check integrity with MD5 checksum
        if checksum is True:
            if not self._md5_compare(temp_path, product_info['md5']):
                remove(temp_path)
                raise InvalidChecksumError('File corrupt: checksums do not match')

        # Download successful, rename the temporary file to its proper name
        shutil.move(temp_path, path)
        return product_info
    def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
        """Download a list of products.

        Takes a list of product IDs as input. This means that the return value of query() can be
        passed directly to this method.

        File names on the server are used for the downloaded files, e.g.
        "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

        In case of interruptions or other exceptions, downloading will restart from where it left
        off. Downloading is attempted at most max_attempts times to avoid getting stuck with
        unrecoverable errors.

        Parameters
        ----------
        products : list
            List of product IDs
        directory_path : string
            Directory where the downloaded files will be downloaded
        max_attempts : int, optional
            Number of allowed retries before giving up downloading a product. Defaults to 10.
        checksum : bool, optional
            If True, verify the downloaded files' integrity by checking its MD5 checksum.
            Throws InvalidChecksumError if the checksum does not match.
            Defaults to True.

        Raises
        ------
        Raises the most recent downloading exception if all downloads failed.

        Returns
        -------
        dict[string, dict]
            A dictionary containing the return value from download() for each successfully
            downloaded product.
        dict[string, dict]
            A dictionary containing the product information for products whose retrieval
            from the long term archive was successfully triggered.
        set[string]
            The list of products that failed to download.
        """
        product_ids = list(products)
        self.logger.info("Will download %d products", len(product_ids))
        return_values = OrderedDict()
        last_exception = None
        for i, product_id in enumerate(products):
            for attempt_num in range(max_attempts):
                try:
                    product_info = self.download(product_id, directory_path, checksum)
                    return_values[product_id] = product_info
                    break
                except (KeyboardInterrupt, SystemExit):
                    # User-initiated aborts must never be swallowed by the retry loop.
                    raise
                except InvalidChecksumError as e:
                    # Corruption is retryable - the next attempt re-downloads.
                    last_exception = e
                    self.logger.warning(
                        "Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
                except SentinelAPILTAError as e:
                    # LTA refusals won't resolve by retrying immediately - move on.
                    last_exception = e
                    self.logger.exception("There was an error retrieving %s from the LTA", product_id)
                    break
                except Exception as e:
                    last_exception = e
                    self.logger.exception("There was an error downloading %s", product_id)
            self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
        failed = set(products) - set(return_values)
        # split up sucessfully processed products into downloaded and only triggered retrieval from the LTA
        triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
        downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
        if len(failed) == len(product_ids) and last_exception is not None:
            # Everything failed - surface the most recent error to the caller.
            raise last_exception
        return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
def _query_names(self, names):
"""Find products by their names, e.g.
S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.
Note that duplicates exist on server, so multiple products can be returned for each name.
Parameters
----------
names : list[string]
List of product names.
Returns
-------
dict[string, dict[str, dict]]
A dictionary mapping each name to a dictionary which contains the products with
that name (with ID as the key).
"""
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
products = {}
# 40 names per query fits reasonably well inside the query limit
for chunk in chunks(names, 40):
query = " OR ".join(chunk)
products.update(self.query(raw=query))
# Group the products
output = OrderedDict((name, dict()) for name in names)
for id, metadata in products.items():
name = metadata['identifier']
output[name][id] = metadata
return output
    def check_files(self, paths=None, ids=None, directory=None, delete=False):
        """Verify the integrity of product files on disk.

        Integrity is checked by comparing the size and checksum of the file with the respective
        values on the server.

        The input can be a list of products to check or a list of IDs and a directory.

        In cases where multiple products with different IDs exist on the server for given product
        name, the file is considered to be correct if any of them matches the file size and
        checksum. A warning is logged in such situations.

        The corrupt products' OData info is included in the return value to make it easier to
        re-download the products, if necessary.

        Parameters
        ----------
        paths : list[string]
            List of product file paths.
        ids : list[string]
            List of product IDs.
        directory : string
            Directory where the files are located, if checking based on product IDs.
        delete : bool
            Whether to delete corrupt products. Defaults to False.

        Returns
        -------
        dict[str, list[dict]]
            A dictionary listing the invalid or missing files. The dictionary maps the corrupt
            file paths to a list of OData dictionaries of matching products on the server (as
            returned by :meth:`SentinelAPI.get_product_odata()`).
        """
        if not ids and not paths:
            raise ValueError("Must provide either file paths or product IDs and a directory")
        if ids and not directory:
            raise ValueError("Directory value missing")
        paths = paths or []
        ids = ids or []

        def name_from_path(path):
            # Product name is the file name without directory or extension.
            return splitext(basename(path))[0]

        # Get product IDs corresponding to the files on disk
        names = []
        if paths:
            names = list(map(name_from_path, paths))
            result = self._query_names(names)
            for product_dicts in result.values():
                ids += list(product_dicts)
        names_from_paths = set(names)
        ids = set(ids)

        # Collect the OData information for each product
        # Product name -> list of matching odata dicts
        product_infos = defaultdict(list)
        for id in ids:
            odata = self.get_product_odata(id)
            name = odata['title']
            product_infos[name].append(odata)

            # Collect
            # IDs passed without a corresponding path imply a file in `directory`.
            if name not in names_from_paths:
                paths.append(join(directory, name + '.zip'))

        # Now go over the list of products and check them
        corrupt = {}
        for path in paths:
            name = name_from_path(path)

            if len(product_infos[name]) > 1:
                self.logger.warning("{} matches multiple products on server".format(path))

            if not exists(path):
                # We will consider missing files as corrupt also
                self.logger.info("{} does not exist on disk".format(path))
                corrupt[path] = product_infos[name]
                continue

            # Any one server-side duplicate matching size+checksum makes the file valid.
            is_fine = False
            for product_info in product_infos[name]:
                if (getsize(path) == product_info['size'] and
                        self._md5_compare(path, product_info['md5'])):
                    is_fine = True
                    break
            if not is_fine:
                self.logger.info("{} is corrupt".format(path))
                corrupt[path] = product_infos[name]
                if delete:
                    remove(path)

        return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
"""Compare a given MD5 checksum with one calculated from a file."""
with closing(self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B",
unit_scale=True)) as progress:
md5 = hashlib.md5()
with open(file_path, "rb") as f:
while True:
block_data = f.read(block_size)
if not block_data:
break
md5.update(block_data)
progress.update(len(block_data))
return md5.hexdigest().lower() == checksum.lower()
    def _download(self, url, path, session, file_size):
        """Stream `url` to `path`, resuming an existing partial file if present.

        Returns the number of bytes downloaded in this call (excluding any
        previously downloaded portion).
        """
        headers = {}
        continuing = exists(path)
        if continuing:
            # Resume from where the previous attempt stopped using an HTTP
            # Range request.
            already_downloaded_bytes = getsize(path)
            headers = {'Range': 'bytes={}-'.format(already_downloaded_bytes)}
        else:
            already_downloaded_bytes = 0
        downloaded_bytes = 0
        with closing(session.get(url, stream=True, auth=session.auth,
                                 headers=headers, timeout=self.timeout)) as r, \
                closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
                                   unit_scale=True, initial=already_downloaded_bytes)) as progress:
            _check_scihub_response(r, test_json=False)
            chunk_size = 2 ** 20  # download in 1 MB chunks
            # Append when resuming so the partial content is preserved.
            mode = 'ab' if continuing else 'wb'
            with open(path, mode) as f:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
                        progress.update(len(chunk))
                        downloaded_bytes += len(chunk)
        # Return the number of bytes downloaded
        return downloaded_bytes
def _tqdm(self, **kwargs):
"""tqdm progressbar wrapper. May be overridden to customize progressbar behavior"""
kwargs.update({'disable': not self.show_progressbars})
return tqdm(**kwargs)
class SentinelAPIError(Exception):
    """Invalid responses from DataHub.

    Attributes
    ----------
    msg: str
        The error message.
    response: requests.Response
        The response from the server as a `requests.Response` object.
    """

    def __init__(self, msg=None, response=None):
        self.msg = msg
        self.response = response

    def __str__(self):
        # Both attributes default to None; guard so that str(exc) never raises
        # ('\n' in None is a TypeError, None.status_code an AttributeError).
        msg = self.msg or ''
        return 'HTTP status {0} {1}: {2}'.format(
            getattr(self.response, 'status_code', None),
            getattr(self.response, 'reason', None),
            ('\n' if '\n' in msg else '') + msg)
class SentinelAPILTAError(SentinelAPIError):
    """ Error when retrieving a product from the Long Term Archive

    Attributes
    ----------
    msg: str
        The error message.
    response: requests.Response
        The response from the server as a `requests.Response` object.
    """

    def __init__(self, msg=None, response=None):
        # Construction contract is identical to the base class.
        super(SentinelAPILTAError, self).__init__(msg, response)
class InvalidChecksumError(Exception):
    """MD5 checksum of a local file does not match the one from the server."""
def read_geojson(geojson_file):
    """Read a GeoJSON file into a GeoJSON object.

    Parameters
    ----------
    geojson_file : str
        Path of the GeoJSON file to load.
    """
    with open(geojson_file) as fp:
        return geojson.load(fp)
def geojson_to_wkt(geojson_obj, feature_number=0, decimals=4):
    """Convert a GeoJSON object to Well-Known Text. Intended for use with OpenSearch queries.

    In case of FeatureCollection, only one of the features is used (the first by default).
    3D points are converted to 2D.

    Parameters
    ----------
    geojson_obj : dict
        a GeoJSON object
    feature_number : int, optional
        Feature to extract polygon from (in case of MultiPolygon
        FeatureCollection), defaults to first Feature
    decimals : int, optional
        Number of decimal figures after point to round coordinate to. Defaults to 4 (about 10
        meters).

    Returns
    -------
    polygon coordinates
        string of comma separated coordinate tuples (lon, lat) to be used by SentinelAPI
    """
    # Accept a bare geometry, a Feature, or a FeatureCollection.
    if 'coordinates' in geojson_obj:
        geometry = geojson_obj
    elif 'geometry' in geojson_obj:
        geometry = geojson_obj['geometry']
    else:
        geometry = geojson_obj['features'][feature_number]['geometry']

    def strip_to_2d(coords):
        # Recurse through arbitrarily nested coordinate lists,
        # keeping only the first two components (lon, lat) of each point.
        if isinstance(coords[0], (list, tuple)):
            return [strip_to_2d(c) for c in coords]
        return coords[:2]

    def validate(coords):
        if isinstance(coords[0], (list, tuple)):
            for c in coords:
                validate(c)
        else:
            if coords[0] > 180 or coords[0] < -180:
                raise ValueError('Longitude is out of bounds, check your JSON format or data')
            if coords[1] > 90 or coords[1] < -90:
                raise ValueError('Latitude is out of bounds, check your JSON format or data')

    # Discard z-coordinate, if it exists
    geometry['coordinates'] = strip_to_2d(geometry['coordinates'])
    validate(geometry['coordinates'])

    wkt = geomet.wkt.dumps(geometry, decimals=decimals)
    # Strip unnecessary spaces
    return re.sub(r'(?<!\d) ', '', wkt)
def format_query_date(in_date):
    r"""
    Format a date, datetime or a YYYYMMDD string input as YYYY-MM-DDThh:mm:ssZ
    or validate a date string as suitable for the full text search interface and return it.

    `None` will be converted to '\*', meaning an unlimited date bound in date ranges.

    Parameters
    ----------
    in_date : str or datetime or date or None
        Date to be formatted

    Returns
    -------
    str
        Formatted string

    Raises
    ------
    ValueError
        If the input date type is incorrect or passed date string is invalid
    """
    # NOTE: raw docstring above fixes the previously invalid '\*' escape
    # sequence in a non-raw string literal (DeprecationWarning in Python 3.6+).
    if in_date is None:
        return '*'
    if isinstance(in_date, (datetime, date)):
        return in_date.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif not isinstance(in_date, string_types):
        raise ValueError('Expected a string or a datetime object. Received {}.'.format(in_date))

    in_date = in_date.strip()
    if in_date == '*':
        # '*' can be used for one-sided range queries e.g. ingestiondate:[* TO NOW-1YEAR]
        return in_date

    # Reference: https://cwiki.apache.org/confluence/display/solr/Working+with+Dates
    # ISO-8601 date or NOW
    valid_date_pattern = r'^(?:\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(?:\.\d+)?Z|NOW)'
    # date arithmetic suffix is allowed
    units = r'(?:YEAR|MONTH|DAY|HOUR|MINUTE|SECOND)'
    valid_date_pattern += r'(?:[-+]\d+{}S?)*'.format(units)
    # dates can be rounded to a unit of time
    # e.g. "NOW/DAY" for dates since 00:00 today
    valid_date_pattern += r'(?:/{}S?)*$'.format(units)
    # (redundant second .strip() call removed - input is already stripped above)
    if re.match(valid_date_pattern, in_date):
        return in_date
    try:
        # Fall back to the compact YYYYMMDD form.
        return datetime.strptime(in_date, '%Y%m%d').strftime('%Y-%m-%dT%H:%M:%SZ')
    except ValueError:
        raise ValueError('Unsupported date value {}'.format(in_date))
def _format_order_by(order_by):
if not order_by or not order_by.strip():
return None
output = []
for part in order_by.split(','):
part = part.strip()
dir = " asc"
if part[0] == '+':
part = part[1:]
elif part[0] == '-':
dir = " desc"
part = part[1:]
if not part or not part.isalnum():
raise ValueError("Invalid order by value ({})".format(order_by))
output.append(part + dir)
return ",".join(output)
def _parse_gml_footprint(geometry_str):
geometry_xml = ET.fromstring(geometry_str)
poly_coords_str = geometry_xml \
.find('{http://www.opengis.net/gml}outerBoundaryIs') \
.find('{http://www.opengis.net/gml}LinearRing') \
.findtext('{http://www.opengis.net/gml}coordinates')
poly_coords = (coord.split(",")[::-1] for coord in poly_coords_str.split(" "))
coord_string = ",".join(" ".join(coord) for coord in poly_coords)
return "POLYGON(({}))".format(coord_string)
def _parse_iso_date(content):
if '.' in content:
return datetime.strptime(content, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
return datetime.strptime(content, '%Y-%m-%dT%H:%M:%SZ')
def _parse_odata_timestamp(in_date):
"""Convert the timestamp received from OData JSON API to a datetime object.
"""
timestamp = int(in_date.replace('/Date(', '').replace(')/', ''))
seconds = timestamp // 1000
ms = timestamp % 1000
return datetime.utcfromtimestamp(seconds) + timedelta(milliseconds=ms)
def _parse_opensearch_response(products):
    """Convert a query response to a dictionary.

    The resulting dictionary structure is {<product id>: {<property>: <value>}}.
    Typed entries (date/int/long/float/double) are converted to the matching
    Python types; everything else keeps its string value.
    """
    converters = {'date': _parse_iso_date, 'int': int, 'long': int,
                  'float': float, 'double': float}
    output = OrderedDict()
    for entry in products:
        attrs = {}
        output[entry['id']] = attrs
        for key in entry:
            if key == 'id':
                continue
            raw = entry[key]
            if isinstance(raw, string_types):
                attrs[key] = raw
                continue
            # A single property arrives as a dict, several as a list.
            items = [raw] if isinstance(raw, dict) else raw
            if key == 'link':
                for item in items:
                    label = 'link_' + item['rel'] if 'rel' in item else 'link'
                    attrs[label] = item['href']
            else:
                # Fall back to the identity conversion for untyped keys.
                convert = converters.get(key, lambda v: v)
                for item in items:
                    try:
                        attrs[item['name']] = convert(item['content'])
                    except KeyError:
                        # Sentinel-3 has one element 'arr'
                        # which violates the name:content convention
                        attrs[item['name']] = convert(item['str'])
    return output
def _parse_odata_response(product):
    """Flatten a single OData product entry into a metadata dictionary.

    Extracts the core fields (id, title, size, checksum, content date,
    footprint, download URL, online flag, creation/ingestion dates) and,
    when the query was expanded with Attributes, converts each extended
    attribute to int, float or datetime where possible.
    """
    checksum_info = product['Checksum']
    output = {
        'id': product['Id'],
        'title': product['Name'],
        'size': int(product['ContentLength']),
        checksum_info['Algorithm'].lower(): checksum_info['Value'],
        'date': _parse_odata_timestamp(product['ContentDate']['Start']),
        'footprint': _parse_gml_footprint(product['ContentGeometry']),
        'url': product['__metadata']['media_src'],
        'Online': product.get('Online', True),
        'Creation Date': _parse_odata_timestamp(product['CreationDate']),
        'Ingestion Date': _parse_odata_timestamp(product['IngestionDate']),
    }
    # Parse the extended metadata, if provided. Converters are tried in
    # order (int before float before date); the first that succeeds wins,
    # and the raw value is kept when all of them raise ValueError.
    for attr in product['Attributes'].get('results', []):
        value = attr['Value']
        for convert in (int, float, _parse_iso_date):
            try:
                value = convert(attr['Value'])
                break
            except ValueError:
                pass
        output[attr['Name']] = value
    return output
|
sentinelsat/sentinelsat | sentinelsat/sentinel.py | _parse_odata_timestamp | python | def _parse_odata_timestamp(in_date):
timestamp = int(in_date.replace('/Date(', '').replace(')/', ''))
seconds = timestamp // 1000
ms = timestamp % 1000
return datetime.utcfromtimestamp(seconds) + timedelta(milliseconds=ms) | Convert the timestamp received from OData JSON API to a datetime object. | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L1066-L1072 | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import hashlib
import logging
import re
import shutil
import warnings
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict
from contextlib import closing
from datetime import date, datetime, timedelta
from os import remove
from os.path import basename, exists, getsize, join, splitext
import geojson
import geomet.wkt
import html2text
import requests
from six import string_types
from six.moves.urllib.parse import urljoin, quote_plus
from tqdm import tqdm
from . import __version__ as sentinelsat_version
class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
show_progressbars=True, timeout=None):
self.session = requests.Session()
if user and password:
self.session.auth = (user, password)
self.api_url = api_url if api_url.endswith('/') else api_url + '/'
self.page_size = 100
self.user_agent = 'sentinelsat/' + sentinelsat_version
self.session.headers['User-Agent'] = self.user_agent
self.show_progressbars = show_progressbars
self.timeout = timeout
# For unit tests
self._last_query = None
self._last_response = None
def query(self, area=None, date=None, raw=None, area_relation='Intersects',
order_by=None, limit=None, offset=0, **keywords):
"""Query the OpenSearch API with the coordinates of an area, a date interval
and any other search keywords accepted by the API.
Parameters
----------
area : str, optional
The area of interest formatted as a Well-Known Text string.
date : tuple of (str or datetime) or str, optional
A time interval filter based on the Sensing Start Time of the products.
Expects a tuple of (start, end), e.g. ("NOW-1DAY", "NOW").
The timestamps can be either a Python datetime or a string in one of the
following formats:
- yyyyMMdd
- yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)
- yyyy-MM-ddThh:mm:ssZ
- NOW
- NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)
- NOW+<n>DAY(S)
- yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)
- NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit
Alternatively, an already fully formatted string such as "[NOW-1DAY TO NOW]" can be
used as well.
raw : str, optional
Additional query text that will be appended to the query.
area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
What relation to use for testing the AOI. Case insensitive.
- Intersects: true if the AOI and the footprint intersect (default)
- Contains: true if the AOI is inside the footprint
- IsWithin: true if the footprint is inside the AOI
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order,
respectively. Ascending order is used if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
**keywords
Additional keywords can be used to specify other query parameters,
e.g. `relativeorbitnumber=70`.
See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
for a full list.
Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.
`None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.
Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.
The time interval formats accepted by the `date` parameter can also be used with
any other parameters that expect time intervals (that is: 'beginposition', 'endposition',
'date', 'creationdate', and 'ingestiondate').
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
"""
query = self.format_query(area, date, raw, area_relation, **keywords)
self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
order_by, limit, offset, query)
formatted_order_by = _format_order_by(order_by)
response, count = self._load_query(query, formatted_order_by, limit, offset)
self.logger.info("Found %s products", count)
return _parse_opensearch_response(response)
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
"""Create a OpenSearch API query string.
"""
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts)
def query_raw(self, query, order_by=None, limit=None, offset=0):
"""
Do a full-text query on the OpenSearch API using the format specified in
https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release.
Parameters
----------
query : str
The query string.
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively.
Ascending order is used, if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
"""
warnings.warn(
"query_raw() has been merged with query(). use query(raw=...) instead.",
PendingDeprecationWarning
)
return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
"""Get the number of products matching a query.
Accepted parameters are identical to :meth:`SentinelAPI.query()`.
This is a significantly more efficient alternative to doing `len(api.query())`,
which can take minutes to run for queries matching thousands of products.
Returns
-------
int
The number of products matching a query.
"""
for kw in ['order_by', 'limit', 'offset']:
# Allow these function arguments to be included for compatibility with query(),
# but ignore them.
if kw in keywords:
del keywords[kw]
query = self.format_query(area, date, raw, area_relation, **keywords)
_, total_count = self._load_query(query, limit=0)
return total_count
    def _load_query(self, query, order_by=None, limit=None, offset=0):
        """Run a query and page through the results until ``limit`` (or the
        total number of matches) is reached.

        Returns a tuple of (list of product entries, server-side match count).
        """
        products, count = self._load_subquery(query, order_by, limit, offset)
        # repeat query until all results have been loaded
        max_offset = count
        if limit is not None:
            max_offset = min(count, offset + limit)
        if max_offset > offset + self.page_size:
            # More than one page is needed; show progress across the extra pages.
            progress = self._tqdm(desc="Querying products",
                                  initial=self.page_size,
                                  total=max_offset - offset,
                                  unit=' products')
            for new_offset in range(offset + self.page_size, max_offset, self.page_size):
                new_limit = limit
                if limit is not None:
                    # Shrink the remaining limit by what has already been fetched.
                    new_limit = limit - new_offset + offset
                ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
                progress.update(len(ret))
                products += ret
            progress.close()
        return products, count
def _load_subquery(self, query, order_by=None, limit=None, offset=0):
# store last query (for testing)
self._last_query = query
self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)
# load query results
url = self._format_url(order_by, limit, offset)
response = self.session.post(url, {'q': query}, auth=self.session.auth,
headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
timeout=self.timeout)
_check_scihub_response(response)
# store last status code (for testing)
self._last_response = response
# parse response content
try:
json_feed = response.json()['feed']
if json_feed['opensearch:totalResults'] is None:
# We are using some unintended behavior of the server that a null is
# returned as the total results value when the query string was incorrect.
raise SentinelAPIError(
'Invalid query string. Check the parameters and format.', response)
total_results = int(json_feed['opensearch:totalResults'])
except (ValueError, KeyError):
raise SentinelAPIError('API response not valid. JSON decoding failed.', response)
products = json_feed.get('entry', [])
# this verification is necessary because if the query returns only
# one product, self.products will be a dict not a list
if isinstance(products, dict):
products = [products]
return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
@staticmethod
def to_geojson(products):
"""Return the products from a query response as a GeoJSON with the values in their
appropriate Python types.
"""
feature_list = []
for i, (product_id, props) in enumerate(products.items()):
props = props.copy()
props['id'] = product_id
poly = geomet.wkt.loads(props['footprint'])
del props['footprint']
del props['gmlfootprint']
# Fix "'datetime' is not JSON serializable"
for k, v in props.items():
if isinstance(v, (date, datetime)):
props[k] = v.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
feature_list.append(
geojson.Feature(geometry=poly, id=i, properties=props)
)
return geojson.FeatureCollection(feature_list)
@staticmethod
def to_dataframe(products):
"""Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index')
@staticmethod
def to_geodataframe(products):
"""Return the products from a query response as a GeoPandas GeoDataFrame
with the values in their appropriate Python types.
"""
try:
import geopandas as gpd
import shapely.wkt
except ImportError:
raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")
crs = {'init': 'epsg:4326'} # WGS84
if len(products) == 0:
return gpd.GeoDataFrame(crs=crs)
df = SentinelAPI.to_dataframe(products)
geometry = [shapely.wkt.loads(fp) for fp in df['footprint']]
# remove useless columns
df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)
return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
def get_product_odata(self, id, full=False):
"""Access OData API to get info about a product.
Returns a dict containing the id, title, size, md5sum, date, footprint and download url
of the product. The date field corresponds to the Start ContentDate value.
If `full` is set to True, then the full, detailed metadata of the product is returned
in addition to the above.
Parameters
----------
id : string
The UUID of the product to query
full : bool
Whether to get the full metadata for the Product. False by default.
Returns
-------
dict[str, Any]
A dictionary with an item for each metadata attribute
Notes
-----
For a full list of mappings between the OpenSearch (Solr) and OData attribute names
see the following definition files:
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
"""
url = urljoin(self.api_url, u"odata/v1/Products('{}')?$format=json".format(id))
if full:
url += '&$expand=Attributes'
response = self.session.get(url, auth=self.session.auth,
timeout=self.timeout)
_check_scihub_response(response)
values = _parse_odata_response(response.json()['d'])
return values
def _trigger_offline_retrieval(self, url):
""" Triggers retrieval of an offline product
Trying to download an offline product triggers its retrieval from the long term archive.
The returned HTTP status code conveys whether this was successful.
Parameters
----------
url : string
URL for downloading the product
Notes
-----
https://scihub.copernicus.eu/userguide/LongTermArchive
"""
with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
# check https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
if r.status_code == 202:
self.logger.info("Accepted for retrieval")
elif r.status_code == 503:
self.logger.error("Request not accepted")
raise SentinelAPILTAError('Request for retrieval from LTA not accepted', r)
elif r.status_code == 403:
self.logger.error("Requests exceed user quota")
raise SentinelAPILTAError('Requests for retrieval from LTA exceed user quota', r)
elif r.status_code == 500:
# should not happen
self.logger.error("Trying to download an offline product")
raise SentinelAPILTAError('Trying to download an offline product', r)
return r.status_code
def download(self, id, directory_path='.', checksum=True):
"""Download a product.
Uses the filename on the server for the downloaded file, e.g.
"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".
Incomplete downloads are continued and complete files are skipped.
Parameters
----------
id : string
UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
directory_path : string, optional
Where the file will be downloaded
checksum : bool, optional
If True, verify the downloaded file's integrity by checking its MD5 checksum.
Throws InvalidChecksumError if the checksum does not match.
Defaults to True.
Returns
-------
product_info : dict
Dictionary containing the product's info from get_product_info() as well as
the path on disk.
Raises
------
InvalidChecksumError
If the MD5 checksum does not match the checksum on the server.
"""
product_info = self.get_product_odata(id)
path = join(directory_path, product_info['title'] + '.zip')
product_info['path'] = path
product_info['downloaded_bytes'] = 0
self.logger.info('Downloading %s to %s', id, path)
if exists(path):
# We assume that the product has been downloaded and is complete
return product_info
# An incomplete download triggers the retrieval from the LTA if the product is not online
if not product_info['Online']:
self.logger.warning(
'Product %s is not online. Triggering retrieval from long term archive.',
product_info['id'])
self._trigger_offline_retrieval(product_info['url'])
return product_info
# Use a temporary file for downloading
temp_path = path + '.incomplete'
skip_download = False
if exists(temp_path):
if getsize(temp_path) > product_info['size']:
self.logger.warning(
"Existing incomplete file %s is larger than the expected final size"
" (%s vs %s bytes). Deleting it.",
str(temp_path), getsize(temp_path), product_info['size'])
remove(temp_path)
elif getsize(temp_path) == product_info['size']:
if self._md5_compare(temp_path, product_info['md5']):
skip_download = True
else:
# Log a warning since this should never happen
self.logger.warning(
"Existing incomplete file %s appears to be fully downloaded but "
"its checksum is incorrect. Deleting it.",
str(temp_path))
remove(temp_path)
else:
# continue downloading
self.logger.info(
"Download will resume from existing incomplete file %s.", temp_path)
pass
if not skip_download:
# Store the number of downloaded bytes for unit tests
product_info['downloaded_bytes'] = self._download(
product_info['url'], temp_path, self.session, product_info['size'])
# Check integrity with MD5 checksum
if checksum is True:
if not self._md5_compare(temp_path, product_info['md5']):
remove(temp_path)
raise InvalidChecksumError('File corrupt: checksums do not match')
# Download successful, rename the temporary file to its proper name
shutil.move(temp_path, path)
return product_info
def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
"""Download a list of products.
Takes a list of product IDs as input. This means that the return value of query() can be
passed directly to this method.
File names on the server are used for the downloaded files, e.g.
"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".
In case of interruptions or other exceptions, downloading will restart from where it left
off. Downloading is attempted at most max_attempts times to avoid getting stuck with
unrecoverable errors.
Parameters
----------
products : list
List of product IDs
directory_path : string
Directory where the downloaded files will be downloaded
max_attempts : int, optional
Number of allowed retries before giving up downloading a product. Defaults to 10.
checksum : bool, optional
If True, verify the downloaded files' integrity by checking its MD5 checksum.
Throws InvalidChecksumError if the checksum does not match.
Defaults to True.
Raises
------
Raises the most recent downloading exception if all downloads failed.
Returns
-------
dict[string, dict]
A dictionary containing the return value from download() for each successfully
downloaded product.
dict[string, dict]
A dictionary containing the product information for products whose retrieval
from the long term archive was successfully triggered.
set[string]
The list of products that failed to download.
"""
product_ids = list(products)
self.logger.info("Will download %d products", len(product_ids))
return_values = OrderedDict()
last_exception = None
for i, product_id in enumerate(products):
for attempt_num in range(max_attempts):
try:
product_info = self.download(product_id, directory_path, checksum)
return_values[product_id] = product_info
break
except (KeyboardInterrupt, SystemExit):
raise
except InvalidChecksumError as e:
last_exception = e
self.logger.warning(
"Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
except SentinelAPILTAError as e:
last_exception = e
self.logger.exception("There was an error retrieving %s from the LTA", product_id)
break
except Exception as e:
last_exception = e
self.logger.exception("There was an error downloading %s", product_id)
self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
failed = set(products) - set(return_values)
# split up sucessfully processed products into downloaded and only triggered retrieval from the LTA
triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
if len(failed) == len(product_ids) and last_exception is not None:
raise last_exception
return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
def _query_names(self, names):
"""Find products by their names, e.g.
S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.
Note that duplicates exist on server, so multiple products can be returned for each name.
Parameters
----------
names : list[string]
List of product names.
Returns
-------
dict[string, dict[str, dict]]
A dictionary mapping each name to a dictionary which contains the products with
that name (with ID as the key).
"""
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
products = {}
# 40 names per query fits reasonably well inside the query limit
for chunk in chunks(names, 40):
query = " OR ".join(chunk)
products.update(self.query(raw=query))
# Group the products
output = OrderedDict((name, dict()) for name in names)
for id, metadata in products.items():
name = metadata['identifier']
output[name][id] = metadata
return output
def check_files(self, paths=None, ids=None, directory=None, delete=False):
"""Verify the integrity of product files on disk.
Integrity is checked by comparing the size and checksum of the file with the respective
values on the server.
The input can be a list of products to check or a list of IDs and a directory.
In cases where multiple products with different IDs exist on the server for given product
name, the file is considered to be correct if any of them matches the file size and
checksum. A warning is logged in such situations.
The corrupt products' OData info is included in the return value to make it easier to
re-download the products, if necessary.
Parameters
----------
paths : list[string]
List of product file paths.
ids : list[string]
List of product IDs.
directory : string
Directory where the files are located, if checking based on product IDs.
delete : bool
Whether to delete corrupt products. Defaults to False.
Returns
-------
dict[str, list[dict]]
A dictionary listing the invalid or missing files. The dictionary maps the corrupt
file paths to a list of OData dictionaries of matching products on the server (as
returned by :meth:`SentinelAPI.get_product_odata()`).
"""
if not ids and not paths:
raise ValueError("Must provide either file paths or product IDs and a directory")
if ids and not directory:
raise ValueError("Directory value missing")
paths = paths or []
ids = ids or []
def name_from_path(path):
return splitext(basename(path))[0]
# Get product IDs corresponding to the files on disk
names = []
if paths:
names = list(map(name_from_path, paths))
result = self._query_names(names)
for product_dicts in result.values():
ids += list(product_dicts)
names_from_paths = set(names)
ids = set(ids)
# Collect the OData information for each product
# Product name -> list of matching odata dicts
product_infos = defaultdict(list)
for id in ids:
odata = self.get_product_odata(id)
name = odata['title']
product_infos[name].append(odata)
# Collect
if name not in names_from_paths:
paths.append(join(directory, name + '.zip'))
# Now go over the list of products and check them
corrupt = {}
for path in paths:
name = name_from_path(path)
if len(product_infos[name]) > 1:
self.logger.warning("{} matches multiple products on server".format(path))
if not exists(path):
# We will consider missing files as corrupt also
self.logger.info("{} does not exist on disk".format(path))
corrupt[path] = product_infos[name]
continue
is_fine = False
for product_info in product_infos[name]:
if (getsize(path) == product_info['size'] and
self._md5_compare(path, product_info['md5'])):
is_fine = True
break
if not is_fine:
self.logger.info("{} is corrupt".format(path))
corrupt[path] = product_infos[name]
if delete:
remove(path)
return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
"""Compare a given MD5 checksum with one calculated from a file."""
with closing(self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B",
unit_scale=True)) as progress:
md5 = hashlib.md5()
with open(file_path, "rb") as f:
while True:
block_data = f.read(block_size)
if not block_data:
break
md5.update(block_data)
progress.update(len(block_data))
return md5.hexdigest().lower() == checksum.lower()
def _download(self, url, path, session, file_size):
headers = {}
continuing = exists(path)
if continuing:
already_downloaded_bytes = getsize(path)
headers = {'Range': 'bytes={}-'.format(already_downloaded_bytes)}
else:
already_downloaded_bytes = 0
downloaded_bytes = 0
with closing(session.get(url, stream=True, auth=session.auth,
headers=headers, timeout=self.timeout)) as r, \
closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
unit_scale=True, initial=already_downloaded_bytes)) as progress:
_check_scihub_response(r, test_json=False)
chunk_size = 2 ** 20 # download in 1 MB chunks
mode = 'ab' if continuing else 'wb'
with open(path, mode) as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress.update(len(chunk))
downloaded_bytes += len(chunk)
# Return the number of bytes downloaded
return downloaded_bytes
    def _tqdm(self, **kwargs):
        """tqdm progressbar wrapper. May be overridden to customize progressbar behavior"""
        # Progressbars are globally suppressed when show_progressbars is False.
        kwargs.update({'disable': not self.show_progressbars})
        return tqdm(**kwargs)
class SentinelAPIError(Exception):
    """Invalid responses from DataHub.

    Attributes
    ----------
    msg: str
        The error message.
    response: requests.Response
        The response from the server as a `requests.Response` object.
    """

    def __init__(self, msg=None, response=None):
        self.msg = msg
        self.response = response

    def __str__(self):
        # Both attributes default to None; render gracefully in that case
        # instead of raising a TypeError/AttributeError from inside the
        # error-reporting path.
        msg = self.msg or ''
        if self.response is None:
            return msg
        return 'HTTP status {0} {1}: {2}'.format(
            self.response.status_code, self.response.reason,
            ('\n' if '\n' in msg else '') + msg)
class SentinelAPILTAError(SentinelAPIError):
    """ Error when retrieving a product from the Long Term Archive

    Attributes
    ----------
    msg: str
        The error message.
    response: requests.Response
        The response from the server as a `requests.Response` object.
    """

    def __init__(self, msg=None, response=None):
        # Delegate to the base class, which stores msg and response.
        super(SentinelAPILTAError, self).__init__(msg, response)
class InvalidChecksumError(Exception):
    """MD5 checksum of a local file does not match the one from the server.

    Raised by :meth:`SentinelAPI.download` (when ``checksum=True``) after a
    completed download whose computed MD5 differs from the server's value.
    """
    pass
def read_geojson(geojson_file):
    """Read a GeoJSON file into a GeoJSON object."""
    with open(geojson_file) as fp:
        return geojson.load(fp)
def geojson_to_wkt(geojson_obj, feature_number=0, decimals=4):
    """Convert a GeoJSON object to Well-Known Text. Intended for use with OpenSearch queries.

    In case of FeatureCollection, only one of the features is used (the first by default).
    3D points are converted to 2D.

    Parameters
    ----------
    geojson_obj : dict
        a GeoJSON object
    feature_number : int, optional
        Feature to extract polygon from (in case of MultiPolygon
        FeatureCollection), defaults to first Feature
    decimals : int, optional
        Number of decimal figures after point to round coordinate to. Defaults to 4 (about 10
        meters).

    Returns
    -------
    polygon coordinates
        string of comma separated coordinate tuples (lon, lat) to be used by SentinelAPI
    """
    # Accept a bare geometry, a single Feature, or a FeatureCollection.
    if 'coordinates' in geojson_obj:
        geometry = geojson_obj
    elif 'geometry' in geojson_obj:
        geometry = geojson_obj['geometry']
    else:
        geometry = geojson_obj['features'][feature_number]['geometry']

    def ensure_2d(geometry):
        # Recursively truncate every innermost point to its first two values.
        if isinstance(geometry[0], (list, tuple)):
            return list(map(ensure_2d, geometry))
        else:
            return geometry[:2]

    def check_bounds(geometry):
        # Recursively validate that each point lies within lon/lat ranges.
        if isinstance(geometry[0], (list, tuple)):
            return list(map(check_bounds, geometry))
        else:
            if geometry[0] > 180 or geometry[0] < -180:
                raise ValueError('Longitude is out of bounds, check your JSON format or data')
            if geometry[1] > 90 or geometry[1] < -90:
                raise ValueError('Latitude is out of bounds, check your JSON format or data')

    # Discard z-coordinate, if it exists
    geometry['coordinates'] = ensure_2d(geometry['coordinates'])
    check_bounds(geometry['coordinates'])

    wkt = geomet.wkt.dumps(geometry, decimals=decimals)
    # Strip unnecessary spaces
    wkt = re.sub(r'(?<!\d) ', '', wkt)
    return wkt
def format_query_date(in_date):
    r"""
    Format a date, datetime or a YYYYMMDD string input as YYYY-MM-DDThh:mm:ssZ
    or validate a date string as suitable for the full text search interface and return it.

    `None` will be converted to '\*', meaning an unlimited date bound in date ranges.

    Parameters
    ----------
    in_date : str or datetime or date or None
        Date to be formatted

    Returns
    -------
    str
        Formatted string

    Raises
    ------
    ValueError
        If the input date type is incorrect or passed date string is invalid
    """
    if in_date is None:
        return '*'
    if isinstance(in_date, (datetime, date)):
        return in_date.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif not isinstance(in_date, string_types):
        raise ValueError('Expected a string or a datetime object. Received {}.'.format(in_date))

    # BUGFIX: the string was previously stripped twice; strip exactly once here.
    in_date = in_date.strip()
    if in_date == '*':
        # '*' can be used for one-sided range queries e.g. ingestiondate:[* TO NOW-1YEAR]
        return in_date

    # Reference: https://cwiki.apache.org/confluence/display/solr/Working+with+Dates
    # ISO-8601 date or NOW
    valid_date_pattern = r'^(?:\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(?:\.\d+)?Z|NOW)'
    # date arithmetic suffix is allowed
    units = r'(?:YEAR|MONTH|DAY|HOUR|MINUTE|SECOND)'
    valid_date_pattern += r'(?:[-+]\d+{}S?)*'.format(units)
    # dates can be rounded to a unit of time
    # e.g. "NOW/DAY" for dates since 00:00 today
    valid_date_pattern += r'(?:/{}S?)*$'.format(units)
    if re.match(valid_date_pattern, in_date):
        return in_date
    try:
        # Fall back to the compact YYYYMMDD form.
        return datetime.strptime(in_date, '%Y%m%d').strftime('%Y-%m-%dT%H:%M:%SZ')
    except ValueError:
        raise ValueError('Unsupported date value {}'.format(in_date))
def _check_scihub_response(response, test_json=True):
    """Check that the response from server has status code 2xx and that the response is valid JSON.

    Raises SentinelAPIError (with the best error message that can be extracted
    from the response) when either check fails.
    """
    # Prevent requests from needing to guess the encoding
    # SciHub appears to be using UTF-8 in all of their responses
    response.encoding = 'utf-8'
    try:
        response.raise_for_status()
        if test_json:
            response.json()
    except (requests.HTTPError, ValueError):
        # Try progressively less structured sources for the error message.
        # BUGFIX: the bare `except:` clauses below previously also swallowed
        # SystemExit/KeyboardInterrupt; narrowed to `except Exception:`.
        msg = "Invalid API response."
        try:
            msg = response.headers['cause-message']
        except Exception:
            try:
                msg = response.json()['error']['message']['value']
            except Exception:
                if not response.text.strip().startswith('{'):
                    try:
                        # Render an HTML error page as plain text
                        h = html2text.HTML2Text()
                        h.ignore_images = True
                        h.ignore_anchors = True
                        msg = h.handle(response.text).strip()
                    except Exception:
                        pass
        api_error = SentinelAPIError(msg, response)
        # Suppress "During handling of the above exception..." message
        # See PEP 409
        api_error.__cause__ = None
        raise api_error
def _format_order_by(order_by):
if not order_by or not order_by.strip():
return None
output = []
for part in order_by.split(','):
part = part.strip()
dir = " asc"
if part[0] == '+':
part = part[1:]
elif part[0] == '-':
dir = " desc"
part = part[1:]
if not part or not part.isalnum():
raise ValueError("Invalid order by value ({})".format(order_by))
output.append(part + dir)
return ",".join(output)
def _parse_gml_footprint(geometry_str):
geometry_xml = ET.fromstring(geometry_str)
poly_coords_str = geometry_xml \
.find('{http://www.opengis.net/gml}outerBoundaryIs') \
.find('{http://www.opengis.net/gml}LinearRing') \
.findtext('{http://www.opengis.net/gml}coordinates')
poly_coords = (coord.split(",")[::-1] for coord in poly_coords_str.split(" "))
coord_string = ",".join(" ".join(coord) for coord in poly_coords)
return "POLYGON(({}))".format(coord_string)
def _parse_iso_date(content):
if '.' in content:
return datetime.strptime(content, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
return datetime.strptime(content, '%Y-%m-%dT%H:%M:%SZ')
def _parse_opensearch_response(products):
    """Convert a query response to a dictionary.

    The resulting dictionary structure is {<product id>: {<property>: <value>}}.
    The property values are converted to their respective Python types unless `parse_values`
    is set to `False`.
    """
    converters = {'date': _parse_iso_date, 'int': int, 'long': int,
                  'float': float, 'double': float}

    def identity(value):
        # Keep the string type by default
        return value

    output = OrderedDict()
    for entry in products:
        attrs = {}
        output[entry['id']] = attrs
        for key, raw in entry.items():
            if key == 'id':
                continue
            if isinstance(raw, string_types):
                attrs[key] = raw
                continue
            # A single property arrives as a dict, several as a list of dicts.
            values = [raw] if isinstance(raw, dict) else raw
            if key == 'link':
                for item in values:
                    label = 'link_' + item['rel'] if 'rel' in item else 'link'
                    attrs[label] = item['href']
            else:
                convert = converters.get(key, identity)
                for item in values:
                    try:
                        attrs[item['name']] = convert(item['content'])
                    except KeyError:
                        # Sentinel-3 has one element 'arr'
                        # which violates the name:content convention
                        attrs[item['name']] = convert(item['str'])
    return output
def _parse_odata_response(product):
    """Flatten one OData product entry into a plain metadata dictionary."""
    output = {
        'id': product['Id'],
        'title': product['Name'],
        'size': int(product['ContentLength']),
        # Key is the checksum algorithm name lower-cased, e.g. 'md5'
        product['Checksum']['Algorithm'].lower(): product['Checksum']['Value'],
        # NOTE(review): _parse_odata_timestamp is defined elsewhere in this
        # module -- presumably it decodes the OData timestamp format; confirm there.
        'date': _parse_odata_timestamp(product['ContentDate']['Start']),
        'footprint': _parse_gml_footprint(product["ContentGeometry"]),
        'url': product['__metadata']['media_src'],
        # Missing 'Online' field is treated as an online (non-LTA) product
        'Online': product.get('Online', True),
        'Creation Date': _parse_odata_timestamp(product['CreationDate']),
        'Ingestion Date': _parse_odata_timestamp(product['IngestionDate']),
    }
    # Parse the extended metadata, if provided
    converters = [int, float, _parse_iso_date]
    for attr in product['Attributes'].get('results', []):
        value = attr['Value']
        # Try each converter in turn; on failure keep the raw string value
        for f in converters:
            try:
                value = f(attr['Value'])
                break
            except ValueError:
                pass
        output[attr['Name']] = value
    return output
|
sentinelsat/sentinelsat | sentinelsat/sentinel.py | _parse_opensearch_response | python | def _parse_opensearch_response(products):
converters = {'date': _parse_iso_date, 'int': int, 'long': int, 'float': float, 'double': float}
# Keep the string type by default
default_converter = lambda x: x
output = OrderedDict()
for prod in products:
product_dict = {}
prod_id = prod['id']
output[prod_id] = product_dict
for key in prod:
if key == 'id':
continue
if isinstance(prod[key], string_types):
product_dict[key] = prod[key]
else:
properties = prod[key]
if isinstance(properties, dict):
properties = [properties]
if key == 'link':
for p in properties:
name = 'link'
if 'rel' in p:
name = 'link_' + p['rel']
product_dict[name] = p['href']
else:
f = converters.get(key, default_converter)
for p in properties:
try:
product_dict[p['name']] = f(p['content'])
except KeyError:
# Sentinel-3 has one element 'arr'
# which violates the name:content convention
product_dict[p['name']] = f(p['str'])
return output | Convert a query response to a dictionary.
The resulting dictionary structure is {<product id>: {<property>: <value>}}.
The property values are converted to their respective Python types unless `parse_values`
is set to `False`. | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L1075-L1116 | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import hashlib
import logging
import re
import shutil
import warnings
import xml.etree.ElementTree as ET
from collections import OrderedDict, defaultdict
from contextlib import closing
from datetime import date, datetime, timedelta
from os import remove
from os.path import basename, exists, getsize, join, splitext
import geojson
import geomet.wkt
import html2text
import requests
from six import string_types
from six.moves.urllib.parse import urljoin, quote_plus
from tqdm import tqdm
from . import __version__ as sentinelsat_version
class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
    def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
                 show_progressbars=True, timeout=None):
        """Set up the HTTP session and query defaults (see class docstring for parameters)."""
        self.session = requests.Session()
        if user and password:
            # Left unset when absent; per the class docstring, None credentials
            # mean ~/.netrc is used instead.
            self.session.auth = (user, password)
        # Normalize with a trailing slash so later urljoin() calls resolve correctly
        self.api_url = api_url if api_url.endswith('/') else api_url + '/'
        self.page_size = 100
        self.user_agent = 'sentinelsat/' + sentinelsat_version
        self.session.headers['User-Agent'] = self.user_agent
        self.show_progressbars = show_progressbars
        self.timeout = timeout
        # For unit tests
        self._last_query = None
        self._last_response = None
    def query(self, area=None, date=None, raw=None, area_relation='Intersects',
              order_by=None, limit=None, offset=0, **keywords):
        """Query the OpenSearch API with the coordinates of an area, a date interval
        and any other search keywords accepted by the API.

        Parameters
        ----------
        area : str, optional
            The area of interest formatted as a Well-Known Text string.
        date : tuple of (str or datetime) or str, optional
            A time interval filter based on the Sensing Start Time of the products.
            Expects a tuple of (start, end), e.g. ("NOW-1DAY", "NOW").
            The timestamps can be either a Python datetime or a string in one of the
            following formats:

                - yyyyMMdd
                - yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)
                - yyyy-MM-ddThh:mm:ssZ
                - NOW
                - NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)
                - NOW+<n>DAY(S)
                - yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)
                - NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit

            Alternatively, an already fully formatted string such as "[NOW-1DAY TO NOW]" can be
            used as well.
        raw : str, optional
            Additional query text that will be appended to the query.
        area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
            What relation to use for testing the AOI. Case insensitive.

                - Intersects: true if the AOI and the footprint intersect (default)
                - Contains: true if the AOI is inside the footprint
                - IsWithin: true if the footprint is inside the AOI
        order_by: str, optional
            A comma-separated list of fields to order by (on server side).
            Prefix the field name by '+' or '-' to sort in ascending or descending order,
            respectively. Ascending order is used if prefix is omitted.
            Example: "cloudcoverpercentage, -beginposition".
        limit: int, optional
            Maximum number of products returned. Defaults to no limit.
        offset: int, optional
            The number of results to skip. Defaults to 0.
        **keywords
            Additional keywords can be used to specify other query parameters,
            e.g. `relativeorbitnumber=70`.
            See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
            for a full list.

            Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.
            `None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.
            Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.

            The time interval formats accepted by the `date` parameter can also be used with
            any other parameters that expect time intervals (that is: 'beginposition', 'endposition',
            'date', 'creationdate', and 'ingestiondate').

        Returns
        -------
        dict[string, dict]
            Products returned by the query as a dictionary with the product ID as the key and
            the product's attributes (a dictionary) as the value.
        """
        query = self.format_query(area, date, raw, area_relation, **keywords)
        self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
                          order_by, limit, offset, query)
        # Translate '+field,-field' into the 'field asc,field desc' form the API expects
        formatted_order_by = _format_order_by(order_by)
        response, count = self._load_query(query, formatted_order_by, limit, offset)
        self.logger.info("Found %s products", count)
        return _parse_opensearch_response(response)
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
"""Create a OpenSearch API query string.
"""
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts)
    def query_raw(self, query, order_by=None, limit=None, offset=0):
        """
        Do a full-text query on the OpenSearch API using the format specified in
        https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch

        DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release.

        Parameters
        ----------
        query : str
            The query string.
        order_by: str, optional
            A comma-separated list of fields to order by (on server side).
            Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively.
            Ascending order is used, if prefix is omitted.
            Example: "cloudcoverpercentage, -beginposition".
        limit: int, optional
            Maximum number of products returned. Defaults to no limit.
        offset: int, optional
            The number of results to skip. Defaults to 0.

        Returns
        -------
        dict[string, dict]
            Products returned by the query as a dictionary with the product ID as the key and
            the product's attributes (a dictionary) as the value.
        """
        # Thin deprecated wrapper: warn, then delegate everything to query()
        warnings.warn(
            "query_raw() has been merged with query(). use query(raw=...) instead.",
            PendingDeprecationWarning
        )
        return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
"""Get the number of products matching a query.
Accepted parameters are identical to :meth:`SentinelAPI.query()`.
This is a significantly more efficient alternative to doing `len(api.query())`,
which can take minutes to run for queries matching thousands of products.
Returns
-------
int
The number of products matching a query.
"""
for kw in ['order_by', 'limit', 'offset']:
# Allow these function arguments to be included for compatibility with query(),
# but ignore them.
if kw in keywords:
del keywords[kw]
query = self.format_query(area, date, raw, area_relation, **keywords)
_, total_count = self._load_query(query, limit=0)
return total_count
    def _load_query(self, query, order_by=None, limit=None, offset=0):
        """Fetch all requested pages of results for *query*.

        Returns a tuple of (list of product entries, server-reported total count).
        """
        # The first page also tells us the total number of available results
        products, count = self._load_subquery(query, order_by, limit, offset)
        # repeat query until all results have been loaded
        max_offset = count
        if limit is not None:
            max_offset = min(count, offset + limit)
        if max_offset > offset + self.page_size:
            progress = self._tqdm(desc="Querying products",
                                  initial=self.page_size,
                                  total=max_offset - offset,
                                  unit=' products')
            for new_offset in range(offset + self.page_size, max_offset, self.page_size):
                # Shrink the per-page limit so we never fetch past the requested total
                new_limit = limit
                if limit is not None:
                    new_limit = limit - new_offset + offset
                ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
                progress.update(len(ret))
                products += ret
            progress.close()
        return products, count
    def _load_subquery(self, query, order_by=None, limit=None, offset=0):
        """Fetch a single page of query results.

        Returns a tuple of (list of product entries, server-reported total count).
        """
        # store last query (for testing)
        self._last_query = query
        self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)
        # load query results; the query string travels in the POST body
        url = self._format_url(order_by, limit, offset)
        response = self.session.post(url, {'q': query}, auth=self.session.auth,
                                     headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
                                     timeout=self.timeout)
        _check_scihub_response(response)
        # store last status code (for testing)
        self._last_response = response
        # parse response content
        try:
            json_feed = response.json()['feed']
            if json_feed['opensearch:totalResults'] is None:
                # We are using some unintended behavior of the server that a null is
                # returned as the total results value when the query string was incorrect.
                raise SentinelAPIError(
                    'Invalid query string. Check the parameters and format.', response)
            total_results = int(json_feed['opensearch:totalResults'])
        except (ValueError, KeyError):
            raise SentinelAPIError('API response not valid. JSON decoding failed.', response)
        products = json_feed.get('entry', [])
        # this verification is necessary because if the query returns only
        # one product, self.products will be a dict not a list
        if isinstance(products, dict):
            products = [products]
        return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
    @staticmethod
    def to_geojson(products):
        """Return the products from a query response as a GeoJSON with the values in their
        appropriate Python types.
        """
        feature_list = []
        for i, (product_id, props) in enumerate(products.items()):
            # Copy so the caller's dictionary is not mutated below
            props = props.copy()
            props['id'] = product_id
            poly = geomet.wkt.loads(props['footprint'])
            del props['footprint']
            del props['gmlfootprint']
            # Fix "'datetime' is not JSON serializable"
            for k, v in props.items():
                if isinstance(v, (date, datetime)):
                    props[k] = v.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
            feature_list.append(
                geojson.Feature(geometry=poly, id=i, properties=props)
            )
        return geojson.FeatureCollection(feature_list)
@staticmethod
def to_dataframe(products):
"""Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index')
    @staticmethod
    def to_geodataframe(products):
        """Return the products from a query response as a GeoPandas GeoDataFrame
        with the values in their appropriate Python types.
        """
        # GeoPandas/Shapely are optional dependencies; import lazily
        try:
            import geopandas as gpd
            import shapely.wkt
        except ImportError:
            raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")
        crs = {'init': 'epsg:4326'}  # WGS84
        if len(products) == 0:
            # Empty result set: return an empty frame with only the CRS set
            return gpd.GeoDataFrame(crs=crs)
        df = SentinelAPI.to_dataframe(products)
        # Geometry comes from the WKT footprint column
        geometry = [shapely.wkt.loads(fp) for fp in df['footprint']]
        # remove useless columns
        df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)
        return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
    def get_product_odata(self, id, full=False):
        """Access OData API to get info about a product.

        Returns a dict containing the id, title, size, md5sum, date, footprint and download url
        of the product. The date field corresponds to the Start ContentDate value.

        If `full` is set to True, then the full, detailed metadata of the product is returned
        in addition to the above.

        Parameters
        ----------
        id : string
            The UUID of the product to query
        full : bool
            Whether to get the full metadata for the Product. False by default.

        Returns
        -------
        dict[str, Any]
            A dictionary with an item for each metadata attribute

        Notes
        -----
        For a full list of mappings between the OpenSearch (Solr) and OData attribute names
        see the following definition files:
        https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
        https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
        https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
        """
        url = urljoin(self.api_url, u"odata/v1/Products('{}')?$format=json".format(id))
        if full:
            # $expand fetches the extended metadata attributes in the same request
            url += '&$expand=Attributes'
        response = self.session.get(url, auth=self.session.auth,
                                    timeout=self.timeout)
        _check_scihub_response(response)
        values = _parse_odata_response(response.json()['d'])
        return values
    def _trigger_offline_retrieval(self, url):
        """ Triggers retrieval of an offline product

        Trying to download an offline product triggers its retrieval from the long term archive.
        The returned HTTP status code conveys whether this was successful.

        Parameters
        ----------
        url : string
            URL for downloading the product

        Returns
        -------
        int
            The HTTP status code of the server's response (202 means accepted).

        Raises
        ------
        SentinelAPILTAError
            On 503, 403 and 500 responses from the server.

        Notes
        -----
        https://scihub.copernicus.eu/userguide/LongTermArchive
        """
        with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
            # check https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
            if r.status_code == 202:
                self.logger.info("Accepted for retrieval")
            elif r.status_code == 503:
                self.logger.error("Request not accepted")
                raise SentinelAPILTAError('Request for retrieval from LTA not accepted', r)
            elif r.status_code == 403:
                self.logger.error("Requests exceed user quota")
                raise SentinelAPILTAError('Requests for retrieval from LTA exceed user quota', r)
            elif r.status_code == 500:
                # should not happen
                self.logger.error("Trying to download an offline product")
                raise SentinelAPILTAError('Trying to download an offline product', r)
            return r.status_code
def download(self, id, directory_path='.', checksum=True):
"""Download a product.
Uses the filename on the server for the downloaded file, e.g.
"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".
Incomplete downloads are continued and complete files are skipped.
Parameters
----------
id : string
UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
directory_path : string, optional
Where the file will be downloaded
checksum : bool, optional
If True, verify the downloaded file's integrity by checking its MD5 checksum.
Throws InvalidChecksumError if the checksum does not match.
Defaults to True.
Returns
-------
product_info : dict
Dictionary containing the product's info from get_product_info() as well as
the path on disk.
Raises
------
InvalidChecksumError
If the MD5 checksum does not match the checksum on the server.
"""
product_info = self.get_product_odata(id)
path = join(directory_path, product_info['title'] + '.zip')
product_info['path'] = path
product_info['downloaded_bytes'] = 0
self.logger.info('Downloading %s to %s', id, path)
if exists(path):
# We assume that the product has been downloaded and is complete
return product_info
# An incomplete download triggers the retrieval from the LTA if the product is not online
if not product_info['Online']:
self.logger.warning(
'Product %s is not online. Triggering retrieval from long term archive.',
product_info['id'])
self._trigger_offline_retrieval(product_info['url'])
return product_info
# Use a temporary file for downloading
temp_path = path + '.incomplete'
skip_download = False
if exists(temp_path):
if getsize(temp_path) > product_info['size']:
self.logger.warning(
"Existing incomplete file %s is larger than the expected final size"
" (%s vs %s bytes). Deleting it.",
str(temp_path), getsize(temp_path), product_info['size'])
remove(temp_path)
elif getsize(temp_path) == product_info['size']:
if self._md5_compare(temp_path, product_info['md5']):
skip_download = True
else:
# Log a warning since this should never happen
self.logger.warning(
"Existing incomplete file %s appears to be fully downloaded but "
"its checksum is incorrect. Deleting it.",
str(temp_path))
remove(temp_path)
else:
# continue downloading
self.logger.info(
"Download will resume from existing incomplete file %s.", temp_path)
pass
if not skip_download:
# Store the number of downloaded bytes for unit tests
product_info['downloaded_bytes'] = self._download(
product_info['url'], temp_path, self.session, product_info['size'])
# Check integrity with MD5 checksum
if checksum is True:
if not self._md5_compare(temp_path, product_info['md5']):
remove(temp_path)
raise InvalidChecksumError('File corrupt: checksums do not match')
# Download successful, rename the temporary file to its proper name
shutil.move(temp_path, path)
return product_info
    def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
        """Download a list of products.

        Takes a list of product IDs as input. This means that the return value of query() can be
        passed directly to this method.

        File names on the server are used for the downloaded files, e.g.
        "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

        In case of interruptions or other exceptions, downloading will restart from where it left
        off. Downloading is attempted at most max_attempts times to avoid getting stuck with
        unrecoverable errors.

        Parameters
        ----------
        products : list
            List of product IDs
        directory_path : string
            Directory where the downloaded files will be downloaded
        max_attempts : int, optional
            Number of allowed retries before giving up downloading a product. Defaults to 10.
        checksum : bool, optional
            If True, verify the downloaded files' integrity by checking its MD5 checksum.
            Throws InvalidChecksumError if the checksum does not match.
            Defaults to True.

        Raises
        ------
        Raises the most recent downloading exception if all downloads failed.

        Returns
        -------
        dict[string, dict]
            A dictionary containing the return value from download() for each successfully
            downloaded product.
        dict[string, dict]
            A dictionary containing the product information for products whose retrieval
            from the long term archive was successfully triggered.
        set[string]
            The list of products that failed to download.
        """
        product_ids = list(products)
        self.logger.info("Will download %d products", len(product_ids))
        return_values = OrderedDict()
        last_exception = None
        for i, product_id in enumerate(products):
            # Retry loop: checksum errors are retried; LTA errors abort this product
            for attempt_num in range(max_attempts):
                try:
                    product_info = self.download(product_id, directory_path, checksum)
                    return_values[product_id] = product_info
                    break
                except (KeyboardInterrupt, SystemExit):
                    raise
                except InvalidChecksumError as e:
                    last_exception = e
                    self.logger.warning(
                        "Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
                except SentinelAPILTAError as e:
                    last_exception = e
                    self.logger.exception("There was an error retrieving %s from the LTA", product_id)
                    break
                except Exception as e:
                    last_exception = e
                    self.logger.exception("There was an error downloading %s", product_id)
            self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
        failed = set(products) - set(return_values)
        # split up successfully processed products into downloaded and only triggered retrieval from the LTA
        triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
        downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
        if len(failed) == len(product_ids) and last_exception is not None:
            raise last_exception
        return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
    def _query_names(self, names):
        """Find products by their names, e.g.
        S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.

        Note that duplicates exist on server, so multiple products can be returned for each name.

        Parameters
        ----------
        names : list[string]
            List of product names.

        Returns
        -------
        dict[string, dict[str, dict]]
            A dictionary mapping each name to a dictionary which contains the products with
            that name (with ID as the key).
        """
        def chunks(l, n):
            """Yield successive n-sized chunks from l."""
            for i in range(0, len(l), n):
                yield l[i:i + n]
        products = {}
        # 40 names per query fits reasonably well inside the query limit
        for chunk in chunks(names, 40):
            query = " OR ".join(chunk)
            products.update(self.query(raw=query))
        # Group the products by name; each name may map to several product IDs
        output = OrderedDict((name, dict()) for name in names)
        for id, metadata in products.items():
            name = metadata['identifier']
            output[name][id] = metadata
        return output
    def check_files(self, paths=None, ids=None, directory=None, delete=False):
        """Verify the integrity of product files on disk.

        Integrity is checked by comparing the size and checksum of the file with the respective
        values on the server.

        The input can be a list of products to check or a list of IDs and a directory.

        In cases where multiple products with different IDs exist on the server for given product
        name, the file is considered to be correct if any of them matches the file size and
        checksum. A warning is logged in such situations.

        The corrupt products' OData info is included in the return value to make it easier to
        re-download the products, if necessary.

        Parameters
        ----------
        paths : list[string]
            List of product file paths.
        ids : list[string]
            List of product IDs.
        directory : string
            Directory where the files are located, if checking based on product IDs.
        delete : bool
            Whether to delete corrupt products. Defaults to False.

        Returns
        -------
        dict[str, list[dict]]
            A dictionary listing the invalid or missing files. The dictionary maps the corrupt
            file paths to a list of OData dictionaries of matching products on the server (as
            returned by :meth:`SentinelAPI.get_product_odata()`).
        """
        if not ids and not paths:
            raise ValueError("Must provide either file paths or product IDs and a directory")
        if ids and not directory:
            raise ValueError("Directory value missing")
        paths = paths or []
        ids = ids or []
        def name_from_path(path):
            # Product name is the file name without directory or extension
            return splitext(basename(path))[0]
        # Get product IDs corresponding to the files on disk
        names = []
        if paths:
            names = list(map(name_from_path, paths))
            result = self._query_names(names)
            for product_dicts in result.values():
                ids += list(product_dicts)
        names_from_paths = set(names)
        ids = set(ids)
        # Collect the OData information for each product
        # Product name -> list of matching odata dicts
        product_infos = defaultdict(list)
        for id in ids:
            odata = self.get_product_odata(id)
            name = odata['title']
            product_infos[name].append(odata)
            # Add the expected on-disk path for products that were given by ID only
            if name not in names_from_paths:
                paths.append(join(directory, name + '.zip'))
        # Now go over the list of products and check them
        corrupt = {}
        for path in paths:
            name = name_from_path(path)
            if len(product_infos[name]) > 1:
                self.logger.warning("{} matches multiple products on server".format(path))
            if not exists(path):
                # We will consider missing files as corrupt also
                self.logger.info("{} does not exist on disk".format(path))
                corrupt[path] = product_infos[name]
                continue
            is_fine = False
            for product_info in product_infos[name]:
                # Cheap size comparison first, expensive checksum second
                if (getsize(path) == product_info['size'] and
                        self._md5_compare(path, product_info['md5'])):
                    is_fine = True
                    break
            if not is_fine:
                self.logger.info("{} is corrupt".format(path))
                corrupt[path] = product_infos[name]
                if delete:
                    remove(path)
        return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
"""Compare a given MD5 checksum with one calculated from a file."""
with closing(self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B",
unit_scale=True)) as progress:
md5 = hashlib.md5()
with open(file_path, "rb") as f:
while True:
block_data = f.read(block_size)
if not block_data:
break
md5.update(block_data)
progress.update(len(block_data))
return md5.hexdigest().lower() == checksum.lower()
def _download(self, url, path, session, file_size):
    """Stream ``url`` into ``path``, resuming a partial download if the file exists.

    Returns the number of bytes downloaded by THIS call, excluding any bytes
    already on disk from a previous attempt.
    """
    headers = {}
    continuing = exists(path)
    if continuing:
        # Resume from where the previous attempt stopped via an HTTP Range request.
        already_downloaded_bytes = getsize(path)
        headers = {'Range': 'bytes={}-'.format(already_downloaded_bytes)}
    else:
        already_downloaded_bytes = 0
    downloaded_bytes = 0
    with closing(session.get(url, stream=True, auth=session.auth,
                             headers=headers, timeout=self.timeout)) as r, \
            closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
                               unit_scale=True, initial=already_downloaded_bytes)) as progress:
        # Raises SentinelAPIError on a non-2xx status before any bytes are written.
        _check_scihub_response(r, test_json=False)
        chunk_size = 2 ** 20  # download in 1 MB chunks
        # Append when resuming so the existing partial content is preserved.
        mode = 'ab' if continuing else 'wb'
        with open(path, mode) as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    progress.update(len(chunk))
                    downloaded_bytes += len(chunk)
    # Return the number of bytes downloaded
    return downloaded_bytes
def _tqdm(self, **kwargs):
    """Create a tqdm progress bar, disabled when ``show_progressbars`` is False.

    May be overridden in subclasses to customize progress reporting.
    """
    options = dict(kwargs)
    options['disable'] = not self.show_progressbars
    return tqdm(**options)
class SentinelAPIError(Exception):
    """Invalid responses from DataHub.

    Attributes
    ----------
    msg: str
        The error message.
    response: requests.Response
        The response from the server as a `requests.Response` object.
    """

    def __init__(self, msg=None, response=None):
        self.msg = msg
        self.response = response

    def __str__(self):
        # Both attributes default to None; degrade gracefully instead of
        # raising from inside __str__ (which would mask the original error
        # whenever the exception is printed or logged).
        msg = self.msg or ''
        if self.response is None:
            return msg
        return 'HTTP status {0} {1}: {2}'.format(
            self.response.status_code, self.response.reason,
            ('\n' if '\n' in msg else '') + msg)
class SentinelAPILTAError(SentinelAPIError):
    """Error when retrieving a product from the Long Term Archive.

    Attributes
    ----------
    msg: str
        The error message.
    response: requests.Response
        The response from the server as a `requests.Response` object.
    """
    # __init__ (and __str__) are inherited from SentinelAPIError; the previous
    # duplicate __init__ here was byte-for-byte identical and is removed.
class InvalidChecksumError(Exception):
    """MD5 checksum of a local file does not match the one from the server.

    Raised during download verification when checksum checking is enabled.
    """
    pass
def read_geojson(geojson_file):
    """Read a GeoJSON file into a GeoJSON object.

    Parameters
    ----------
    geojson_file : str
        Path of the GeoJSON file to read.

    Returns
    -------
    The object parsed by :func:`geojson.load`.
    """
    with open(geojson_file) as f:
        return geojson.load(f)
def geojson_to_wkt(geojson_obj, feature_number=0, decimals=4):
    """Convert a GeoJSON object to Well-Known Text. Intended for use with OpenSearch queries.

    In case of FeatureCollection, only one of the features is used (the first by default).
    3D points are converted to 2D.

    The input object is left unmodified (previously its 'coordinates' entry
    was overwritten in place).

    Parameters
    ----------
    geojson_obj : dict
        a GeoJSON object
    feature_number : int, optional
        Feature to extract polygon from (in case of MultiPolygon
        FeatureCollection), defaults to first Feature
    decimals : int, optional
        Number of decimal figures after point to round coordinate to. Defaults to 4 (about 10
        meters).

    Returns
    -------
    polygon coordinates
        string of comma separated coordinate tuples (lon, lat) to be used by SentinelAPI
    """
    # Accept a bare geometry, a Feature, or a FeatureCollection.
    if 'coordinates' in geojson_obj:
        geometry = geojson_obj
    elif 'geometry' in geojson_obj:
        geometry = geojson_obj['geometry']
    else:
        geometry = geojson_obj['features'][feature_number]['geometry']

    def ensure_2d(geometry):
        # Recursively truncate coordinate tuples to (lon, lat).
        if isinstance(geometry[0], (list, tuple)):
            return list(map(ensure_2d, geometry))
        else:
            return geometry[:2]

    def check_bounds(geometry):
        # Recursively validate that each (lon, lat) pair is in range.
        if isinstance(geometry[0], (list, tuple)):
            for part in geometry:
                check_bounds(part)
        else:
            if geometry[0] > 180 or geometry[0] < -180:
                raise ValueError('Longitude is out of bounds, check your JSON format or data')
            if geometry[1] > 90 or geometry[1] < -90:
                raise ValueError('Latitude is out of bounds, check your JSON format or data')

    # Discard z-coordinate, if it exists.
    # Build a shallow copy so the caller's GeoJSON object is not mutated.
    coordinates = ensure_2d(geometry['coordinates'])
    check_bounds(coordinates)
    geometry = dict(geometry, coordinates=coordinates)

    wkt = geomet.wkt.dumps(geometry, decimals=decimals)
    # Strip unnecessary spaces
    wkt = re.sub(r'(?<!\d) ', '', wkt)
    return wkt
def format_query_date(in_date):
    r"""
    Format a date, datetime or a YYYYMMDD string input as YYYY-MM-DDThh:mm:ssZ
    or validate a date string as suitable for the full text search interface and return it.

    `None` will be converted to '\*', meaning an unlimited date bound in date ranges.

    Parameters
    ----------
    in_date : str or datetime or date or None
        Date to be formatted

    Returns
    -------
    str
        Formatted string

    Raises
    ------
    ValueError
        If the input date type is incorrect or passed date string is invalid
    """
    if in_date is None:
        return '*'
    if isinstance(in_date, (datetime, date)):
        return in_date.strftime('%Y-%m-%dT%H:%M:%SZ')
    elif not isinstance(in_date, string_types):
        raise ValueError('Expected a string or a datetime object. Received {}.'.format(in_date))

    in_date = in_date.strip()
    if in_date == '*':
        # '*' can be used for one-sided range queries e.g. ingestiondate:[* TO NOW-1YEAR]
        return in_date

    # Reference: https://cwiki.apache.org/confluence/display/solr/Working+with+Dates
    # ISO-8601 date or NOW
    valid_date_pattern = r'^(?:\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(?:\.\d+)?Z|NOW)'
    # date arithmetic suffix is allowed
    units = r'(?:YEAR|MONTH|DAY|HOUR|MINUTE|SECOND)'
    valid_date_pattern += r'(?:[-+]\d+{}S?)*'.format(units)
    # dates can be rounded to a unit of time
    # e.g. "NOW/DAY" for dates since 00:00 today
    valid_date_pattern += r'(?:/{}S?)*$'.format(units)
    # (in_date was already stripped above; the previous duplicate strip is removed)
    if re.match(valid_date_pattern, in_date):
        return in_date
    try:
        # Fall back to the compact YYYYMMDD form.
        return datetime.strptime(in_date, '%Y%m%d').strftime('%Y-%m-%dT%H:%M:%SZ')
    except ValueError:
        raise ValueError('Unsupported date value {}'.format(in_date))
def _check_scihub_response(response, test_json=True):
    """Check that the response from server has status code 2xx and that the response is valid JSON.

    Raises
    ------
    SentinelAPIError
        If the status code indicates failure or (with ``test_json``) the body is
        not valid JSON; the most informative message available is attached.
    """
    # Prevent requests from needing to guess the encoding
    # SciHub appears to be using UTF-8 in all of their responses
    response.encoding = 'utf-8'
    try:
        response.raise_for_status()
        if test_json:
            response.json()
    except (requests.HTTPError, ValueError):
        msg = "Invalid API response."
        # Try progressively less structured sources for a useful message.
        # Bare "except:" clauses were narrowed to "except Exception:" so
        # KeyboardInterrupt/SystemExit are no longer swallowed here.
        try:
            msg = response.headers['cause-message']
        except Exception:
            try:
                msg = response.json()['error']['message']['value']
            except Exception:
                if not response.text.strip().startswith('{'):
                    try:
                        # Best effort: render an HTML error page as plain text.
                        h = html2text.HTML2Text()
                        h.ignore_images = True
                        h.ignore_anchors = True
                        msg = h.handle(response.text).strip()
                    except Exception:
                        pass
        api_error = SentinelAPIError(msg, response)
        # Suppress "During handling of the above exception..." message
        # See PEP 409
        api_error.__cause__ = None
        raise api_error
def _format_order_by(order_by):
if not order_by or not order_by.strip():
return None
output = []
for part in order_by.split(','):
part = part.strip()
dir = " asc"
if part[0] == '+':
part = part[1:]
elif part[0] == '-':
dir = " desc"
part = part[1:]
if not part or not part.isalnum():
raise ValueError("Invalid order by value ({})".format(order_by))
output.append(part + dir)
return ",".join(output)
def _parse_gml_footprint(geometry_str):
geometry_xml = ET.fromstring(geometry_str)
poly_coords_str = geometry_xml \
.find('{http://www.opengis.net/gml}outerBoundaryIs') \
.find('{http://www.opengis.net/gml}LinearRing') \
.findtext('{http://www.opengis.net/gml}coordinates')
poly_coords = (coord.split(",")[::-1] for coord in poly_coords_str.split(" "))
coord_string = ",".join(" ".join(coord) for coord in poly_coords)
return "POLYGON(({}))".format(coord_string)
def _parse_iso_date(content):
if '.' in content:
return datetime.strptime(content, '%Y-%m-%dT%H:%M:%S.%fZ')
else:
return datetime.strptime(content, '%Y-%m-%dT%H:%M:%SZ')
def _parse_odata_timestamp(in_date):
"""Convert the timestamp received from OData JSON API to a datetime object.
"""
timestamp = int(in_date.replace('/Date(', '').replace(')/', ''))
seconds = timestamp // 1000
ms = timestamp % 1000
return datetime.utcfromtimestamp(seconds) + timedelta(milliseconds=ms)
def _parse_odata_response(product):
    """Flatten a single OData product entry (JSON dict) into a metadata dict.

    One key is named after the checksum algorithm reported by the server
    (lower-cased, e.g. 'md5'). Extended attributes, when present, are parsed
    into int/float/datetime where possible and kept as strings otherwise.
    """
    output = {
        'id': product['Id'],
        'title': product['Name'],
        'size': int(product['ContentLength']),
        # Key is the lower-cased checksum algorithm name, e.g. 'md5'.
        product['Checksum']['Algorithm'].lower(): product['Checksum']['Value'],
        'date': _parse_odata_timestamp(product['ContentDate']['Start']),
        'footprint': _parse_gml_footprint(product["ContentGeometry"]),
        'url': product['__metadata']['media_src'],
        # Default to True when the server does not report an 'Online' field.
        'Online': product.get('Online', True),
        'Creation Date': _parse_odata_timestamp(product['CreationDate']),
        'Ingestion Date': _parse_odata_timestamp(product['IngestionDate']),
    }
    # Parse the extended metadata, if provided
    # Converters are tried in order and the first success wins, so ints are
    # preferred over floats and unparseable values fall through as strings.
    converters = [int, float, _parse_iso_date]
    for attr in product['Attributes'].get('results', []):
        value = attr['Value']
        for f in converters:
            try:
                value = f(attr['Value'])
                break
            except ValueError:
                pass
        output[attr['Name']] = value
    return output
|
sentinelsat/sentinelsat | sentinelsat/sentinel.py | SentinelAPI.query | python | def query(self, area=None, date=None, raw=None, area_relation='Intersects',
order_by=None, limit=None, offset=0, **keywords):
query = self.format_query(area, date, raw, area_relation, **keywords)
self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
order_by, limit, offset, query)
formatted_order_by = _format_order_by(order_by)
response, count = self._load_query(query, formatted_order_by, limit, offset)
self.logger.info("Found %s products", count)
return _parse_opensearch_response(response) | Query the OpenSearch API with the coordinates of an area, a date interval
and any other search keywords accepted by the API.
Parameters
----------
area : str, optional
The area of interest formatted as a Well-Known Text string.
date : tuple of (str or datetime) or str, optional
A time interval filter based on the Sensing Start Time of the products.
Expects a tuple of (start, end), e.g. ("NOW-1DAY", "NOW").
The timestamps can be either a Python datetime or a string in one of the
following formats:
- yyyyMMdd
- yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)
- yyyy-MM-ddThh:mm:ssZ
- NOW
- NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)
- NOW+<n>DAY(S)
- yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)
- NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit
Alternatively, an already fully formatted string such as "[NOW-1DAY TO NOW]" can be
used as well.
raw : str, optional
Additional query text that will be appended to the query.
area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
What relation to use for testing the AOI. Case insensitive.
- Intersects: true if the AOI and the footprint intersect (default)
- Contains: true if the AOI is inside the footprint
- IsWithin: true if the footprint is inside the AOI
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order,
respectively. Ascending order is used if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
**keywords
Additional keywords can be used to specify other query parameters,
e.g. `relativeorbitnumber=70`.
See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
for a full list.
Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.
`None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.
Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.
The time interval formats accepted by the `date` parameter can also be used with
any other parameters that expect time intervals (that is: 'beginposition', 'endposition',
'date', 'creationdate', and 'ingestiondate').
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value. | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L77-L149 | [
"def _format_order_by(order_by):\n if not order_by or not order_by.strip():\n return None\n output = []\n for part in order_by.split(','):\n part = part.strip()\n dir = \" asc\"\n if part[0] == '+':\n part = part[1:]\n elif part[0] == '-':\n dir = \" desc\"\n part = part[1:]\n if not part or not part.isalnum():\n raise ValueError(\"Invalid order by value ({})\".format(order_by))\n output.append(part + dir)\n return \",\".join(output)\n",
"def _parse_opensearch_response(products):\n \"\"\"Convert a query response to a dictionary.\n\n The resulting dictionary structure is {<product id>: {<property>: <value>}}.\n The property values are converted to their respective Python types unless `parse_values`\n is set to `False`.\n \"\"\"\n\n converters = {'date': _parse_iso_date, 'int': int, 'long': int, 'float': float, 'double': float}\n # Keep the string type by default\n default_converter = lambda x: x\n\n output = OrderedDict()\n for prod in products:\n product_dict = {}\n prod_id = prod['id']\n output[prod_id] = product_dict\n for key in prod:\n if key == 'id':\n continue\n if isinstance(prod[key], string_types):\n product_dict[key] = prod[key]\n else:\n properties = prod[key]\n if isinstance(properties, dict):\n properties = [properties]\n if key == 'link':\n for p in properties:\n name = 'link'\n if 'rel' in p:\n name = 'link_' + p['rel']\n product_dict[name] = p['href']\n else:\n f = converters.get(key, default_converter)\n for p in properties:\n try:\n product_dict[p['name']] = f(p['content'])\n except KeyError:\n # Sentinel-3 has one element 'arr'\n # which violates the name:content convention\n product_dict[p['name']] = f(p['str'])\n return output\n",
"def format_query(area=None, date=None, raw=None, area_relation='Intersects',\n **keywords):\n \"\"\"Create a OpenSearch API query string.\n \"\"\"\n if area_relation.lower() not in {\"intersects\", \"contains\", \"iswithin\"}:\n raise ValueError(\"Incorrect AOI relation provided ({})\".format(area_relation))\n\n # Check for duplicate keywords\n kw_lower = set(x.lower() for x in keywords)\n if (len(kw_lower) != len(keywords) or\n (date is not None and 'beginposition' in kw_lower) or\n (area is not None and 'footprint' in kw_lower)):\n raise ValueError(\"Query contains duplicate keywords. Note that query keywords are case-insensitive.\")\n\n query_parts = []\n\n if date is not None:\n keywords['beginPosition'] = date\n\n for attr, value in sorted(keywords.items()):\n # Escape spaces, where appropriate\n if isinstance(value, string_types):\n value = value.strip()\n if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):\n value = re.sub(r'\\s', r'\\ ', value, re.M)\n\n # Handle date keywords\n # Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601\n date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']\n if attr.lower() in date_attrs:\n # Automatically format date-type attributes\n if isinstance(value, string_types) and ' TO ' in value:\n # This is a string already formatted as a date interval,\n # e.g. '[NOW-1DAY TO NOW]'\n pass\n elif not isinstance(value, string_types) and len(value) == 2:\n value = (format_query_date(value[0]), format_query_date(value[1]))\n else:\n raise ValueError(\"Date-type query parameter '{}' expects a two-element tuple \"\n \"of str or datetime objects. 
Received {}\".format(attr, value))\n\n # Handle ranged values\n if isinstance(value, (list, tuple)):\n # Handle value ranges\n if len(value) == 2:\n # Allow None to be used as a unlimited bound\n value = ['*' if x is None else x for x in value]\n if all(x == '*' for x in value):\n continue\n value = '[{} TO {}]'.format(*value)\n else:\n raise ValueError(\"Invalid number of elements in list. Expected 2, received \"\n \"{}\".format(len(value)))\n\n query_parts.append('{}:{}'.format(attr, value))\n\n if raw:\n query_parts.append(raw)\n\n if area is not None:\n query_parts.append('footprint:\"{}({})\"'.format(area_relation, area))\n\n return ' '.join(query_parts)\n",
"def _load_query(self, query, order_by=None, limit=None, offset=0):\n products, count = self._load_subquery(query, order_by, limit, offset)\n\n # repeat query until all results have been loaded\n max_offset = count\n if limit is not None:\n max_offset = min(count, offset + limit)\n if max_offset > offset + self.page_size:\n progress = self._tqdm(desc=\"Querying products\",\n initial=self.page_size,\n total=max_offset - offset,\n unit=' products')\n for new_offset in range(offset + self.page_size, max_offset, self.page_size):\n new_limit = limit\n if limit is not None:\n new_limit = limit - new_offset + offset\n ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]\n progress.update(len(ret))\n products += ret\n progress.close()\n\n return products, count\n"
] | class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
show_progressbars=True, timeout=None):
self.session = requests.Session()
if user and password:
self.session.auth = (user, password)
self.api_url = api_url if api_url.endswith('/') else api_url + '/'
self.page_size = 100
self.user_agent = 'sentinelsat/' + sentinelsat_version
self.session.headers['User-Agent'] = self.user_agent
self.show_progressbars = show_progressbars
self.timeout = timeout
# For unit tests
self._last_query = None
self._last_response = None
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
"""Create a OpenSearch API query string.
"""
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts)
def query_raw(self, query, order_by=None, limit=None, offset=0):
"""
Do a full-text query on the OpenSearch API using the format specified in
https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release.
Parameters
----------
query : str
The query string.
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively.
Ascending order is used, if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
"""
warnings.warn(
"query_raw() has been merged with query(). use query(raw=...) instead.",
PendingDeprecationWarning
)
return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
"""Get the number of products matching a query.
Accepted parameters are identical to :meth:`SentinelAPI.query()`.
This is a significantly more efficient alternative to doing `len(api.query())`,
which can take minutes to run for queries matching thousands of products.
Returns
-------
int
The number of products matching a query.
"""
for kw in ['order_by', 'limit', 'offset']:
# Allow these function arguments to be included for compatibility with query(),
# but ignore them.
if kw in keywords:
del keywords[kw]
query = self.format_query(area, date, raw, area_relation, **keywords)
_, total_count = self._load_query(query, limit=0)
return total_count
def _load_query(self, query, order_by=None, limit=None, offset=0):
products, count = self._load_subquery(query, order_by, limit, offset)
# repeat query until all results have been loaded
max_offset = count
if limit is not None:
max_offset = min(count, offset + limit)
if max_offset > offset + self.page_size:
progress = self._tqdm(desc="Querying products",
initial=self.page_size,
total=max_offset - offset,
unit=' products')
for new_offset in range(offset + self.page_size, max_offset, self.page_size):
new_limit = limit
if limit is not None:
new_limit = limit - new_offset + offset
ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
progress.update(len(ret))
products += ret
progress.close()
return products, count
def _load_subquery(self, query, order_by=None, limit=None, offset=0):
# store last query (for testing)
self._last_query = query
self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)
# load query results
url = self._format_url(order_by, limit, offset)
response = self.session.post(url, {'q': query}, auth=self.session.auth,
headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
timeout=self.timeout)
_check_scihub_response(response)
# store last status code (for testing)
self._last_response = response
# parse response content
try:
json_feed = response.json()['feed']
if json_feed['opensearch:totalResults'] is None:
# We are using some unintended behavior of the server that a null is
# returned as the total results value when the query string was incorrect.
raise SentinelAPIError(
'Invalid query string. Check the parameters and format.', response)
total_results = int(json_feed['opensearch:totalResults'])
except (ValueError, KeyError):
raise SentinelAPIError('API response not valid. JSON decoding failed.', response)
products = json_feed.get('entry', [])
# this verification is necessary because if the query returns only
# one product, self.products will be a dict not a list
if isinstance(products, dict):
products = [products]
return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
@staticmethod
def to_geojson(products):
"""Return the products from a query response as a GeoJSON with the values in their
appropriate Python types.
"""
feature_list = []
for i, (product_id, props) in enumerate(products.items()):
props = props.copy()
props['id'] = product_id
poly = geomet.wkt.loads(props['footprint'])
del props['footprint']
del props['gmlfootprint']
# Fix "'datetime' is not JSON serializable"
for k, v in props.items():
if isinstance(v, (date, datetime)):
props[k] = v.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
feature_list.append(
geojson.Feature(geometry=poly, id=i, properties=props)
)
return geojson.FeatureCollection(feature_list)
@staticmethod
def to_dataframe(products):
"""Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index')
@staticmethod
def to_geodataframe(products):
"""Return the products from a query response as a GeoPandas GeoDataFrame
with the values in their appropriate Python types.
"""
try:
import geopandas as gpd
import shapely.wkt
except ImportError:
raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")
crs = {'init': 'epsg:4326'} # WGS84
if len(products) == 0:
return gpd.GeoDataFrame(crs=crs)
df = SentinelAPI.to_dataframe(products)
geometry = [shapely.wkt.loads(fp) for fp in df['footprint']]
# remove useless columns
df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)
return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
def get_product_odata(self, id, full=False):
"""Access OData API to get info about a product.
Returns a dict containing the id, title, size, md5sum, date, footprint and download url
of the product. The date field corresponds to the Start ContentDate value.
If `full` is set to True, then the full, detailed metadata of the product is returned
in addition to the above.
Parameters
----------
id : string
The UUID of the product to query
full : bool
Whether to get the full metadata for the Product. False by default.
Returns
-------
dict[str, Any]
A dictionary with an item for each metadata attribute
Notes
-----
For a full list of mappings between the OpenSearch (Solr) and OData attribute names
see the following definition files:
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
"""
url = urljoin(self.api_url, u"odata/v1/Products('{}')?$format=json".format(id))
if full:
url += '&$expand=Attributes'
response = self.session.get(url, auth=self.session.auth,
timeout=self.timeout)
_check_scihub_response(response)
values = _parse_odata_response(response.json()['d'])
return values
def _trigger_offline_retrieval(self, url):
""" Triggers retrieval of an offline product
Trying to download an offline product triggers its retrieval from the long term archive.
The returned HTTP status code conveys whether this was successful.
Parameters
----------
url : string
URL for downloading the product
Notes
-----
https://scihub.copernicus.eu/userguide/LongTermArchive
"""
with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
# check https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
if r.status_code == 202:
self.logger.info("Accepted for retrieval")
elif r.status_code == 503:
self.logger.error("Request not accepted")
raise SentinelAPILTAError('Request for retrieval from LTA not accepted', r)
elif r.status_code == 403:
self.logger.error("Requests exceed user quota")
raise SentinelAPILTAError('Requests for retrieval from LTA exceed user quota', r)
elif r.status_code == 500:
# should not happen
self.logger.error("Trying to download an offline product")
raise SentinelAPILTAError('Trying to download an offline product', r)
return r.status_code
def download(self, id, directory_path='.', checksum=True):
"""Download a product.
Uses the filename on the server for the downloaded file, e.g.
"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".
Incomplete downloads are continued and complete files are skipped.
Parameters
----------
id : string
UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
directory_path : string, optional
Where the file will be downloaded
checksum : bool, optional
If True, verify the downloaded file's integrity by checking its MD5 checksum.
Throws InvalidChecksumError if the checksum does not match.
Defaults to True.
Returns
-------
product_info : dict
Dictionary containing the product's info from get_product_info() as well as
the path on disk.
Raises
------
InvalidChecksumError
If the MD5 checksum does not match the checksum on the server.
"""
product_info = self.get_product_odata(id)
path = join(directory_path, product_info['title'] + '.zip')
product_info['path'] = path
product_info['downloaded_bytes'] = 0
self.logger.info('Downloading %s to %s', id, path)
if exists(path):
# We assume that the product has been downloaded and is complete
return product_info
# An incomplete download triggers the retrieval from the LTA if the product is not online
if not product_info['Online']:
self.logger.warning(
'Product %s is not online. Triggering retrieval from long term archive.',
product_info['id'])
self._trigger_offline_retrieval(product_info['url'])
return product_info
# Use a temporary file for downloading
temp_path = path + '.incomplete'
skip_download = False
if exists(temp_path):
if getsize(temp_path) > product_info['size']:
self.logger.warning(
"Existing incomplete file %s is larger than the expected final size"
" (%s vs %s bytes). Deleting it.",
str(temp_path), getsize(temp_path), product_info['size'])
remove(temp_path)
elif getsize(temp_path) == product_info['size']:
if self._md5_compare(temp_path, product_info['md5']):
skip_download = True
else:
# Log a warning since this should never happen
self.logger.warning(
"Existing incomplete file %s appears to be fully downloaded but "
"its checksum is incorrect. Deleting it.",
str(temp_path))
remove(temp_path)
else:
# continue downloading
self.logger.info(
"Download will resume from existing incomplete file %s.", temp_path)
pass
if not skip_download:
# Store the number of downloaded bytes for unit tests
product_info['downloaded_bytes'] = self._download(
product_info['url'], temp_path, self.session, product_info['size'])
# Check integrity with MD5 checksum
if checksum is True:
if not self._md5_compare(temp_path, product_info['md5']):
remove(temp_path)
raise InvalidChecksumError('File corrupt: checksums do not match')
# Download successful, rename the temporary file to its proper name
shutil.move(temp_path, path)
return product_info
def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
    """Download a list of products.

    Takes a list of product IDs as input. This means that the return value of query() can be
    passed directly to this method.

    File names on the server are used for the downloaded files, e.g.
    "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

    In case of interruptions or other exceptions, downloading will restart from where it left
    off. Downloading is attempted at most max_attempts times to avoid getting stuck with
    unrecoverable errors.

    Parameters
    ----------
    products : list
        List of product IDs
    directory_path : string
        Directory where the downloaded files will be downloaded
    max_attempts : int, optional
        Number of allowed retries before giving up downloading a product. Defaults to 10.
    checksum : bool, optional
        If True, verify the downloaded files' integrity by checking its MD5 checksum.
        Throws InvalidChecksumError if the checksum does not match.
        Defaults to True.

    Raises
    ------
    Raises the most recent downloading exception if all downloads failed.

    Returns
    -------
    dict[string, dict]
        A dictionary containing the return value from download() for each successfully
        downloaded product.
    dict[string, dict]
        A dictionary containing the product information for products whose retrieval
        from the long term archive was successfully triggered.
    set[string]
        The list of products that failed to download.
    """
    product_ids = list(products)
    self.logger.info("Will download %d products", len(product_ids))
    return_values = OrderedDict()
    last_exception = None
    for i, product_id in enumerate(products):
        # Retry each product up to max_attempts times; only retryable error
        # types fall through to the next attempt.
        for attempt_num in range(max_attempts):
            try:
                product_info = self.download(product_id, directory_path, checksum)
                return_values[product_id] = product_info
                break
            except (KeyboardInterrupt, SystemExit):
                # Never swallow user interrupts or interpreter shutdown.
                raise
            except InvalidChecksumError as e:
                last_exception = e
                # Corrupt download: retrying re-downloads the file.
                self.logger.warning(
                    "Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
            except SentinelAPILTAError as e:
                last_exception = e
                # An LTA retrieval error will not resolve by retrying; give up
                # on this product and move on to the next one.
                self.logger.exception("There was an error retrieving %s from the LTA", product_id)
                break
            except Exception as e:
                last_exception = e
                self.logger.exception("There was an error downloading %s", product_id)
        self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
    failed = set(products) - set(return_values)
    # split up successfully processed products into downloaded and only triggered retrieval from the LTA
    triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
    downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
    if len(failed) == len(product_ids) and last_exception is not None:
        # Everything failed: surface the most recent error to the caller.
        raise last_exception
    return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
def _query_names(self, names):
    """Find products by their names, e.g.
    S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.

    Note that duplicates exist on server, so multiple products can be returned for each name.

    Parameters
    ----------
    names : list[string]
        List of product names.

    Returns
    -------
    dict[string, dict[str, dict]]
        A dictionary mapping each name to a dictionary which contains the products with
        that name (with ID as the key).
    """
    def chunks(l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]

    products = {}
    # 40 names per query fits reasonably well inside the query limit
    for chunk in chunks(names, 40):
        # OR-joining the names queries all of them in a single request.
        query = " OR ".join(chunk)
        products.update(self.query(raw=query))
    # Group the products by name. Every requested name gets an entry, even
    # when no product was found for it (its dict stays empty).
    output = OrderedDict((name, dict()) for name in names)
    for id, metadata in products.items():
        name = metadata['identifier']
        output[name][id] = metadata
    return output
def check_files(self, paths=None, ids=None, directory=None, delete=False):
    """Verify the integrity of product files on disk.

    Integrity is checked by comparing the size and checksum of the file with the respective
    values on the server.

    The input can be a list of products to check or a list of IDs and a directory.

    In cases where multiple products with different IDs exist on the server for given product
    name, the file is considered to be correct if any of them matches the file size and
    checksum. A warning is logged in such situations.

    The corrupt products' OData info is included in the return value to make it easier to
    re-download the products, if necessary.

    Parameters
    ----------
    paths : list[string]
        List of product file paths.
    ids : list[string]
        List of product IDs.
    directory : string
        Directory where the files are located, if checking based on product IDs.
    delete : bool
        Whether to delete corrupt products. Defaults to False.

    Returns
    -------
    dict[str, list[dict]]
        A dictionary listing the invalid or missing files. The dictionary maps the corrupt
        file paths to a list of OData dictionaries of matching products on the server (as
        returned by :meth:`SentinelAPI.get_product_odata()`).
    """
    if not ids and not paths:
        raise ValueError("Must provide either file paths or product IDs and a directory")
    if ids and not directory:
        raise ValueError("Directory value missing")
    # Work on copies: the original implementation extended and appended to the
    # caller's own lists (`ids += ...`, `paths.append(...)`), silently mutating
    # the arguments passed in.
    paths = list(paths) if paths else []
    ids = list(ids) if ids else []

    def name_from_path(path):
        # Product name is the file name without directory or extension.
        return splitext(basename(path))[0]

    # Get product IDs corresponding to the files on disk
    names = []
    if paths:
        names = list(map(name_from_path, paths))
        result = self._query_names(names)
        for product_dicts in result.values():
            ids += list(product_dicts)
    names_from_paths = set(names)
    ids = set(ids)

    # Collect the OData information for each product
    # Product name -> list of matching odata dicts
    product_infos = defaultdict(list)
    for id in ids:
        odata = self.get_product_odata(id)
        name = odata['title']
        product_infos[name].append(odata)

        # Collect expected file paths for products that were given by ID only.
        if name not in names_from_paths:
            paths.append(join(directory, name + '.zip'))

    # Now go over the list of products and check them
    corrupt = {}
    for path in paths:
        name = name_from_path(path)

        if len(product_infos[name]) > 1:
            self.logger.warning("{} matches multiple products on server".format(path))

        if not exists(path):
            # We will consider missing files as corrupt also
            self.logger.info("{} does not exist on disk".format(path))
            corrupt[path] = product_infos[name]
            continue

        # The file is accepted if ANY matching server product agrees on both
        # size and MD5 checksum.
        is_fine = False
        for product_info in product_infos[name]:
            if (getsize(path) == product_info['size'] and
                    self._md5_compare(path, product_info['md5'])):
                is_fine = True
                break
        if not is_fine:
            self.logger.info("{} is corrupt".format(path))
            corrupt[path] = product_infos[name]
            if delete:
                remove(path)

    return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
    """Compare a given MD5 checksum with one calculated from a file."""
    digest = hashlib.md5()
    with closing(self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B",
                            unit_scale=True)) as progress:
        with open(file_path, "rb") as stream:
            # Read in fixed-size blocks until EOF (empty bytes sentinel).
            for block in iter(lambda: stream.read(block_size), b""):
                digest.update(block)
                progress.update(len(block))
    # Checksums on the server vary in case, so compare case-insensitively.
    return digest.hexdigest().lower() == checksum.lower()
def _download(self, url, path, session, file_size):
    """Stream `url` into `path`, resuming an existing partial file.

    Returns the number of bytes transferred by this call, excluding any
    bytes already present in `path` before the call.
    """
    headers = {}
    continuing = exists(path)
    if continuing:
        already_downloaded_bytes = getsize(path)
        # Ask the server to send only the missing remainder of the file.
        headers = {'Range': 'bytes={}-'.format(already_downloaded_bytes)}
    else:
        already_downloaded_bytes = 0
    downloaded_bytes = 0
    with closing(session.get(url, stream=True, auth=session.auth,
                             headers=headers, timeout=self.timeout)) as r, \
            closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
                               unit_scale=True, initial=already_downloaded_bytes)) as progress:
        _check_scihub_response(r, test_json=False)
        chunk_size = 2 ** 20  # download in 1 MB chunks
        # Append when resuming so previously fetched bytes are preserved.
        mode = 'ab' if continuing else 'wb'
        with open(path, mode) as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    progress.update(len(chunk))
                    downloaded_bytes += len(chunk)
    # Return the number of bytes downloaded
    return downloaded_bytes
def _tqdm(self, **kwargs):
    """tqdm progressbar wrapper. May be overridden to customize progressbar behavior"""
    # A disabled tqdm instance is a no-op, so all progress reporting can be
    # switched off through the show_progressbars flag alone.
    kwargs['disable'] = not self.show_progressbars
    return tqdm(**kwargs)
|
sentinelsat/sentinelsat | sentinelsat/sentinel.py | SentinelAPI.format_query | python | def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts) | Create a OpenSearch API query string. | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L152-L214 | [
"def format_query_date(in_date):\n \"\"\"\n Format a date, datetime or a YYYYMMDD string input as YYYY-MM-DDThh:mm:ssZ\n or validate a date string as suitable for the full text search interface and return it.\n\n `None` will be converted to '\\*', meaning an unlimited date bound in date ranges.\n\n Parameters\n ----------\n in_date : str or datetime or date or None\n Date to be formatted\n\n Returns\n -------\n str\n Formatted string\n\n Raises\n ------\n ValueError\n If the input date type is incorrect or passed date string is invalid\n \"\"\"\n if in_date is None:\n return '*'\n if isinstance(in_date, (datetime, date)):\n return in_date.strftime('%Y-%m-%dT%H:%M:%SZ')\n elif not isinstance(in_date, string_types):\n raise ValueError('Expected a string or a datetime object. Received {}.'.format(in_date))\n\n in_date = in_date.strip()\n if in_date == '*':\n # '*' can be used for one-sided range queries e.g. ingestiondate:[* TO NOW-1YEAR]\n return in_date\n\n # Reference: https://cwiki.apache.org/confluence/display/solr/Working+with+Dates\n\n # ISO-8601 date or NOW\n valid_date_pattern = r'^(?:\\d{4}-\\d\\d-\\d\\dT\\d\\d:\\d\\d:\\d\\d(?:\\.\\d+)?Z|NOW)'\n # date arithmetic suffix is allowed\n units = r'(?:YEAR|MONTH|DAY|HOUR|MINUTE|SECOND)'\n valid_date_pattern += r'(?:[-+]\\d+{}S?)*'.format(units)\n # dates can be rounded to a unit of time\n # e.g. \"NOW/DAY\" for dates since 00:00 today\n valid_date_pattern += r'(?:/{}S?)*$'.format(units)\n in_date = in_date.strip()\n if re.match(valid_date_pattern, in_date):\n return in_date\n\n try:\n return datetime.strptime(in_date, '%Y%m%d').strftime('%Y-%m-%dT%H:%M:%SZ')\n except ValueError:\n raise ValueError('Unsupported date value {}'.format(in_date))\n"
] | class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
             show_progressbars=True, timeout=None):
    """Set up the HTTP session and connection parameters for the DataHub."""
    session = requests.Session()
    # Credentials may come from ~/.netrc instead, in which case both are None.
    if user and password:
        session.auth = (user, password)
    self.session = session
    # Normalise the base URL to always end with a slash so urljoin() works.
    if api_url.endswith('/'):
        self.api_url = api_url
    else:
        self.api_url = api_url + '/'
    self.page_size = 100
    self.user_agent = 'sentinelsat/' + sentinelsat_version
    self.session.headers['User-Agent'] = self.user_agent
    self.show_progressbars = show_progressbars
    self.timeout = timeout
    # For unit tests
    self._last_query = None
    self._last_response = None
def query(self, area=None, date=None, raw=None, area_relation='Intersects',
          order_by=None, limit=None, offset=0, **keywords):
    """Query the OpenSearch API with the coordinates of an area, a date interval
    and any other search keywords accepted by the API.

    Parameters
    ----------
    area : str, optional
        The area of interest formatted as a Well-Known Text string.
    date : tuple of (str or datetime) or str, optional
        A time interval filter based on the Sensing Start Time of the products.
        Expects a tuple of (start, end), e.g. ("NOW-1DAY", "NOW").
        The timestamps can be either a Python datetime or a string in one of the
        following formats:

        - yyyyMMdd
        - yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)
        - yyyy-MM-ddThh:mm:ssZ
        - NOW
        - NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)
        - NOW+<n>DAY(S)
        - yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)
        - NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit

        Alternatively, an already fully formatted string such as "[NOW-1DAY TO NOW]" can be
        used as well.
    raw : str, optional
        Additional query text that will be appended to the query.
    area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
        What relation to use for testing the AOI. Case insensitive.

        - Intersects: true if the AOI and the footprint intersect (default)
        - Contains: true if the AOI is inside the footprint
        - IsWithin: true if the footprint is inside the AOI
    order_by: str, optional
        A comma-separated list of fields to order by (on server side).
        Prefix the field name by '+' or '-' to sort in ascending or descending order,
        respectively. Ascending order is used if prefix is omitted.
        Example: "cloudcoverpercentage, -beginposition".
    limit: int, optional
        Maximum number of products returned. Defaults to no limit.
    offset: int, optional
        The number of results to skip. Defaults to 0.
    **keywords
        Additional keywords can be used to specify other query parameters,
        e.g. `relativeorbitnumber=70`.
        See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
        for a full list.

        Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.
        `None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.
        Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.

        The time interval formats accepted by the `date` parameter can also be used with
        any other parameters that expect time intervals (that is: 'beginposition', 'endposition',
        'date', 'creationdate', and 'ingestiondate').

    Returns
    -------
    dict[string, dict]
        Products returned by the query as a dictionary with the product ID as the key and
        the product's attributes (a dictionary) as the value.
    """
    # Build the full-text query string from all filters, then page through
    # the results server-side via _load_query().
    query = self.format_query(area, date, raw, area_relation, **keywords)
    self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
                      order_by, limit, offset, query)
    formatted_order_by = _format_order_by(order_by)
    response, count = self._load_query(query, formatted_order_by, limit, offset)
    self.logger.info("Found %s products", count)
    # Convert the raw OpenSearch JSON entries into {product_id: attributes}.
    return _parse_opensearch_response(response)
@staticmethod
def query_raw(self, query, order_by=None, limit=None, offset=0):
"""
Do a full-text query on the OpenSearch API using the format specified in
https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release.
Parameters
----------
query : str
The query string.
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively.
Ascending order is used, if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
"""
warnings.warn(
"query_raw() has been merged with query(). use query(raw=...) instead.",
PendingDeprecationWarning
)
return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
"""Get the number of products matching a query.
Accepted parameters are identical to :meth:`SentinelAPI.query()`.
This is a significantly more efficient alternative to doing `len(api.query())`,
which can take minutes to run for queries matching thousands of products.
Returns
-------
int
The number of products matching a query.
"""
for kw in ['order_by', 'limit', 'offset']:
# Allow these function arguments to be included for compatibility with query(),
# but ignore them.
if kw in keywords:
del keywords[kw]
query = self.format_query(area, date, raw, area_relation, **keywords)
_, total_count = self._load_query(query, limit=0)
return total_count
def _load_query(self, query, order_by=None, limit=None, offset=0):
    """Fetch all result pages for `query` and return (products, total_count)."""
    products, count = self._load_subquery(query, order_by, limit, offset)

    # repeat query until all results have been loaded
    max_offset = count
    if limit is not None:
        max_offset = min(count, offset + limit)
    if max_offset > offset + self.page_size:
        progress = self._tqdm(desc="Querying products",
                              initial=self.page_size,
                              total=max_offset - offset,
                              unit=' products')
        for new_offset in range(offset + self.page_size, max_offset, self.page_size):
            new_limit = limit
            if limit is not None:
                # Shrink the remaining limit by the number of results
                # already fetched before this page.
                new_limit = limit - new_offset + offset
            ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
            progress.update(len(ret))
            products += ret
        progress.close()
    return products, count
def _load_subquery(self, query, order_by=None, limit=None, offset=0):
    """Fetch a single page of results and return (entries, total_count)."""
    # store last query (for testing)
    self._last_query = query
    self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)

    # load query results
    url = self._format_url(order_by, limit, offset)
    # The query is POSTed as a form field to avoid URL length limits.
    response = self.session.post(url, {'q': query}, auth=self.session.auth,
                                 headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
                                 timeout=self.timeout)
    _check_scihub_response(response)

    # store last status code (for testing)
    self._last_response = response

    # parse response content
    try:
        json_feed = response.json()['feed']
        if json_feed['opensearch:totalResults'] is None:
            # We are using some unintended behavior of the server that a null is
            # returned as the total results value when the query string was incorrect.
            raise SentinelAPIError(
                'Invalid query string. Check the parameters and format.', response)
        total_results = int(json_feed['opensearch:totalResults'])
    except (ValueError, KeyError):
        raise SentinelAPIError('API response not valid. JSON decoding failed.', response)

    products = json_feed.get('entry', [])
    # this verification is necessary because if the query returns only
    # one product, self.products will be a dict not a list
    if isinstance(products, dict):
        products = [products]

    return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
@staticmethod
def to_geojson(products):
    """Return the products from a query response as a GeoJSON with the values in their
    appropriate Python types.
    """
    feature_list = []
    for i, (product_id, props) in enumerate(products.items()):
        # Copy so the caller's product dictionaries are not mutated below.
        props = props.copy()
        props['id'] = product_id
        poly = geomet.wkt.loads(props['footprint'])
        del props['footprint']
        del props['gmlfootprint']
        # Fix "'datetime' is not JSON serializable"
        for k, v in props.items():
            if isinstance(v, (date, datetime)):
                props[k] = v.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        feature_list.append(
            geojson.Feature(geometry=poly, id=i, properties=props)
        )
    return geojson.FeatureCollection(feature_list)
@staticmethod
def to_dataframe(products):
"""Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index')
@staticmethod
def to_geodataframe(products):
    """Return the products from a query response as a GeoPandas GeoDataFrame
    with the values in their appropriate Python types.
    """
    # GeoPandas and Shapely are optional dependencies; import lazily.
    try:
        import geopandas as gpd
        import shapely.wkt
    except ImportError:
        raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")

    crs = {'init': 'epsg:4326'}  # WGS84
    if len(products) == 0:
        return gpd.GeoDataFrame(crs=crs)

    df = SentinelAPI.to_dataframe(products)
    # Parse the WKT footprints into Shapely geometries for the geometry column.
    geometry = [shapely.wkt.loads(fp) for fp in df['footprint']]
    # remove useless columns
    df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)
    return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
def get_product_odata(self, id, full=False):
    """Access OData API to get info about a product.

    Returns a dict containing the id, title, size, md5sum, date, footprint and download url
    of the product. The date field corresponds to the Start ContentDate value.

    If `full` is set to True, then the full, detailed metadata of the product is returned
    in addition to the above.

    Parameters
    ----------
    id : string
        The UUID of the product to query
    full : bool
        Whether to get the full metadata for the Product. False by default.

    Returns
    -------
    dict[str, Any]
        A dictionary with an item for each metadata attribute

    Notes
    -----
    For a full list of mappings between the OpenSearch (Solr) and OData attribute names
    see the following definition files:
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
    """
    url = urljoin(self.api_url, u"odata/v1/Products('{}')?$format=json".format(id))
    if full:
        # $expand=Attributes pulls the complete per-product metadata set.
        url += '&$expand=Attributes'
    response = self.session.get(url, auth=self.session.auth,
                                timeout=self.timeout)
    _check_scihub_response(response)
    values = _parse_odata_response(response.json()['d'])
    return values
def _trigger_offline_retrieval(self, url):
    """Triggers retrieval of an offline product.

    Trying to download an offline product triggers its retrieval from the long term archive.
    The returned HTTP status code conveys whether this was successful.

    Parameters
    ----------
    url : string
        URL for downloading the product

    Notes
    -----
    https://scihub.copernicus.eu/userguide/LongTermArchive
    """
    with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
        # check https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
        if r.status_code == 202:
            # Retrieval from the LTA was successfully triggered.
            self.logger.info("Accepted for retrieval")
        elif r.status_code == 503:
            self.logger.error("Request not accepted")
            raise SentinelAPILTAError('Request for retrieval from LTA not accepted', r)
        elif r.status_code == 403:
            self.logger.error("Requests exceed user quota")
            raise SentinelAPILTAError('Requests for retrieval from LTA exceed user quota', r)
        elif r.status_code == 500:
            # should not happen
            self.logger.error("Trying to download an offline product")
            raise SentinelAPILTAError('Trying to download an offline product', r)
    # Any other status code (e.g. 200 for an already-online product) is
    # passed back to the caller unchanged.
    return r.status_code
def download(self, id, directory_path='.', checksum=True):
    """Download a product.

    Uses the filename on the server for the downloaded file, e.g.
    "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

    Incomplete downloads are continued and complete files are skipped.

    Parameters
    ----------
    id : string
        UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
    directory_path : string, optional
        Where the file will be downloaded
    checksum : bool, optional
        If True, verify the downloaded file's integrity by checking its MD5 checksum.
        Throws InvalidChecksumError if the checksum does not match.
        Defaults to True.

    Returns
    -------
    product_info : dict
        Dictionary containing the product's info from get_product_info() as well as
        the path on disk.

    Raises
    ------
    InvalidChecksumError
        If the MD5 checksum does not match the checksum on the server.
    """
    product_info = self.get_product_odata(id)
    path = join(directory_path, product_info['title'] + '.zip')
    product_info['path'] = path
    product_info['downloaded_bytes'] = 0

    self.logger.info('Downloading %s to %s', id, path)

    if exists(path):
        # We assume that the product has been downloaded and is complete
        return product_info

    # An incomplete download triggers the retrieval from the LTA if the product is not online
    if not product_info['Online']:
        self.logger.warning(
            'Product %s is not online. Triggering retrieval from long term archive.',
            product_info['id'])
        self._trigger_offline_retrieval(product_info['url'])
        return product_info

    # Use a temporary file for downloading
    temp_path = path + '.incomplete'

    skip_download = False
    if exists(temp_path):
        if getsize(temp_path) > product_info['size']:
            # An oversized partial file cannot be resumed; start over.
            self.logger.warning(
                "Existing incomplete file %s is larger than the expected final size"
                " (%s vs %s bytes). Deleting it.",
                str(temp_path), getsize(temp_path), product_info['size'])
            remove(temp_path)
        elif getsize(temp_path) == product_info['size']:
            if self._md5_compare(temp_path, product_info['md5']):
                # The file was fully downloaded earlier; only the rename remains.
                skip_download = True
            else:
                # Log a warning since this should never happen
                self.logger.warning(
                    "Existing incomplete file %s appears to be fully downloaded but "
                    "its checksum is incorrect. Deleting it.",
                    str(temp_path))
                remove(temp_path)
        else:
            # continue downloading
            self.logger.info(
                "Download will resume from existing incomplete file %s.", temp_path)
            pass

    if not skip_download:
        # Store the number of downloaded bytes for unit tests
        product_info['downloaded_bytes'] = self._download(
            product_info['url'], temp_path, self.session, product_info['size'])

    # Check integrity with MD5 checksum
    if checksum is True:
        if not self._md5_compare(temp_path, product_info['md5']):
            remove(temp_path)
            raise InvalidChecksumError('File corrupt: checksums do not match')

    # Download successful, rename the temporary file to its proper name
    shutil.move(temp_path, path)
    return product_info
def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
    """Download a list of products.

    Takes a list of product IDs as input. This means that the return value of query() can be
    passed directly to this method.

    File names on the server are used for the downloaded files, e.g.
    "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

    In case of interruptions or other exceptions, downloading will restart from where it left
    off. Downloading is attempted at most max_attempts times to avoid getting stuck with
    unrecoverable errors.

    Parameters
    ----------
    products : list
        List of product IDs
    directory_path : string
        Directory where the downloaded files will be downloaded
    max_attempts : int, optional
        Number of allowed retries before giving up downloading a product. Defaults to 10.
    checksum : bool, optional
        If True, verify the downloaded files' integrity by checking its MD5 checksum.
        Throws InvalidChecksumError if the checksum does not match.
        Defaults to True.

    Raises
    ------
    Raises the most recent downloading exception if all downloads failed.

    Returns
    -------
    dict[string, dict]
        A dictionary containing the return value from download() for each successfully
        downloaded product.
    dict[string, dict]
        A dictionary containing the product information for products whose retrieval
        from the long term archive was successfully triggered.
    set[string]
        The list of products that failed to download.
    """
    product_ids = list(products)
    self.logger.info("Will download %d products", len(product_ids))
    return_values = OrderedDict()
    last_exception = None
    for i, product_id in enumerate(products):
        # Retry each product up to max_attempts times; only retryable error
        # types fall through to the next attempt.
        for attempt_num in range(max_attempts):
            try:
                product_info = self.download(product_id, directory_path, checksum)
                return_values[product_id] = product_info
                break
            except (KeyboardInterrupt, SystemExit):
                # Never swallow user interrupts or interpreter shutdown.
                raise
            except InvalidChecksumError as e:
                last_exception = e
                # Corrupt download: retrying re-downloads the file.
                self.logger.warning(
                    "Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
            except SentinelAPILTAError as e:
                last_exception = e
                # An LTA retrieval error will not resolve by retrying; give up
                # on this product and move on to the next one.
                self.logger.exception("There was an error retrieving %s from the LTA", product_id)
                break
            except Exception as e:
                last_exception = e
                self.logger.exception("There was an error downloading %s", product_id)
        self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
    failed = set(products) - set(return_values)
    # split up successfully processed products into downloaded and only triggered retrieval from the LTA
    triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
    downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
    if len(failed) == len(product_ids) and last_exception is not None:
        # Everything failed: surface the most recent error to the caller.
        raise last_exception
    return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
def _query_names(self, names):
"""Find products by their names, e.g.
S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.
Note that duplicates exist on server, so multiple products can be returned for each name.
Parameters
----------
names : list[string]
List of product names.
Returns
-------
dict[string, dict[str, dict]]
A dictionary mapping each name to a dictionary which contains the products with
that name (with ID as the key).
"""
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
products = {}
# 40 names per query fits reasonably well inside the query limit
for chunk in chunks(names, 40):
query = " OR ".join(chunk)
products.update(self.query(raw=query))
# Group the products
output = OrderedDict((name, dict()) for name in names)
for id, metadata in products.items():
name = metadata['identifier']
output[name][id] = metadata
return output
def check_files(self, paths=None, ids=None, directory=None, delete=False):
"""Verify the integrity of product files on disk.
Integrity is checked by comparing the size and checksum of the file with the respective
values on the server.
The input can be a list of products to check or a list of IDs and a directory.
In cases where multiple products with different IDs exist on the server for given product
name, the file is considered to be correct if any of them matches the file size and
checksum. A warning is logged in such situations.
The corrupt products' OData info is included in the return value to make it easier to
re-download the products, if necessary.
Parameters
----------
paths : list[string]
List of product file paths.
ids : list[string]
List of product IDs.
directory : string
Directory where the files are located, if checking based on product IDs.
delete : bool
Whether to delete corrupt products. Defaults to False.
Returns
-------
dict[str, list[dict]]
A dictionary listing the invalid or missing files. The dictionary maps the corrupt
file paths to a list of OData dictionaries of matching products on the server (as
returned by :meth:`SentinelAPI.get_product_odata()`).
"""
if not ids and not paths:
raise ValueError("Must provide either file paths or product IDs and a directory")
if ids and not directory:
raise ValueError("Directory value missing")
paths = paths or []
ids = ids or []
def name_from_path(path):
return splitext(basename(path))[0]
# Get product IDs corresponding to the files on disk
names = []
if paths:
names = list(map(name_from_path, paths))
result = self._query_names(names)
for product_dicts in result.values():
ids += list(product_dicts)
names_from_paths = set(names)
ids = set(ids)
# Collect the OData information for each product
# Product name -> list of matching odata dicts
product_infos = defaultdict(list)
for id in ids:
odata = self.get_product_odata(id)
name = odata['title']
product_infos[name].append(odata)
# Collect
if name not in names_from_paths:
paths.append(join(directory, name + '.zip'))
# Now go over the list of products and check them
corrupt = {}
for path in paths:
name = name_from_path(path)
if len(product_infos[name]) > 1:
self.logger.warning("{} matches multiple products on server".format(path))
if not exists(path):
# We will consider missing files as corrupt also
self.logger.info("{} does not exist on disk".format(path))
corrupt[path] = product_infos[name]
continue
is_fine = False
for product_info in product_infos[name]:
if (getsize(path) == product_info['size'] and
self._md5_compare(path, product_info['md5'])):
is_fine = True
break
if not is_fine:
self.logger.info("{} is corrupt".format(path))
corrupt[path] = product_infos[name]
if delete:
remove(path)
return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
"""Compare a given MD5 checksum with one calculated from a file."""
with closing(self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B",
unit_scale=True)) as progress:
md5 = hashlib.md5()
with open(file_path, "rb") as f:
while True:
block_data = f.read(block_size)
if not block_data:
break
md5.update(block_data)
progress.update(len(block_data))
return md5.hexdigest().lower() == checksum.lower()
def _download(self, url, path, session, file_size):
headers = {}
continuing = exists(path)
if continuing:
already_downloaded_bytes = getsize(path)
headers = {'Range': 'bytes={}-'.format(already_downloaded_bytes)}
else:
already_downloaded_bytes = 0
downloaded_bytes = 0
with closing(session.get(url, stream=True, auth=session.auth,
headers=headers, timeout=self.timeout)) as r, \
closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
unit_scale=True, initial=already_downloaded_bytes)) as progress:
_check_scihub_response(r, test_json=False)
chunk_size = 2 ** 20 # download in 1 MB chunks
mode = 'ab' if continuing else 'wb'
with open(path, mode) as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress.update(len(chunk))
downloaded_bytes += len(chunk)
# Return the number of bytes downloaded
return downloaded_bytes
def _tqdm(self, **kwargs):
"""tqdm progressbar wrapper. May be overridden to customize progressbar behavior"""
kwargs.update({'disable': not self.show_progressbars})
return tqdm(**kwargs)
|
sentinelsat/sentinelsat | sentinelsat/sentinel.py | SentinelAPI.query_raw | python | def query_raw(self, query, order_by=None, limit=None, offset=0):
warnings.warn(
"query_raw() has been merged with query(). use query(raw=...) instead.",
PendingDeprecationWarning
)
return self.query(raw=query, order_by=order_by, limit=limit, offset=offset) | Do a full-text query on the OpenSearch API using the format specified in
https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release.
Parameters
----------
query : str
The query string.
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively.
Ascending order is used, if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value. | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L216-L247 | [
"def query(self, area=None, date=None, raw=None, area_relation='Intersects',\n order_by=None, limit=None, offset=0, **keywords):\n \"\"\"Query the OpenSearch API with the coordinates of an area, a date interval\n and any other search keywords accepted by the API.\n\n Parameters\n ----------\n area : str, optional\n The area of interest formatted as a Well-Known Text string.\n date : tuple of (str or datetime) or str, optional\n A time interval filter based on the Sensing Start Time of the products.\n Expects a tuple of (start, end), e.g. (\"NOW-1DAY\", \"NOW\").\n The timestamps can be either a Python datetime or a string in one of the\n following formats:\n\n - yyyyMMdd\n - yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)\n - yyyy-MM-ddThh:mm:ssZ\n - NOW\n - NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)\n - NOW+<n>DAY(S)\n - yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)\n - NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit\n\n Alternatively, an already fully formatted string such as \"[NOW-1DAY TO NOW]\" can be\n used as well.\n raw : str, optional\n Additional query text that will be appended to the query.\n area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional\n What relation to use for testing the AOI. Case insensitive.\n\n - Intersects: true if the AOI and the footprint intersect (default)\n - Contains: true if the AOI is inside the footprint\n - IsWithin: true if the footprint is inside the AOI\n\n order_by: str, optional\n A comma-separated list of fields to order by (on server side).\n Prefix the field name by '+' or '-' to sort in ascending or descending order,\n respectively. Ascending order is used if prefix is omitted.\n Example: \"cloudcoverpercentage, -beginposition\".\n limit: int, optional\n Maximum number of products returned. Defaults to no limit.\n offset: int, optional\n The number of results to skip. Defaults to 0.\n **keywords\n Additional keywords can be used to specify other query parameters,\n e.g. 
`relativeorbitnumber=70`.\n See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch\n for a full list.\n\n\n Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.\n `None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.\n Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.\n\n The time interval formats accepted by the `date` parameter can also be used with\n any other parameters that expect time intervals (that is: 'beginposition', 'endposition',\n 'date', 'creationdate', and 'ingestiondate').\n\n Returns\n -------\n dict[string, dict]\n Products returned by the query as a dictionary with the product ID as the key and\n the product's attributes (a dictionary) as the value.\n \"\"\"\n query = self.format_query(area, date, raw, area_relation, **keywords)\n\n self.logger.debug(\"Running query: order_by=%s, limit=%s, offset=%s, query=%s\",\n order_by, limit, offset, query)\n formatted_order_by = _format_order_by(order_by)\n response, count = self._load_query(query, formatted_order_by, limit, offset)\n self.logger.info(\"Found %s products\", count)\n return _parse_opensearch_response(response)\n"
] | class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
show_progressbars=True, timeout=None):
self.session = requests.Session()
if user and password:
self.session.auth = (user, password)
self.api_url = api_url if api_url.endswith('/') else api_url + '/'
self.page_size = 100
self.user_agent = 'sentinelsat/' + sentinelsat_version
self.session.headers['User-Agent'] = self.user_agent
self.show_progressbars = show_progressbars
self.timeout = timeout
# For unit tests
self._last_query = None
self._last_response = None
def query(self, area=None, date=None, raw=None, area_relation='Intersects',
order_by=None, limit=None, offset=0, **keywords):
"""Query the OpenSearch API with the coordinates of an area, a date interval
and any other search keywords accepted by the API.
Parameters
----------
area : str, optional
The area of interest formatted as a Well-Known Text string.
date : tuple of (str or datetime) or str, optional
A time interval filter based on the Sensing Start Time of the products.
Expects a tuple of (start, end), e.g. ("NOW-1DAY", "NOW").
The timestamps can be either a Python datetime or a string in one of the
following formats:
- yyyyMMdd
- yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)
- yyyy-MM-ddThh:mm:ssZ
- NOW
- NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)
- NOW+<n>DAY(S)
- yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)
- NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit
Alternatively, an already fully formatted string such as "[NOW-1DAY TO NOW]" can be
used as well.
raw : str, optional
Additional query text that will be appended to the query.
area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
What relation to use for testing the AOI. Case insensitive.
- Intersects: true if the AOI and the footprint intersect (default)
- Contains: true if the AOI is inside the footprint
- IsWithin: true if the footprint is inside the AOI
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order,
respectively. Ascending order is used if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
**keywords
Additional keywords can be used to specify other query parameters,
e.g. `relativeorbitnumber=70`.
See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
for a full list.
Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.
`None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.
Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.
The time interval formats accepted by the `date` parameter can also be used with
any other parameters that expect time intervals (that is: 'beginposition', 'endposition',
'date', 'creationdate', and 'ingestiondate').
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
"""
query = self.format_query(area, date, raw, area_relation, **keywords)
self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
order_by, limit, offset, query)
formatted_order_by = _format_order_by(order_by)
response, count = self._load_query(query, formatted_order_by, limit, offset)
self.logger.info("Found %s products", count)
return _parse_opensearch_response(response)
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
"""Create a OpenSearch API query string.
"""
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts)
def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
"""Get the number of products matching a query.
Accepted parameters are identical to :meth:`SentinelAPI.query()`.
This is a significantly more efficient alternative to doing `len(api.query())`,
which can take minutes to run for queries matching thousands of products.
Returns
-------
int
The number of products matching a query.
"""
for kw in ['order_by', 'limit', 'offset']:
# Allow these function arguments to be included for compatibility with query(),
# but ignore them.
if kw in keywords:
del keywords[kw]
query = self.format_query(area, date, raw, area_relation, **keywords)
_, total_count = self._load_query(query, limit=0)
return total_count
def _load_query(self, query, order_by=None, limit=None, offset=0):
products, count = self._load_subquery(query, order_by, limit, offset)
# repeat query until all results have been loaded
max_offset = count
if limit is not None:
max_offset = min(count, offset + limit)
if max_offset > offset + self.page_size:
progress = self._tqdm(desc="Querying products",
initial=self.page_size,
total=max_offset - offset,
unit=' products')
for new_offset in range(offset + self.page_size, max_offset, self.page_size):
new_limit = limit
if limit is not None:
new_limit = limit - new_offset + offset
ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
progress.update(len(ret))
products += ret
progress.close()
return products, count
def _load_subquery(self, query, order_by=None, limit=None, offset=0):
# store last query (for testing)
self._last_query = query
self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)
# load query results
url = self._format_url(order_by, limit, offset)
response = self.session.post(url, {'q': query}, auth=self.session.auth,
headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
timeout=self.timeout)
_check_scihub_response(response)
# store last status code (for testing)
self._last_response = response
# parse response content
try:
json_feed = response.json()['feed']
if json_feed['opensearch:totalResults'] is None:
# We are using some unintended behavior of the server that a null is
# returned as the total results value when the query string was incorrect.
raise SentinelAPIError(
'Invalid query string. Check the parameters and format.', response)
total_results = int(json_feed['opensearch:totalResults'])
except (ValueError, KeyError):
raise SentinelAPIError('API response not valid. JSON decoding failed.', response)
products = json_feed.get('entry', [])
# this verification is necessary because if the query returns only
# one product, self.products will be a dict not a list
if isinstance(products, dict):
products = [products]
return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
@staticmethod
def to_geojson(products):
"""Return the products from a query response as a GeoJSON with the values in their
appropriate Python types.
"""
feature_list = []
for i, (product_id, props) in enumerate(products.items()):
props = props.copy()
props['id'] = product_id
poly = geomet.wkt.loads(props['footprint'])
del props['footprint']
del props['gmlfootprint']
# Fix "'datetime' is not JSON serializable"
for k, v in props.items():
if isinstance(v, (date, datetime)):
props[k] = v.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
feature_list.append(
geojson.Feature(geometry=poly, id=i, properties=props)
)
return geojson.FeatureCollection(feature_list)
@staticmethod
def to_dataframe(products):
"""Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index')
@staticmethod
def to_geodataframe(products):
"""Return the products from a query response as a GeoPandas GeoDataFrame
with the values in their appropriate Python types.
"""
try:
import geopandas as gpd
import shapely.wkt
except ImportError:
raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")
crs = {'init': 'epsg:4326'} # WGS84
if len(products) == 0:
return gpd.GeoDataFrame(crs=crs)
df = SentinelAPI.to_dataframe(products)
geometry = [shapely.wkt.loads(fp) for fp in df['footprint']]
# remove useless columns
df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)
return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
def get_product_odata(self, id, full=False):
"""Access OData API to get info about a product.
Returns a dict containing the id, title, size, md5sum, date, footprint and download url
of the product. The date field corresponds to the Start ContentDate value.
If `full` is set to True, then the full, detailed metadata of the product is returned
in addition to the above.
Parameters
----------
id : string
The UUID of the product to query
full : bool
Whether to get the full metadata for the Product. False by default.
Returns
-------
dict[str, Any]
A dictionary with an item for each metadata attribute
Notes
-----
For a full list of mappings between the OpenSearch (Solr) and OData attribute names
see the following definition files:
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
"""
url = urljoin(self.api_url, u"odata/v1/Products('{}')?$format=json".format(id))
if full:
url += '&$expand=Attributes'
response = self.session.get(url, auth=self.session.auth,
timeout=self.timeout)
_check_scihub_response(response)
values = _parse_odata_response(response.json()['d'])
return values
def _trigger_offline_retrieval(self, url):
""" Triggers retrieval of an offline product
Trying to download an offline product triggers its retrieval from the long term archive.
The returned HTTP status code conveys whether this was successful.
Parameters
----------
url : string
URL for downloading the product
Notes
-----
https://scihub.copernicus.eu/userguide/LongTermArchive
"""
with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
# check https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
if r.status_code == 202:
self.logger.info("Accepted for retrieval")
elif r.status_code == 503:
self.logger.error("Request not accepted")
raise SentinelAPILTAError('Request for retrieval from LTA not accepted', r)
elif r.status_code == 403:
self.logger.error("Requests exceed user quota")
raise SentinelAPILTAError('Requests for retrieval from LTA exceed user quota', r)
elif r.status_code == 500:
# should not happen
self.logger.error("Trying to download an offline product")
raise SentinelAPILTAError('Trying to download an offline product', r)
return r.status_code
def download(self, id, directory_path='.', checksum=True):
"""Download a product.
Uses the filename on the server for the downloaded file, e.g.
"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".
Incomplete downloads are continued and complete files are skipped.
Parameters
----------
id : string
UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
directory_path : string, optional
Where the file will be downloaded
checksum : bool, optional
If True, verify the downloaded file's integrity by checking its MD5 checksum.
Throws InvalidChecksumError if the checksum does not match.
Defaults to True.
Returns
-------
product_info : dict
Dictionary containing the product's info from get_product_info() as well as
the path on disk.
Raises
------
InvalidChecksumError
If the MD5 checksum does not match the checksum on the server.
"""
product_info = self.get_product_odata(id)
path = join(directory_path, product_info['title'] + '.zip')
product_info['path'] = path
product_info['downloaded_bytes'] = 0
self.logger.info('Downloading %s to %s', id, path)
if exists(path):
# We assume that the product has been downloaded and is complete
return product_info
# An incomplete download triggers the retrieval from the LTA if the product is not online
if not product_info['Online']:
self.logger.warning(
'Product %s is not online. Triggering retrieval from long term archive.',
product_info['id'])
self._trigger_offline_retrieval(product_info['url'])
return product_info
# Use a temporary file for downloading
temp_path = path + '.incomplete'
skip_download = False
if exists(temp_path):
if getsize(temp_path) > product_info['size']:
self.logger.warning(
"Existing incomplete file %s is larger than the expected final size"
" (%s vs %s bytes). Deleting it.",
str(temp_path), getsize(temp_path), product_info['size'])
remove(temp_path)
elif getsize(temp_path) == product_info['size']:
if self._md5_compare(temp_path, product_info['md5']):
skip_download = True
else:
# Log a warning since this should never happen
self.logger.warning(
"Existing incomplete file %s appears to be fully downloaded but "
"its checksum is incorrect. Deleting it.",
str(temp_path))
remove(temp_path)
else:
# continue downloading
self.logger.info(
"Download will resume from existing incomplete file %s.", temp_path)
pass
if not skip_download:
# Store the number of downloaded bytes for unit tests
product_info['downloaded_bytes'] = self._download(
product_info['url'], temp_path, self.session, product_info['size'])
# Check integrity with MD5 checksum
if checksum is True:
if not self._md5_compare(temp_path, product_info['md5']):
remove(temp_path)
raise InvalidChecksumError('File corrupt: checksums do not match')
# Download successful, rename the temporary file to its proper name
shutil.move(temp_path, path)
return product_info
def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
"""Download a list of products.
Takes a list of product IDs as input. This means that the return value of query() can be
passed directly to this method.
File names on the server are used for the downloaded files, e.g.
"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".
In case of interruptions or other exceptions, downloading will restart from where it left
off. Downloading is attempted at most max_attempts times to avoid getting stuck with
unrecoverable errors.
Parameters
----------
products : list
List of product IDs
directory_path : string
Directory where the downloaded files will be downloaded
max_attempts : int, optional
Number of allowed retries before giving up downloading a product. Defaults to 10.
checksum : bool, optional
If True, verify the downloaded files' integrity by checking its MD5 checksum.
Throws InvalidChecksumError if the checksum does not match.
Defaults to True.
Raises
------
Raises the most recent downloading exception if all downloads failed.
Returns
-------
dict[string, dict]
A dictionary containing the return value from download() for each successfully
downloaded product.
dict[string, dict]
A dictionary containing the product information for products whose retrieval
from the long term archive was successfully triggered.
set[string]
The list of products that failed to download.
"""
product_ids = list(products)
self.logger.info("Will download %d products", len(product_ids))
return_values = OrderedDict()
last_exception = None
for i, product_id in enumerate(products):
for attempt_num in range(max_attempts):
try:
product_info = self.download(product_id, directory_path, checksum)
return_values[product_id] = product_info
break
except (KeyboardInterrupt, SystemExit):
raise
except InvalidChecksumError as e:
last_exception = e
self.logger.warning(
"Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
except SentinelAPILTAError as e:
last_exception = e
self.logger.exception("There was an error retrieving %s from the LTA", product_id)
break
except Exception as e:
last_exception = e
self.logger.exception("There was an error downloading %s", product_id)
self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
failed = set(products) - set(return_values)
# split up sucessfully processed products into downloaded and only triggered retrieval from the LTA
triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
if len(failed) == len(product_ids) and last_exception is not None:
raise last_exception
return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
def _query_names(self, names):
    """Find products by their names, e.g.
    S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.

    Note that duplicates exist on server, so multiple products can be returned for each name.

    Parameters
    ----------
    names : list[string]
        List of product names.

    Returns
    -------
    dict[string, dict[str, dict]]
        A dictionary mapping each name to a dictionary which contains the products with
        that name (with ID as the key).
    """
    found = {}
    # 40 names per query fits reasonably well inside the query limit
    batch_size = 40
    for start in range(0, len(names), batch_size):
        batch = names[start:start + batch_size]
        found.update(self.query(raw=" OR ".join(batch)))
    # Group the matches by product name; names without a match map to an empty dict.
    grouped = OrderedDict((name, {}) for name in names)
    for uuid, metadata in found.items():
        grouped[metadata['identifier']][uuid] = metadata
    return grouped
def check_files(self, paths=None, ids=None, directory=None, delete=False):
    """Verify the integrity of product files on disk.

    Integrity is checked by comparing the size and checksum of the file with the respective
    values on the server.

    The input can be a list of products to check or a list of IDs and a directory.

    In cases where multiple products with different IDs exist on the server for given product
    name, the file is considered to be correct if any of them matches the file size and
    checksum. A warning is logged in such situations.

    The corrupt products' OData info is included in the return value to make it easier to
    re-download the products, if necessary.

    Parameters
    ----------
    paths : list[string]
        List of product file paths.
    ids : list[string]
        List of product IDs.
    directory : string
        Directory where the files are located, if checking based on product IDs.
    delete : bool
        Whether to delete corrupt products. Defaults to False.

    Returns
    -------
    dict[str, list[dict]]
        A dictionary listing the invalid or missing files. The dictionary maps the corrupt
        file paths to a list of OData dictionaries of matching products on the server (as
        returned by :meth:`SentinelAPI.get_product_odata()`).
    """
    if not ids and not paths:
        raise ValueError("Must provide either file paths or product IDs and a directory")
    if ids and not directory:
        raise ValueError("Directory value missing")
    paths = paths or []
    ids = ids or []

    def name_from_path(path):
        # Product name is the file name without its extension.
        return splitext(basename(path))[0]

    # Get product IDs corresponding to the files on disk
    names = []
    if paths:
        names = list(map(name_from_path, paths))
        result = self._query_names(names)
        for product_dicts in result.values():
            ids += list(product_dicts)
    names_from_paths = set(names)
    ids = set(ids)

    # Collect the OData information for each product
    # Product name -> list of matching odata dicts
    product_infos = defaultdict(list)
    for id in ids:
        odata = self.get_product_odata(id)
        name = odata['title']
        product_infos[name].append(odata)

        # Collect paths for products that were given by ID only.
        # BUG FIX: record the name as seen so that multiple server products
        # sharing one name do not append (and later check) the same path twice.
        if name not in names_from_paths:
            paths.append(join(directory, name + '.zip'))
            names_from_paths.add(name)

    # Now go over the list of products and check them
    corrupt = {}
    for path in paths:
        name = name_from_path(path)

        if len(product_infos[name]) > 1:
            self.logger.warning("{} matches multiple products on server".format(path))

        if not exists(path):
            # We will consider missing files as corrupt also
            self.logger.info("{} does not exist on disk".format(path))
            corrupt[path] = product_infos[name]
            continue

        # The file is fine if it matches any product with the same name.
        is_fine = False
        for product_info in product_infos[name]:
            if (getsize(path) == product_info['size'] and
                    self._md5_compare(path, product_info['md5'])):
                is_fine = True
                break
        if not is_fine:
            self.logger.info("{} is corrupt".format(path))
            corrupt[path] = product_infos[name]
            if delete:
                remove(path)

    return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
    """Compare a given MD5 checksum with one calculated from a file."""
    digest = hashlib.md5()
    progress = self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B",
                          unit_scale=True)
    with closing(progress), open(file_path, "rb") as f:
        # Read fixed-size blocks until EOF (the b"" sentinel).
        for block in iter(lambda: f.read(block_size), b""):
            digest.update(block)
            progress.update(len(block))
    # Checksums are compared case-insensitively.
    return digest.hexdigest().lower() == checksum.lower()
def _download(self, url, path, session, file_size):
    # Stream `url` to `path`, resuming an earlier partial download if `path`
    # already exists. Returns the number of bytes fetched during THIS call
    # (excludes any previously downloaded portion of the file).
    headers = {}
    continuing = exists(path)
    if continuing:
        already_downloaded_bytes = getsize(path)
        # Ask the server for only the remainder of the file (HTTP Range request).
        headers = {'Range': 'bytes={}-'.format(already_downloaded_bytes)}
    else:
        already_downloaded_bytes = 0
    downloaded_bytes = 0
    with closing(session.get(url, stream=True, auth=session.auth,
                             headers=headers, timeout=self.timeout)) as r, \
            closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
                               unit_scale=True, initial=already_downloaded_bytes)) as progress:
        _check_scihub_response(r, test_json=False)
        chunk_size = 2 ** 20  # download in 1 MB chunks
        # Append when resuming, otherwise truncate/create.
        mode = 'ab' if continuing else 'wb'
        with open(path, mode) as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    progress.update(len(chunk))
                    downloaded_bytes += len(chunk)
    # Return the number of bytes downloaded
    return downloaded_bytes
def _tqdm(self, **kwargs):
    """tqdm progressbar wrapper. May be overridden to customize progressbar behavior"""
    # Progressbars are globally switched off via the show_progressbars flag.
    kwargs['disable'] = not self.show_progressbars
    return tqdm(**kwargs)
|
sentinelsat/sentinelsat | sentinelsat/sentinel.py | SentinelAPI.count | python | def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
for kw in ['order_by', 'limit', 'offset']:
# Allow these function arguments to be included for compatibility with query(),
# but ignore them.
if kw in keywords:
del keywords[kw]
query = self.format_query(area, date, raw, area_relation, **keywords)
_, total_count = self._load_query(query, limit=0)
return total_count | Get the number of products matching a query.
Accepted parameters are identical to :meth:`SentinelAPI.query()`.
This is a significantly more efficient alternative to doing `len(api.query())`,
which can take minutes to run for queries matching thousands of products.
Returns
-------
int
The number of products matching a query. | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L249-L269 | [
"def format_query(area=None, date=None, raw=None, area_relation='Intersects',\n **keywords):\n \"\"\"Create a OpenSearch API query string.\n \"\"\"\n if area_relation.lower() not in {\"intersects\", \"contains\", \"iswithin\"}:\n raise ValueError(\"Incorrect AOI relation provided ({})\".format(area_relation))\n\n # Check for duplicate keywords\n kw_lower = set(x.lower() for x in keywords)\n if (len(kw_lower) != len(keywords) or\n (date is not None and 'beginposition' in kw_lower) or\n (area is not None and 'footprint' in kw_lower)):\n raise ValueError(\"Query contains duplicate keywords. Note that query keywords are case-insensitive.\")\n\n query_parts = []\n\n if date is not None:\n keywords['beginPosition'] = date\n\n for attr, value in sorted(keywords.items()):\n # Escape spaces, where appropriate\n if isinstance(value, string_types):\n value = value.strip()\n if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):\n value = re.sub(r'\\s', r'\\ ', value, re.M)\n\n # Handle date keywords\n # Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601\n date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']\n if attr.lower() in date_attrs:\n # Automatically format date-type attributes\n if isinstance(value, string_types) and ' TO ' in value:\n # This is a string already formatted as a date interval,\n # e.g. '[NOW-1DAY TO NOW]'\n pass\n elif not isinstance(value, string_types) and len(value) == 2:\n value = (format_query_date(value[0]), format_query_date(value[1]))\n else:\n raise ValueError(\"Date-type query parameter '{}' expects a two-element tuple \"\n \"of str or datetime objects. 
Received {}\".format(attr, value))\n\n # Handle ranged values\n if isinstance(value, (list, tuple)):\n # Handle value ranges\n if len(value) == 2:\n # Allow None to be used as a unlimited bound\n value = ['*' if x is None else x for x in value]\n if all(x == '*' for x in value):\n continue\n value = '[{} TO {}]'.format(*value)\n else:\n raise ValueError(\"Invalid number of elements in list. Expected 2, received \"\n \"{}\".format(len(value)))\n\n query_parts.append('{}:{}'.format(attr, value))\n\n if raw:\n query_parts.append(raw)\n\n if area is not None:\n query_parts.append('footprint:\"{}({})\"'.format(area_relation, area))\n\n return ' '.join(query_parts)\n",
"def _load_query(self, query, order_by=None, limit=None, offset=0):\n products, count = self._load_subquery(query, order_by, limit, offset)\n\n # repeat query until all results have been loaded\n max_offset = count\n if limit is not None:\n max_offset = min(count, offset + limit)\n if max_offset > offset + self.page_size:\n progress = self._tqdm(desc=\"Querying products\",\n initial=self.page_size,\n total=max_offset - offset,\n unit=' products')\n for new_offset in range(offset + self.page_size, max_offset, self.page_size):\n new_limit = limit\n if limit is not None:\n new_limit = limit - new_offset + offset\n ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]\n progress.update(len(ret))\n products += ret\n progress.close()\n\n return products, count\n"
] | class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
             show_progressbars=True, timeout=None):
    """Create a client session for the given DataHub credentials and URL."""
    self.session = requests.Session()
    # If either credential is missing, leave session.auth unset so that
    # requests falls back on ~/.netrc handling.
    if user and password:
        self.session.auth = (user, password)
    # Guarantee a trailing slash so later urljoin() calls behave predictably.
    if not api_url.endswith('/'):
        api_url += '/'
    self.api_url = api_url
    self.page_size = 100
    self.user_agent = 'sentinelsat/' + sentinelsat_version
    self.session.headers['User-Agent'] = self.user_agent
    self.show_progressbars = show_progressbars
    self.timeout = timeout
    # Stored for inspection by the unit tests.
    self._last_query = None
    self._last_response = None
def query(self, area=None, date=None, raw=None, area_relation='Intersects',
          order_by=None, limit=None, offset=0, **keywords):
    """Query the OpenSearch API with an area of interest, a sensing-date
    interval and any other search keywords accepted by the API.

    Parameters
    ----------
    area : str, optional
        The area of interest formatted as a Well-Known Text string.
    date : tuple of (str or datetime) or str, optional
        A (start, end) filter based on the Sensing Start Time of the products.
        Each timestamp may be a Python datetime or a server-supported string
        such as "yyyyMMdd", "yyyy-MM-ddThh:mm:ssZ", "NOW", "NOW-1DAY",
        "NOW/DAY" etc. An already fully formatted interval string such as
        "[NOW-1DAY TO NOW]" is also accepted.
    raw : str, optional
        Additional query text that will be appended to the query.
    area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
        What relation to use for testing the AOI, case insensitive
        (default: 'Intersects').
    order_by : str, optional
        Comma-separated list of fields to order by on the server side.
        Prefix a field name with '+' or '-' for ascending/descending order;
        ascending is the default. Example: "cloudcoverpercentage, -beginposition".
    limit : int, optional
        Maximum number of products returned. Defaults to no limit.
    offset : int, optional
        The number of results to skip. Defaults to 0.
    **keywords
        Any other OpenSearch query parameters, e.g. `relativeorbitnumber=70`.
        Two-element tuples express ranges (`cloudcoverpercentage=(0, 30)`);
        `None` makes a range one-sided and a fully unbounded range is dropped.
        Time-interval formats accepted by `date` also work for 'beginposition',
        'endposition', 'date', 'creationdate' and 'ingestiondate'. See
        https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
        for a full list.

    Returns
    -------
    dict[string, dict]
        Products returned by the query as a dictionary with the product ID as
        the key and the product's attributes (a dictionary) as the value.
    """
    full_query = self.format_query(area, date, raw, area_relation, **keywords)
    self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
                      order_by, limit, offset, full_query)
    server_order_by = _format_order_by(order_by)
    entries, total = self._load_query(full_query, server_order_by, limit, offset)
    self.logger.info("Found %s products", total)
    return _parse_opensearch_response(entries)
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
"""Create a OpenSearch API query string.
"""
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts)
def query_raw(self, query, order_by=None, limit=None, offset=0):
    """Do a full-text query on the OpenSearch API.

    Uses the format specified in
    https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch

    DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will
    be removed in the next major release.

    Parameters
    ----------
    query : str
        The query string.
    order_by : str, optional
        Comma-separated list of fields to order by (on server side). Prefix a
        field with '+'/'-' for ascending/descending order; ascending is the
        default. Example: "cloudcoverpercentage, -beginposition".
    limit : int, optional
        Maximum number of products returned. Defaults to no limit.
    offset : int, optional
        The number of results to skip. Defaults to 0.

    Returns
    -------
    dict[string, dict]
        Products returned by the query as a dictionary with the product ID as
        the key and the product's attributes (a dictionary) as the value.
    """
    message = "query_raw() has been merged with query(). use query(raw=...) instead."
    warnings.warn(message, PendingDeprecationWarning)
    return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def _load_query(self, query, order_by=None, limit=None, offset=0):
    # Fetch all pages of results for `query` and return (products, total_count).
    # The first sub-query also tells us how many results exist in total.
    products, count = self._load_subquery(query, order_by, limit, offset)

    # repeat query until all results have been loaded
    max_offset = count
    if limit is not None:
        max_offset = min(count, offset + limit)
    if max_offset > offset + self.page_size:
        # More than one page is needed; show progress across the remaining pages.
        progress = self._tqdm(desc="Querying products",
                              initial=self.page_size,
                              total=max_offset - offset,
                              unit=' products')
        for new_offset in range(offset + self.page_size, max_offset, self.page_size):
            new_limit = limit
            if limit is not None:
                # Shrink the remaining limit by the number of results fetched so far.
                new_limit = limit - new_offset + offset
            ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
            progress.update(len(ret))
            products += ret
        progress.close()

    return products, count
def _load_subquery(self, query, order_by=None, limit=None, offset=0):
    # Execute a single OpenSearch request, returning at most one page of
    # results as (products, total_result_count).
    # store last query (for testing)
    self._last_query = query
    self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)

    # load query results
    url = self._format_url(order_by, limit, offset)
    # POST the query as form data to avoid URL length limits.
    response = self.session.post(url, {'q': query}, auth=self.session.auth,
                                 headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
                                 timeout=self.timeout)
    _check_scihub_response(response)

    # store last status code (for testing)
    self._last_response = response

    # parse response content
    try:
        json_feed = response.json()['feed']
        if json_feed['opensearch:totalResults'] is None:
            # We are using some unintended behavior of the server that a null is
            # returned as the total results value when the query string was incorrect.
            raise SentinelAPIError(
                'Invalid query string. Check the parameters and format.', response)
        total_results = int(json_feed['opensearch:totalResults'])
    except (ValueError, KeyError):
        raise SentinelAPIError('API response not valid. JSON decoding failed.', response)

    products = json_feed.get('entry', [])
    # this verification is necessary because if the query returns only
    # one product, self.products will be a dict not a list
    if isinstance(products, dict):
        products = [products]

    return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
@staticmethod
def to_geojson(products):
    """Return the products from a query response as a GeoJSON with the values in their
    appropriate Python types.
    """
    features = []
    for index, (product_id, props) in enumerate(products.items()):
        attrs = props.copy()
        attrs['id'] = product_id
        # The WKT footprint becomes the feature geometry; the GML variant is dropped.
        geometry = geomet.wkt.loads(attrs.pop('footprint'))
        attrs.pop('gmlfootprint')
        # Fix "'datetime' is not JSON serializable"
        for key, val in attrs.items():
            if isinstance(val, (date, datetime)):
                attrs[key] = val.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        features.append(
            geojson.Feature(geometry=geometry, id=index, properties=attrs)
        )
    return geojson.FeatureCollection(features)
@staticmethod
def to_dataframe(products):
"""Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index')
@staticmethod
def to_geodataframe(products):
    """Return the products from a query response as a GeoPandas GeoDataFrame
    with the values in their appropriate Python types.
    """
    # GeoPandas and Shapely are optional dependencies; import lazily.
    try:
        import geopandas as gpd
        import shapely.wkt
    except ImportError:
        raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")

    crs = {'init': 'epsg:4326'}  # WGS84
    if not products:
        return gpd.GeoDataFrame(crs=crs)

    frame = SentinelAPI.to_dataframe(products)
    footprints = [shapely.wkt.loads(wkt_str) for wkt_str in frame['footprint']]
    # remove useless columns
    frame = frame.drop(['footprint', 'gmlfootprint'], axis=1)
    return gpd.GeoDataFrame(frame, crs=crs, geometry=footprints)
def get_product_odata(self, id, full=False):
    """Access OData API to get info about a product.

    Returns a dict containing the id, title, size, md5sum, date, footprint and
    download url of the product. The date field corresponds to the Start
    ContentDate value. If `full` is set to True, then the full, detailed
    metadata of the product is returned in addition to the above.

    Parameters
    ----------
    id : string
        The UUID of the product to query
    full : bool
        Whether to get the full metadata for the Product. False by default.

    Returns
    -------
    dict[str, Any]
        A dictionary with an item for each metadata attribute

    Notes
    -----
    For a full list of mappings between the OpenSearch (Solr) and OData
    attribute names see the following definition files:
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
    """
    product_url = urljoin(self.api_url, u"odata/v1/Products('{}')?$format=json".format(id))
    if full:
        # Expanding Attributes pulls in the complete metadata set.
        product_url += '&$expand=Attributes'
    response = self.session.get(product_url, auth=self.session.auth,
                                timeout=self.timeout)
    _check_scihub_response(response)
    return _parse_odata_response(response.json()['d'])
def _trigger_offline_retrieval(self, url):
    """Trigger retrieval of an offline product from the long term archive.

    Trying to download an offline product triggers its retrieval from the LTA;
    the HTTP status code of the response conveys whether that was successful.

    Parameters
    ----------
    url : string
        URL for downloading the product

    Notes
    -----
    https://scihub.copernicus.eu/userguide/LongTermArchive
    """
    with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
        # check https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
        status = r.status_code
        if status == 202:
            self.logger.info("Accepted for retrieval")
        elif status == 503:
            self.logger.error("Request not accepted")
            raise SentinelAPILTAError('Request for retrieval from LTA not accepted', r)
        elif status == 403:
            self.logger.error("Requests exceed user quota")
            raise SentinelAPILTAError('Requests for retrieval from LTA exceed user quota', r)
        elif status == 500:
            # should not happen
            self.logger.error("Trying to download an offline product")
            raise SentinelAPILTAError('Trying to download an offline product', r)
        return status
def download(self, id, directory_path='.', checksum=True):
    """Download a product.

    Uses the filename on the server for the downloaded file, e.g.
    "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

    Incomplete downloads are continued and complete files are skipped.

    Parameters
    ----------
    id : string
        UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
    directory_path : string, optional
        Where the file will be downloaded
    checksum : bool, optional
        If True, verify the downloaded file's integrity by checking its MD5 checksum.
        Throws InvalidChecksumError if the checksum does not match.
        Defaults to True.

    Returns
    -------
    product_info : dict
        Dictionary containing the product's info from get_product_info() as well as
        the path on disk.

    Raises
    ------
    InvalidChecksumError
        If the MD5 checksum does not match the checksum on the server.
    """
    product_info = self.get_product_odata(id)
    path = join(directory_path, product_info['title'] + '.zip')
    product_info['path'] = path
    product_info['downloaded_bytes'] = 0
    self.logger.info('Downloading %s to %s', id, path)
    if exists(path):
        # We assume that the product has been downloaded and is complete
        return product_info
    # An incomplete download triggers the retrieval from the LTA if the product is not online
    if not product_info['Online']:
        self.logger.warning(
            'Product %s is not online. Triggering retrieval from long term archive.',
            product_info['id'])
        self._trigger_offline_retrieval(product_info['url'])
        return product_info
    # Use a temporary file for downloading
    temp_path = path + '.incomplete'
    skip_download = False
    if exists(temp_path):
        if getsize(temp_path) > product_info['size']:
            # An oversized partial file can never be completed correctly; start over.
            self.logger.warning(
                "Existing incomplete file %s is larger than the expected final size"
                " (%s vs %s bytes). Deleting it.",
                str(temp_path), getsize(temp_path), product_info['size'])
            remove(temp_path)
        elif getsize(temp_path) == product_info['size']:
            if self._md5_compare(temp_path, product_info['md5']):
                # The file was fully downloaded in an earlier run; only rename it below.
                skip_download = True
            else:
                # Log a warning since this should never happen
                self.logger.warning(
                    "Existing incomplete file %s appears to be fully downloaded but "
                    "its checksum is incorrect. Deleting it.",
                    str(temp_path))
                remove(temp_path)
        else:
            # continue downloading
            self.logger.info(
                "Download will resume from existing incomplete file %s.", temp_path)
            pass
    if not skip_download:
        # Store the number of downloaded bytes for unit tests
        product_info['downloaded_bytes'] = self._download(
            product_info['url'], temp_path, self.session, product_info['size'])
    # Check integrity with MD5 checksum
    if checksum is True:
        if not self._md5_compare(temp_path, product_info['md5']):
            remove(temp_path)
            raise InvalidChecksumError('File corrupt: checksums do not match')
    # Download successful, rename the temporary file to its proper name
    shutil.move(temp_path, path)
    return product_info
def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
    """Download a list of products.

    Takes a list of product IDs as input. This means that the return value of query() can be
    passed directly to this method.

    File names on the server are used for the downloaded files, e.g.
    "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

    In case of interruptions or other exceptions, downloading will restart from where it left
    off. Downloading is attempted at most max_attempts times to avoid getting stuck with
    unrecoverable errors.

    Parameters
    ----------
    products : list
        List of product IDs
    directory_path : string
        Directory where the downloaded files will be downloaded
    max_attempts : int, optional
        Number of allowed retries before giving up downloading a product. Defaults to 10.
    checksum : bool, optional
        If True, verify the downloaded files' integrity by checking its MD5 checksum.
        Throws InvalidChecksumError if the checksum does not match.
        Defaults to True.

    Raises
    ------
    Raises the most recent downloading exception if all downloads failed.

    Returns
    -------
    dict[string, dict]
        A dictionary containing the return value from download() for each successfully
        downloaded product.
    dict[string, dict]
        A dictionary containing the product information for products whose retrieval
        from the long term archive was successfully triggered.
    set[string]
        The list of products that failed to download.
    """
    product_ids = list(products)
    self.logger.info("Will download %d products", len(product_ids))
    return_values = OrderedDict()
    last_exception = None
    for i, product_id in enumerate(products):
        for attempt_num in range(max_attempts):
            try:
                product_info = self.download(product_id, directory_path, checksum)
                return_values[product_id] = product_info
                break
            except (KeyboardInterrupt, SystemExit):
                # Never swallow user interruption.
                raise
            except InvalidChecksumError as e:
                # Corrupt download: remember the exception and retry.
                last_exception = e
                self.logger.warning(
                    "Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
            except SentinelAPILTAError as e:
                # LTA errors are not retried; move on to the next product.
                last_exception = e
                self.logger.exception("There was an error retrieving %s from the LTA", product_id)
                break
            except Exception as e:
                last_exception = e
                self.logger.exception("There was an error downloading %s", product_id)
        self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
    failed = set(products) - set(return_values)
    # split up successfully processed products into downloaded and only triggered retrieval from the LTA
    triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
    downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
    # Only raise if nothing at all succeeded.
    if len(failed) == len(product_ids) and last_exception is not None:
        raise last_exception
    return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
def _query_names(self, names):
    """Find products by their names, e.g.
    S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.

    Note that duplicates exist on server, so multiple products can be returned for each name.

    Parameters
    ----------
    names : list[string]
        List of product names.

    Returns
    -------
    dict[string, dict[str, dict]]
        A dictionary mapping each name to a dictionary which contains the products with
        that name (with ID as the key).
    """
    found = {}
    # 40 names per query fits reasonably well inside the query limit
    batch_size = 40
    for start in range(0, len(names), batch_size):
        batch = names[start:start + batch_size]
        found.update(self.query(raw=" OR ".join(batch)))
    # Group the matches by product name; names without a match map to an empty dict.
    grouped = OrderedDict((name, {}) for name in names)
    for uuid, metadata in found.items():
        grouped[metadata['identifier']][uuid] = metadata
    return grouped
def check_files(self, paths=None, ids=None, directory=None, delete=False):
    """Verify the integrity of product files on disk.

    Integrity is checked by comparing the size and checksum of the file with the respective
    values on the server.

    The input can be a list of products to check or a list of IDs and a directory.

    In cases where multiple products with different IDs exist on the server for given product
    name, the file is considered to be correct if any of them matches the file size and
    checksum. A warning is logged in such situations.

    The corrupt products' OData info is included in the return value to make it easier to
    re-download the products, if necessary.

    Parameters
    ----------
    paths : list[string]
        List of product file paths.
    ids : list[string]
        List of product IDs.
    directory : string
        Directory where the files are located, if checking based on product IDs.
    delete : bool
        Whether to delete corrupt products. Defaults to False.

    Returns
    -------
    dict[str, list[dict]]
        A dictionary listing the invalid or missing files. The dictionary maps the corrupt
        file paths to a list of OData dictionaries of matching products on the server (as
        returned by :meth:`SentinelAPI.get_product_odata()`).
    """
    if not ids and not paths:
        raise ValueError("Must provide either file paths or product IDs and a directory")
    if ids and not directory:
        raise ValueError("Directory value missing")
    paths = paths or []
    ids = ids or []

    def name_from_path(path):
        # Product name is the file name without its extension.
        return splitext(basename(path))[0]

    # Get product IDs corresponding to the files on disk
    names = []
    if paths:
        names = list(map(name_from_path, paths))
        result = self._query_names(names)
        for product_dicts in result.values():
            ids += list(product_dicts)
    names_from_paths = set(names)
    ids = set(ids)

    # Collect the OData information for each product
    # Product name -> list of matching odata dicts
    product_infos = defaultdict(list)
    for id in ids:
        odata = self.get_product_odata(id)
        name = odata['title']
        product_infos[name].append(odata)

        # Collect paths for products that were given by ID only.
        # BUG FIX: record the name as seen so that multiple server products
        # sharing one name do not append (and later check) the same path twice.
        if name not in names_from_paths:
            paths.append(join(directory, name + '.zip'))
            names_from_paths.add(name)

    # Now go over the list of products and check them
    corrupt = {}
    for path in paths:
        name = name_from_path(path)

        if len(product_infos[name]) > 1:
            self.logger.warning("{} matches multiple products on server".format(path))

        if not exists(path):
            # We will consider missing files as corrupt also
            self.logger.info("{} does not exist on disk".format(path))
            corrupt[path] = product_infos[name]
            continue

        # The file is fine if it matches any product with the same name.
        is_fine = False
        for product_info in product_infos[name]:
            if (getsize(path) == product_info['size'] and
                    self._md5_compare(path, product_info['md5'])):
                is_fine = True
                break
        if not is_fine:
            self.logger.info("{} is corrupt".format(path))
            corrupt[path] = product_infos[name]
            if delete:
                remove(path)

    return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
    """Compare a given MD5 checksum with one calculated from a file."""
    digest = hashlib.md5()
    progress = self._tqdm(desc="MD5 checksumming", total=getsize(file_path),
                          unit="B", unit_scale=True)
    with closing(progress):
        with open(file_path, "rb") as stream:
            # Read fixed-size chunks until EOF (read() returns b"").
            for piece in iter(lambda: stream.read(block_size), b""):
                digest.update(piece)
                progress.update(len(piece))
        return digest.hexdigest().lower() == checksum.lower()
def _download(self, url, path, session, file_size):
    """Stream the file at ``url`` to ``path``, resuming a partial download if present.

    Parameters
    ----------
    url : string
        Download URL of the product.
    path : string
        Destination file path; an existing file is appended to, not overwritten.
    session : requests.Session
        Authenticated session used for the request.
    file_size : int
        Expected total size in bytes; used to size the progress bar.

    Returns
    -------
    int
        Number of bytes downloaded during this call (excludes bytes already
        on disk when resuming).
    """
    headers = {}
    continuing = exists(path)
    if continuing:
        # Resume from where the previous attempt stopped via an HTTP Range request.
        already_downloaded_bytes = getsize(path)
        headers = {'Range': 'bytes={}-'.format(already_downloaded_bytes)}
    else:
        already_downloaded_bytes = 0
    downloaded_bytes = 0
    with closing(session.get(url, stream=True, auth=session.auth,
                             headers=headers, timeout=self.timeout)) as r, \
            closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
                               unit_scale=True, initial=already_downloaded_bytes)) as progress:
        _check_scihub_response(r, test_json=False)
        chunk_size = 2 ** 20  # download in 1 MB chunks
        # Append mode preserves the previously downloaded data when resuming.
        mode = 'ab' if continuing else 'wb'
        with open(path, mode) as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    progress.update(len(chunk))
                    downloaded_bytes += len(chunk)
    # Return the number of bytes downloaded
    return downloaded_bytes
def _tqdm(self, **kwargs):
    """tqdm progressbar wrapper. May be overridden to customize progressbar behavior"""
    # Progress bars are globally toggled via the `show_progressbars` attribute.
    options = dict(kwargs, disable=not self.show_progressbars)
    return tqdm(**options)
|
sentinelsat/sentinelsat | sentinelsat/sentinel.py | SentinelAPI.to_geojson | python | def to_geojson(products):
feature_list = []
for i, (product_id, props) in enumerate(products.items()):
props = props.copy()
props['id'] = product_id
poly = geomet.wkt.loads(props['footprint'])
del props['footprint']
del props['gmlfootprint']
# Fix "'datetime' is not JSON serializable"
for k, v in props.items():
if isinstance(v, (date, datetime)):
props[k] = v.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
feature_list.append(
geojson.Feature(geometry=poly, id=i, properties=props)
)
return geojson.FeatureCollection(feature_list) | Return the products from a query response as a GeoJSON with the values in their
appropriate Python types. | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L340-L358 | null | class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
show_progressbars=True, timeout=None):
self.session = requests.Session()
if user and password:
self.session.auth = (user, password)
self.api_url = api_url if api_url.endswith('/') else api_url + '/'
self.page_size = 100
self.user_agent = 'sentinelsat/' + sentinelsat_version
self.session.headers['User-Agent'] = self.user_agent
self.show_progressbars = show_progressbars
self.timeout = timeout
# For unit tests
self._last_query = None
self._last_response = None
def query(self, area=None, date=None, raw=None, area_relation='Intersects',
order_by=None, limit=None, offset=0, **keywords):
"""Query the OpenSearch API with the coordinates of an area, a date interval
and any other search keywords accepted by the API.
Parameters
----------
area : str, optional
The area of interest formatted as a Well-Known Text string.
date : tuple of (str or datetime) or str, optional
A time interval filter based on the Sensing Start Time of the products.
Expects a tuple of (start, end), e.g. ("NOW-1DAY", "NOW").
The timestamps can be either a Python datetime or a string in one of the
following formats:
- yyyyMMdd
- yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)
- yyyy-MM-ddThh:mm:ssZ
- NOW
- NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)
- NOW+<n>DAY(S)
- yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)
- NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit
Alternatively, an already fully formatted string such as "[NOW-1DAY TO NOW]" can be
used as well.
raw : str, optional
Additional query text that will be appended to the query.
area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
What relation to use for testing the AOI. Case insensitive.
- Intersects: true if the AOI and the footprint intersect (default)
- Contains: true if the AOI is inside the footprint
- IsWithin: true if the footprint is inside the AOI
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order,
respectively. Ascending order is used if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
**keywords
Additional keywords can be used to specify other query parameters,
e.g. `relativeorbitnumber=70`.
See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
for a full list.
Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.
`None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.
Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.
The time interval formats accepted by the `date` parameter can also be used with
any other parameters that expect time intervals (that is: 'beginposition', 'endposition',
'date', 'creationdate', and 'ingestiondate').
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
"""
query = self.format_query(area, date, raw, area_relation, **keywords)
self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
order_by, limit, offset, query)
formatted_order_by = _format_order_by(order_by)
response, count = self._load_query(query, formatted_order_by, limit, offset)
self.logger.info("Found %s products", count)
return _parse_opensearch_response(response)
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
"""Create a OpenSearch API query string.
"""
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts)
def query_raw(self, query, order_by=None, limit=None, offset=0):
"""
Do a full-text query on the OpenSearch API using the format specified in
https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release.
Parameters
----------
query : str
The query string.
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively.
Ascending order is used, if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
"""
warnings.warn(
"query_raw() has been merged with query(). use query(raw=...) instead.",
PendingDeprecationWarning
)
return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
"""Get the number of products matching a query.
Accepted parameters are identical to :meth:`SentinelAPI.query()`.
This is a significantly more efficient alternative to doing `len(api.query())`,
which can take minutes to run for queries matching thousands of products.
Returns
-------
int
The number of products matching a query.
"""
for kw in ['order_by', 'limit', 'offset']:
# Allow these function arguments to be included for compatibility with query(),
# but ignore them.
if kw in keywords:
del keywords[kw]
query = self.format_query(area, date, raw, area_relation, **keywords)
_, total_count = self._load_query(query, limit=0)
return total_count
def _load_query(self, query, order_by=None, limit=None, offset=0):
products, count = self._load_subquery(query, order_by, limit, offset)
# repeat query until all results have been loaded
max_offset = count
if limit is not None:
max_offset = min(count, offset + limit)
if max_offset > offset + self.page_size:
progress = self._tqdm(desc="Querying products",
initial=self.page_size,
total=max_offset - offset,
unit=' products')
for new_offset in range(offset + self.page_size, max_offset, self.page_size):
new_limit = limit
if limit is not None:
new_limit = limit - new_offset + offset
ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
progress.update(len(ret))
products += ret
progress.close()
return products, count
def _load_subquery(self, query, order_by=None, limit=None, offset=0):
    """Fetch a single page of results (at most one server page) for ``query``.

    Returns a tuple of (list of product entry dicts, total result count on server).
    """
    # store last query (for testing)
    self._last_query = query
    self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)

    # load query results
    url = self._format_url(order_by, limit, offset)
    # The query is sent in the POST body so long query strings do not hit
    # URL length limits.
    response = self.session.post(url, {'q': query}, auth=self.session.auth,
                                 headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
                                 timeout=self.timeout)
    _check_scihub_response(response)

    # store last status code (for testing)
    self._last_response = response

    # parse response content
    try:
        json_feed = response.json()['feed']
        if json_feed['opensearch:totalResults'] is None:
            # We are using some unintended behavior of the server that a null is
            # returned as the total results value when the query string was incorrect.
            raise SentinelAPIError(
                'Invalid query string. Check the parameters and format.', response)
        total_results = int(json_feed['opensearch:totalResults'])
    except (ValueError, KeyError):
        raise SentinelAPIError('API response not valid. JSON decoding failed.', response)

    products = json_feed.get('entry', [])
    # this verification is necessary because if the query returns only
    # one product, self.products will be a dict not a list
    if isinstance(products, dict):
        products = [products]

    return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
@staticmethod
@staticmethod
def to_dataframe(products):
"""Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index')
@staticmethod
def to_geodataframe(products):
    """Return the products from a query response as a GeoPandas GeoDataFrame
    with the values in their appropriate Python types.

    Parameters
    ----------
    products : dict[string, dict]
        Product ID -> attribute dictionary, as returned by :meth:`SentinelAPI.query`.

    Returns
    -------
    geopandas.GeoDataFrame
        One row per product, with the WKT footprint parsed into a Shapely geometry.

    Raises
    ------
    ImportError
        If the optional dependencies GeoPandas and Shapely are not installed.
    """
    try:
        import geopandas as gpd
        import shapely.wkt
    except ImportError:
        raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")
    # NOTE(review): the dict-style CRS is deprecated in newer pyproj/geopandas
    # releases in favor of the "EPSG:4326" string -- confirm before upgrading.
    crs = {'init': 'epsg:4326'}  # WGS84
    if len(products) == 0:
        return gpd.GeoDataFrame(crs=crs)

    df = SentinelAPI.to_dataframe(products)
    geometry = [shapely.wkt.loads(fp) for fp in df['footprint']]
    # remove useless columns
    df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)
    return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
def get_product_odata(self, id, full=False):
"""Access OData API to get info about a product.
Returns a dict containing the id, title, size, md5sum, date, footprint and download url
of the product. The date field corresponds to the Start ContentDate value.
If `full` is set to True, then the full, detailed metadata of the product is returned
in addition to the above.
Parameters
----------
id : string
The UUID of the product to query
full : bool
Whether to get the full metadata for the Product. False by default.
Returns
-------
dict[str, Any]
A dictionary with an item for each metadata attribute
Notes
-----
For a full list of mappings between the OpenSearch (Solr) and OData attribute names
see the following definition files:
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
"""
url = urljoin(self.api_url, u"odata/v1/Products('{}')?$format=json".format(id))
if full:
url += '&$expand=Attributes'
response = self.session.get(url, auth=self.session.auth,
timeout=self.timeout)
_check_scihub_response(response)
values = _parse_odata_response(response.json()['d'])
return values
def _trigger_offline_retrieval(self, url):
""" Triggers retrieval of an offline product
Trying to download an offline product triggers its retrieval from the long term archive.
The returned HTTP status code conveys whether this was successful.
Parameters
----------
url : string
URL for downloading the product
Notes
-----
https://scihub.copernicus.eu/userguide/LongTermArchive
"""
with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
# check https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
if r.status_code == 202:
self.logger.info("Accepted for retrieval")
elif r.status_code == 503:
self.logger.error("Request not accepted")
raise SentinelAPILTAError('Request for retrieval from LTA not accepted', r)
elif r.status_code == 403:
self.logger.error("Requests exceed user quota")
raise SentinelAPILTAError('Requests for retrieval from LTA exceed user quota', r)
elif r.status_code == 500:
# should not happen
self.logger.error("Trying to download an offline product")
raise SentinelAPILTAError('Trying to download an offline product', r)
return r.status_code
def download(self, id, directory_path='.', checksum=True):
"""Download a product.
Uses the filename on the server for the downloaded file, e.g.
"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".
Incomplete downloads are continued and complete files are skipped.
Parameters
----------
id : string
UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
directory_path : string, optional
Where the file will be downloaded
checksum : bool, optional
If True, verify the downloaded file's integrity by checking its MD5 checksum.
Throws InvalidChecksumError if the checksum does not match.
Defaults to True.
Returns
-------
product_info : dict
Dictionary containing the product's info from get_product_info() as well as
the path on disk.
Raises
------
InvalidChecksumError
If the MD5 checksum does not match the checksum on the server.
"""
product_info = self.get_product_odata(id)
path = join(directory_path, product_info['title'] + '.zip')
product_info['path'] = path
product_info['downloaded_bytes'] = 0
self.logger.info('Downloading %s to %s', id, path)
if exists(path):
# We assume that the product has been downloaded and is complete
return product_info
# An incomplete download triggers the retrieval from the LTA if the product is not online
if not product_info['Online']:
self.logger.warning(
'Product %s is not online. Triggering retrieval from long term archive.',
product_info['id'])
self._trigger_offline_retrieval(product_info['url'])
return product_info
# Use a temporary file for downloading
temp_path = path + '.incomplete'
skip_download = False
if exists(temp_path):
if getsize(temp_path) > product_info['size']:
self.logger.warning(
"Existing incomplete file %s is larger than the expected final size"
" (%s vs %s bytes). Deleting it.",
str(temp_path), getsize(temp_path), product_info['size'])
remove(temp_path)
elif getsize(temp_path) == product_info['size']:
if self._md5_compare(temp_path, product_info['md5']):
skip_download = True
else:
# Log a warning since this should never happen
self.logger.warning(
"Existing incomplete file %s appears to be fully downloaded but "
"its checksum is incorrect. Deleting it.",
str(temp_path))
remove(temp_path)
else:
# continue downloading
self.logger.info(
"Download will resume from existing incomplete file %s.", temp_path)
pass
if not skip_download:
# Store the number of downloaded bytes for unit tests
product_info['downloaded_bytes'] = self._download(
product_info['url'], temp_path, self.session, product_info['size'])
# Check integrity with MD5 checksum
if checksum is True:
if not self._md5_compare(temp_path, product_info['md5']):
remove(temp_path)
raise InvalidChecksumError('File corrupt: checksums do not match')
# Download successful, rename the temporary file to its proper name
shutil.move(temp_path, path)
return product_info
def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
"""Download a list of products.
Takes a list of product IDs as input. This means that the return value of query() can be
passed directly to this method.
File names on the server are used for the downloaded files, e.g.
"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".
In case of interruptions or other exceptions, downloading will restart from where it left
off. Downloading is attempted at most max_attempts times to avoid getting stuck with
unrecoverable errors.
Parameters
----------
products : list
List of product IDs
directory_path : string
Directory where the downloaded files will be downloaded
max_attempts : int, optional
Number of allowed retries before giving up downloading a product. Defaults to 10.
checksum : bool, optional
If True, verify the downloaded files' integrity by checking its MD5 checksum.
Throws InvalidChecksumError if the checksum does not match.
Defaults to True.
Raises
------
Raises the most recent downloading exception if all downloads failed.
Returns
-------
dict[string, dict]
A dictionary containing the return value from download() for each successfully
downloaded product.
dict[string, dict]
A dictionary containing the product information for products whose retrieval
from the long term archive was successfully triggered.
set[string]
The list of products that failed to download.
"""
product_ids = list(products)
self.logger.info("Will download %d products", len(product_ids))
return_values = OrderedDict()
last_exception = None
for i, product_id in enumerate(products):
for attempt_num in range(max_attempts):
try:
product_info = self.download(product_id, directory_path, checksum)
return_values[product_id] = product_info
break
except (KeyboardInterrupt, SystemExit):
raise
except InvalidChecksumError as e:
last_exception = e
self.logger.warning(
"Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
except SentinelAPILTAError as e:
last_exception = e
self.logger.exception("There was an error retrieving %s from the LTA", product_id)
break
except Exception as e:
last_exception = e
self.logger.exception("There was an error downloading %s", product_id)
self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
failed = set(products) - set(return_values)
# split up sucessfully processed products into downloaded and only triggered retrieval from the LTA
triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
if len(failed) == len(product_ids) and last_exception is not None:
raise last_exception
return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
def _query_names(self, names):
"""Find products by their names, e.g.
S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.
Note that duplicates exist on server, so multiple products can be returned for each name.
Parameters
----------
names : list[string]
List of product names.
Returns
-------
dict[string, dict[str, dict]]
A dictionary mapping each name to a dictionary which contains the products with
that name (with ID as the key).
"""
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
products = {}
# 40 names per query fits reasonably well inside the query limit
for chunk in chunks(names, 40):
query = " OR ".join(chunk)
products.update(self.query(raw=query))
# Group the products
output = OrderedDict((name, dict()) for name in names)
for id, metadata in products.items():
name = metadata['identifier']
output[name][id] = metadata
return output
def check_files(self, paths=None, ids=None, directory=None, delete=False):
"""Verify the integrity of product files on disk.
Integrity is checked by comparing the size and checksum of the file with the respective
values on the server.
The input can be a list of products to check or a list of IDs and a directory.
In cases where multiple products with different IDs exist on the server for given product
name, the file is considered to be correct if any of them matches the file size and
checksum. A warning is logged in such situations.
The corrupt products' OData info is included in the return value to make it easier to
re-download the products, if necessary.
Parameters
----------
paths : list[string]
List of product file paths.
ids : list[string]
List of product IDs.
directory : string
Directory where the files are located, if checking based on product IDs.
delete : bool
Whether to delete corrupt products. Defaults to False.
Returns
-------
dict[str, list[dict]]
A dictionary listing the invalid or missing files. The dictionary maps the corrupt
file paths to a list of OData dictionaries of matching products on the server (as
returned by :meth:`SentinelAPI.get_product_odata()`).
"""
if not ids and not paths:
raise ValueError("Must provide either file paths or product IDs and a directory")
if ids and not directory:
raise ValueError("Directory value missing")
paths = paths or []
ids = ids or []
def name_from_path(path):
return splitext(basename(path))[0]
# Get product IDs corresponding to the files on disk
names = []
if paths:
names = list(map(name_from_path, paths))
result = self._query_names(names)
for product_dicts in result.values():
ids += list(product_dicts)
names_from_paths = set(names)
ids = set(ids)
# Collect the OData information for each product
# Product name -> list of matching odata dicts
product_infos = defaultdict(list)
for id in ids:
odata = self.get_product_odata(id)
name = odata['title']
product_infos[name].append(odata)
# Collect
if name not in names_from_paths:
paths.append(join(directory, name + '.zip'))
# Now go over the list of products and check them
corrupt = {}
for path in paths:
name = name_from_path(path)
if len(product_infos[name]) > 1:
self.logger.warning("{} matches multiple products on server".format(path))
if not exists(path):
# We will consider missing files as corrupt also
self.logger.info("{} does not exist on disk".format(path))
corrupt[path] = product_infos[name]
continue
is_fine = False
for product_info in product_infos[name]:
if (getsize(path) == product_info['size'] and
self._md5_compare(path, product_info['md5'])):
is_fine = True
break
if not is_fine:
self.logger.info("{} is corrupt".format(path))
corrupt[path] = product_infos[name]
if delete:
remove(path)
return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
"""Compare a given MD5 checksum with one calculated from a file."""
with closing(self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B",
unit_scale=True)) as progress:
md5 = hashlib.md5()
with open(file_path, "rb") as f:
while True:
block_data = f.read(block_size)
if not block_data:
break
md5.update(block_data)
progress.update(len(block_data))
return md5.hexdigest().lower() == checksum.lower()
def _download(self, url, path, session, file_size):
headers = {}
continuing = exists(path)
if continuing:
already_downloaded_bytes = getsize(path)
headers = {'Range': 'bytes={}-'.format(already_downloaded_bytes)}
else:
already_downloaded_bytes = 0
downloaded_bytes = 0
with closing(session.get(url, stream=True, auth=session.auth,
headers=headers, timeout=self.timeout)) as r, \
closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
unit_scale=True, initial=already_downloaded_bytes)) as progress:
_check_scihub_response(r, test_json=False)
chunk_size = 2 ** 20 # download in 1 MB chunks
mode = 'ab' if continuing else 'wb'
with open(path, mode) as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
progress.update(len(chunk))
downloaded_bytes += len(chunk)
# Return the number of bytes downloaded
return downloaded_bytes
def _tqdm(self, **kwargs):
"""tqdm progressbar wrapper. May be overridden to customize progressbar behavior"""
kwargs.update({'disable': not self.show_progressbars})
return tqdm(**kwargs)
|
sentinelsat/sentinelsat | sentinelsat/sentinel.py | SentinelAPI.to_dataframe | python | def to_dataframe(products):
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index') | Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types. | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L361-L370 | null | class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
             show_progressbars=True, timeout=None):
    """Initialize the API client session and connection settings."""
    self.session = requests.Session()
    # Credentials may be omitted (None), in which case requests falls back
    # to ~/.netrc for authentication.
    if user and password:
        self.session.auth = (user, password)
    # Normalize the API URL with a trailing slash so later urljoin() calls
    # resolve relative endpoints predictably.
    self.api_url = api_url if api_url.endswith('/') else api_url + '/'
    # Maximum page size allowed by ApiHub.
    self.page_size = 100
    self.user_agent = 'sentinelsat/' + sentinelsat_version
    self.session.headers['User-Agent'] = self.user_agent
    self.show_progressbars = show_progressbars
    self.timeout = timeout
    # For unit tests
    self._last_query = None
    self._last_response = None
def query(self, area=None, date=None, raw=None, area_relation='Intersects',
          order_by=None, limit=None, offset=0, **keywords):
    """Query the OpenSearch API with the coordinates of an area, a date interval
    and any other search keywords accepted by the API.

    Parameters
    ----------
    area : str, optional
        The area of interest formatted as a Well-Known Text string.
    date : tuple of (str or datetime) or str, optional
        A time interval filter based on the Sensing Start Time of the products.
        Expects a tuple of (start, end), e.g. ("NOW-1DAY", "NOW").
        The timestamps can be either a Python datetime or a string in one of the
        following formats:

            - yyyyMMdd
            - yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)
            - yyyy-MM-ddThh:mm:ssZ
            - NOW
            - NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)
            - NOW+<n>DAY(S)
            - yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)
            - NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit

        Alternatively, an already fully formatted string such as
        "[NOW-1DAY TO NOW]" can be used as well.
    raw : str, optional
        Additional query text that will be appended to the query.
    area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
        What relation to use for testing the AOI. Case insensitive.

            - Intersects: true if the AOI and the footprint intersect (default)
            - Contains: true if the AOI is inside the footprint
            - IsWithin: true if the footprint is inside the AOI
    order_by : str, optional
        A comma-separated list of fields to order by (on server side).
        Prefix the field name by '+' or '-' to sort in ascending or descending
        order, respectively. Ascending order is used if prefix is omitted.
        Example: "cloudcoverpercentage, -beginposition".
    limit : int, optional
        Maximum number of products returned. Defaults to no limit.
    offset : int, optional
        The number of results to skip. Defaults to 0.
    **keywords
        Additional keywords can be used to specify other query parameters,
        e.g. ``relativeorbitnumber=70``. See
        https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
        for a full list.

        Range values can be passed as two-element tuples, e.g.
        ``cloudcoverpercentage=(0, 30)``. ``None`` can be used in range values
        for one-sided ranges, e.g. ``orbitnumber=(16302, None)``. Ranges with
        no bounds (``orbitnumber=(None, None)``) will not be included in the
        query.

        The time interval formats accepted by the ``date`` parameter can also
        be used with any other parameters that expect time intervals (that is:
        'beginposition', 'endposition', 'date', 'creationdate', and
        'ingestiondate').

    Returns
    -------
    dict[string, dict]
        Products returned by the query as a dictionary with the product ID as
        the key and the product's attributes (a dictionary) as the value.
    """
    # Build the full-text query string from all the filters.
    query = self.format_query(area, date, raw, area_relation, **keywords)
    self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
                      order_by, limit, offset, query)
    # Translate the '+'/'-' prefixes into the server-side orderby syntax.
    formatted_order_by = _format_order_by(order_by)
    # _load_query paginates transparently and also returns the total number
    # of matches reported by the server.
    response, count = self._load_query(query, formatted_order_by, limit, offset)
    self.logger.info("Found %s products", count)
    return _parse_opensearch_response(response)
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
"""Create a OpenSearch API query string.
"""
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts)
def query_raw(self, query, order_by=None, limit=None, offset=0):
"""
Do a full-text query on the OpenSearch API using the format specified in
https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release.
Parameters
----------
query : str
The query string.
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively.
Ascending order is used, if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
"""
warnings.warn(
"query_raw() has been merged with query(). use query(raw=...) instead.",
PendingDeprecationWarning
)
return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
"""Get the number of products matching a query.
Accepted parameters are identical to :meth:`SentinelAPI.query()`.
This is a significantly more efficient alternative to doing `len(api.query())`,
which can take minutes to run for queries matching thousands of products.
Returns
-------
int
The number of products matching a query.
"""
for kw in ['order_by', 'limit', 'offset']:
# Allow these function arguments to be included for compatibility with query(),
# but ignore them.
if kw in keywords:
del keywords[kw]
query = self.format_query(area, date, raw, area_relation, **keywords)
_, total_count = self._load_query(query, limit=0)
return total_count
def _load_query(self, query, order_by=None, limit=None, offset=0):
    """Run `query`, transparently requesting additional result pages until
    `limit` products (or all matches) have been fetched.

    Returns a tuple of (products, total_count), where `total_count` is the
    server-reported total number of matches for the query.
    """
    # First page; also tells us how many products match in total.
    products, count = self._load_subquery(query, order_by, limit, offset)

    # repeat query until all results have been loaded
    max_offset = count
    if limit is not None:
        max_offset = min(count, offset + limit)
    if max_offset > offset + self.page_size:
        # More than one page is needed; show progress over the remaining pages.
        progress = self._tqdm(desc="Querying products",
                              initial=self.page_size,
                              total=max_offset - offset,
                              unit=' products')
        for new_offset in range(offset + self.page_size, max_offset, self.page_size):
            new_limit = limit
            if limit is not None:
                # Shrink the per-page limit by the rows already retrieved.
                new_limit = limit - new_offset + offset
            ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
            progress.update(len(ret))
            products += ret
        progress.close()
    return products, count
def _load_subquery(self, query, order_by=None, limit=None, offset=0):
    """POST a single page of the OpenSearch query and parse the JSON reply.

    Returns a tuple of (products, total_results): at most one page worth of
    entry dictionaries, plus the server-reported total number of matches.

    Raises
    ------
    SentinelAPIError
        If the response is not valid JSON or the query string was rejected.
    """
    # store last query (for testing)
    self._last_query = query
    self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)

    # load query results
    url = self._format_url(order_by, limit, offset)
    # The query goes in a form-encoded body rather than the URL so that
    # long queries do not run into URL length limits.
    response = self.session.post(url, {'q': query}, auth=self.session.auth,
                                 headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
                                 timeout=self.timeout)
    _check_scihub_response(response)

    # store last status code (for testing)
    self._last_response = response

    # parse response content
    try:
        json_feed = response.json()['feed']
        if json_feed['opensearch:totalResults'] is None:
            # We are using some unintended behavior of the server that a null is
            # returned as the total results value when the query string was incorrect.
            raise SentinelAPIError(
                'Invalid query string. Check the parameters and format.', response)
        total_results = int(json_feed['opensearch:totalResults'])
    except (ValueError, KeyError):
        raise SentinelAPIError('API response not valid. JSON decoding failed.', response)

    products = json_feed.get('entry', [])
    # this verification is necessary because if the query returns only
    # one product, self.products will be a dict not a list
    if isinstance(products, dict):
        products = [products]
    return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
@staticmethod
def to_geojson(products):
    """Return the products from a query response as a GeoJSON FeatureCollection
    with the values in their appropriate Python types.
    """
    features = []
    for index, (product_id, attrs) in enumerate(products.items()):
        properties = attrs.copy()
        properties['id'] = product_id
        # Parse the WKT footprint into a geometry, then drop the raw
        # footprint fields from the feature properties.
        geometry = geomet.wkt.loads(properties.pop('footprint'))
        del properties['gmlfootprint']
        # Fix "'datetime' is not JSON serializable": render timestamps as
        # ISO-8601 strings.
        for key, value in properties.items():
            if isinstance(value, (date, datetime)):
                properties[key] = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        features.append(
            geojson.Feature(geometry=geometry, id=index, properties=properties)
        )
    return geojson.FeatureCollection(features)
@staticmethod
def to_geodataframe(products):
    """Return the products from a query response as a GeoPandas GeoDataFrame
    with the values in their appropriate Python types.

    Raises
    ------
    ImportError
        If the optional dependencies GeoPandas and Shapely are not installed.
    """
    # BUGFIX: the decorator was duplicated (@staticmethod applied twice),
    # which makes the attribute non-callable on Python < 3.10.
    try:
        import geopandas as gpd
        import shapely.wkt
    except ImportError:
        raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")

    crs = {'init': 'epsg:4326'}  # WGS84
    if len(products) == 0:
        return gpd.GeoDataFrame(crs=crs)

    df = SentinelAPI.to_dataframe(products)
    geometry = [shapely.wkt.loads(fp) for fp in df['footprint']]
    # The raw footprint columns are redundant once the geometry is parsed.
    df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)
    return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
def get_product_odata(self, id, full=False):
    """Access the OData API to get info about a product.

    Returns a dict containing the id, title, size, md5sum, date, footprint
    and download url of the product. The date field corresponds to the Start
    ContentDate value. If `full` is set to True, then the full, detailed
    metadata of the product is returned in addition to the above.

    Parameters
    ----------
    id : string
        The UUID of the product to query
    full : bool
        Whether to get the full metadata for the Product. False by default.

    Returns
    -------
    dict[str, Any]
        A dictionary with an item for each metadata attribute

    Notes
    -----
    For a full list of mappings between the OpenSearch (Solr) and OData
    attribute names see the following definition files:
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
    """
    query_url = urljoin(self.api_url, u"odata/v1/Products('{}')?$format=json".format(id))
    if full:
        # Expanding Attributes pulls in the complete per-product metadata.
        query_url += '&$expand=Attributes'
    response = self.session.get(query_url, auth=self.session.auth,
                                timeout=self.timeout)
    _check_scihub_response(response)
    return _parse_odata_response(response.json()['d'])
def _trigger_offline_retrieval(self, url):
    """Triggers retrieval of an offline product.

    Trying to download an offline product triggers its retrieval from the
    long term archive. The returned HTTP status code conveys whether this
    was successful.

    Parameters
    ----------
    url : string
        URL for downloading the product

    Returns
    -------
    int
        The HTTP status code of the retrieval request.

    Raises
    ------
    SentinelAPILTAError
        If the LTA rejected the retrieval request (see status codes below).

    Notes
    -----
    https://scihub.copernicus.eu/userguide/LongTermArchive
    """
    with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
        # check https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
        if r.status_code == 202:
            # Retrieval request accepted; the product will become online later.
            self.logger.info("Accepted for retrieval")
        elif r.status_code == 503:
            self.logger.error("Request not accepted")
            raise SentinelAPILTAError('Request for retrieval from LTA not accepted', r)
        elif r.status_code == 403:
            self.logger.error("Requests exceed user quota")
            raise SentinelAPILTAError('Requests for retrieval from LTA exceed user quota', r)
        elif r.status_code == 500:
            # should not happen
            self.logger.error("Trying to download an offline product")
            raise SentinelAPILTAError('Trying to download an offline product', r)
        return r.status_code
def download(self, id, directory_path='.', checksum=True):
    """Download a product.

    Uses the filename on the server for the downloaded file, e.g.
    "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

    Incomplete downloads are continued and complete files are skipped.

    Parameters
    ----------
    id : string
        UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
    directory_path : string, optional
        Where the file will be downloaded
    checksum : bool, optional
        If True, verify the downloaded file's integrity by checking its MD5 checksum.
        Throws InvalidChecksumError if the checksum does not match.
        Defaults to True.

    Returns
    -------
    product_info : dict
        Dictionary containing the product's info from get_product_odata() as well as
        the path on disk.

    Raises
    ------
    InvalidChecksumError
        If the MD5 checksum does not match the checksum on the server.
    """
    product_info = self.get_product_odata(id)
    path = join(directory_path, product_info['title'] + '.zip')
    product_info['path'] = path
    product_info['downloaded_bytes'] = 0

    self.logger.info('Downloading %s to %s', id, path)

    if exists(path):
        # We assume that the product has been downloaded and is complete
        return product_info

    # An incomplete download triggers the retrieval from the LTA if the product is not online
    if not product_info['Online']:
        self.logger.warning(
            'Product %s is not online. Triggering retrieval from long term archive.',
            product_info['id'])
        self._trigger_offline_retrieval(product_info['url'])
        # Nothing to download yet; the caller must retry once it is online.
        return product_info

    # Use a temporary file for downloading
    temp_path = path + '.incomplete'
    skip_download = False
    if exists(temp_path):
        if getsize(temp_path) > product_info['size']:
            # The partial file cannot be valid; start over.
            self.logger.warning(
                "Existing incomplete file %s is larger than the expected final size"
                " (%s vs %s bytes). Deleting it.",
                str(temp_path), getsize(temp_path), product_info['size'])
            remove(temp_path)
        elif getsize(temp_path) == product_info['size']:
            if self._md5_compare(temp_path, product_info['md5']):
                # Fully downloaded already; only the final rename remains.
                skip_download = True
            else:
                # Log a warning since this should never happen
                self.logger.warning(
                    "Existing incomplete file %s appears to be fully downloaded but "
                    "its checksum is incorrect. Deleting it.",
                    str(temp_path))
                remove(temp_path)
        else:
            # continue downloading
            self.logger.info(
                "Download will resume from existing incomplete file %s.", temp_path)
            pass

    if not skip_download:
        # Store the number of downloaded bytes for unit tests
        product_info['downloaded_bytes'] = self._download(
            product_info['url'], temp_path, self.session, product_info['size'])

    # Check integrity with MD5 checksum
    if checksum is True:
        if not self._md5_compare(temp_path, product_info['md5']):
            remove(temp_path)
            raise InvalidChecksumError('File corrupt: checksums do not match')

    # Download successful, rename the temporary file to its proper name
    shutil.move(temp_path, path)
    return product_info
def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
    """Download a list of products.

    Takes a list of product IDs as input. This means that the return value of
    query() can be passed directly to this method.

    File names on the server are used for the downloaded files, e.g.
    "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

    In case of interruptions or other exceptions, downloading will restart
    from where it left off. Downloading is attempted at most max_attempts
    times to avoid getting stuck with unrecoverable errors.

    Parameters
    ----------
    products : list
        List of product IDs
    directory_path : string
        Directory where the downloaded files will be downloaded
    max_attempts : int, optional
        Number of allowed retries before giving up downloading a product.
        Defaults to 10.
    checksum : bool, optional
        If True, verify the downloaded files' integrity by checking its MD5
        checksum. Throws InvalidChecksumError if the checksum does not match.
        Defaults to True.

    Raises
    ------
    Raises the most recent downloading exception if all downloads failed.

    Returns
    -------
    dict[string, dict]
        A dictionary containing the return value from download() for each
        successfully downloaded product.
    dict[string, dict]
        A dictionary containing the product information for products whose
        retrieval from the long term archive was successfully triggered.
    set[string]
        The list of products that failed to download.
    """
    product_ids = list(products)
    self.logger.info("Will download %d products", len(product_ids))
    return_values = OrderedDict()
    last_exception = None
    for i, product_id in enumerate(products):
        for attempt_num in range(max_attempts):
            try:
                product_info = self.download(product_id, directory_path, checksum)
                return_values[product_id] = product_info
                break
            except (KeyboardInterrupt, SystemExit):
                # Never swallow user-initiated aborts.
                raise
            except InvalidChecksumError as e:
                last_exception = e
                self.logger.warning(
                    "Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
            except SentinelAPILTAError as e:
                last_exception = e
                self.logger.exception("There was an error retrieving %s from the LTA", product_id)
                # Retrying will not help until the LTA retrieval completes.
                break
            except Exception as e:
                last_exception = e
                self.logger.exception("There was an error downloading %s", product_id)
        self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
    failed = set(products) - set(return_values)
    # split up successfully processed products into downloaded and only triggered retrieval from the LTA
    triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
    downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
    if len(failed) == len(product_ids) and last_exception is not None:
        # Everything failed: surface the most recent error to the caller.
        raise last_exception
    return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
def _query_names(self, names):
"""Find products by their names, e.g.
S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.
Note that duplicates exist on server, so multiple products can be returned for each name.
Parameters
----------
names : list[string]
List of product names.
Returns
-------
dict[string, dict[str, dict]]
A dictionary mapping each name to a dictionary which contains the products with
that name (with ID as the key).
"""
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
products = {}
# 40 names per query fits reasonably well inside the query limit
for chunk in chunks(names, 40):
query = " OR ".join(chunk)
products.update(self.query(raw=query))
# Group the products
output = OrderedDict((name, dict()) for name in names)
for id, metadata in products.items():
name = metadata['identifier']
output[name][id] = metadata
return output
def check_files(self, paths=None, ids=None, directory=None, delete=False):
    """Verify the integrity of product files on disk.

    Integrity is checked by comparing the size and checksum of the file with
    the respective values on the server.

    The input can be a list of products to check or a list of IDs and a
    directory.

    In cases where multiple products with different IDs exist on the server
    for given product name, the file is considered to be correct if any of
    them matches the file size and checksum. A warning is logged in such
    situations.

    The corrupt products' OData info is included in the return value to make
    it easier to re-download the products, if necessary.

    Parameters
    ----------
    paths : list[string]
        List of product file paths.
    ids : list[string]
        List of product IDs.
    directory : string
        Directory where the files are located, if checking based on product IDs.
    delete : bool
        Whether to delete corrupt products. Defaults to False.

    Returns
    -------
    dict[str, list[dict]]
        A dictionary listing the invalid or missing files. The dictionary maps
        the corrupt file paths to a list of OData dictionaries of matching
        products on the server (as returned by
        :meth:`SentinelAPI.get_product_odata()`).
    """
    if not ids and not paths:
        raise ValueError("Must provide either file paths or product IDs and a directory")
    if ids and not directory:
        raise ValueError("Directory value missing")
    paths = paths or []
    ids = ids or []

    def name_from_path(path):
        # Product name is the file name without directory or extension.
        return splitext(basename(path))[0]

    # Get product IDs corresponding to the files on disk
    names = []
    if paths:
        names = list(map(name_from_path, paths))
        result = self._query_names(names)
        for product_dicts in result.values():
            ids += list(product_dicts)
    names_from_paths = set(names)
    ids = set(ids)

    # Collect the OData information for each product
    # Product name -> list of matching odata dicts
    product_infos = defaultdict(list)
    for id in ids:
        odata = self.get_product_odata(id)
        name = odata['title']
        product_infos[name].append(odata)

        # Collect paths for products that were given only by ID.
        if name not in names_from_paths:
            paths.append(join(directory, name + '.zip'))

    # Now go over the list of products and check them
    corrupt = {}
    for path in paths:
        name = name_from_path(path)

        if len(product_infos[name]) > 1:
            self.logger.warning("{} matches multiple products on server".format(path))

        if not exists(path):
            # We will consider missing files as corrupt also
            self.logger.info("{} does not exist on disk".format(path))
            corrupt[path] = product_infos[name]
            continue

        # The file is fine if it matches ANY of the candidate products.
        is_fine = False
        for product_info in product_infos[name]:
            if (getsize(path) == product_info['size'] and
                    self._md5_compare(path, product_info['md5'])):
                is_fine = True
                break
        if not is_fine:
            self.logger.info("{} is corrupt".format(path))
            corrupt[path] = product_infos[name]
            if delete:
                remove(path)

    return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
"""Compare a given MD5 checksum with one calculated from a file."""
with closing(self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B",
unit_scale=True)) as progress:
md5 = hashlib.md5()
with open(file_path, "rb") as f:
while True:
block_data = f.read(block_size)
if not block_data:
break
md5.update(block_data)
progress.update(len(block_data))
return md5.hexdigest().lower() == checksum.lower()
def _download(self, url, path, session, file_size):
    """Stream `url` to `path`, resuming from an existing partial file.

    If `path` already exists, an HTTP Range request is used to continue the
    download from where it left off. Returns the number of bytes downloaded
    during this call (excluding previously downloaded bytes).
    """
    headers = {}
    continuing = exists(path)
    if continuing:
        already_downloaded_bytes = getsize(path)
        # Ask the server to send only the remainder of the file.
        headers = {'Range': 'bytes={}-'.format(already_downloaded_bytes)}
    else:
        already_downloaded_bytes = 0
    downloaded_bytes = 0
    with closing(session.get(url, stream=True, auth=session.auth,
                             headers=headers, timeout=self.timeout)) as r, \
            closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
                               unit_scale=True, initial=already_downloaded_bytes)) as progress:
        _check_scihub_response(r, test_json=False)
        chunk_size = 2 ** 20  # download in 1 MB chunks
        mode = 'ab' if continuing else 'wb'
        with open(path, mode) as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    progress.update(len(chunk))
                    downloaded_bytes += len(chunk)
    # Return the number of bytes downloaded
    return downloaded_bytes
def _tqdm(self, **kwargs):
    """tqdm progressbar wrapper. May be overridden to customize progressbar behavior"""
    # Progressbars are globally switched off via show_progressbars.
    kwargs['disable'] = not self.show_progressbars
    return tqdm(**kwargs)
|
sentinelsat/sentinelsat | sentinelsat/sentinel.py | SentinelAPI.to_geodataframe | python | def to_geodataframe(products):
try:
import geopandas as gpd
import shapely.wkt
except ImportError:
raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")
crs = {'init': 'epsg:4326'} # WGS84
if len(products) == 0:
return gpd.GeoDataFrame(crs=crs)
df = SentinelAPI.to_dataframe(products)
geometry = [shapely.wkt.loads(fp) for fp in df['footprint']]
# remove useless columns
df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)
return gpd.GeoDataFrame(df, crs=crs, geometry=geometry) | Return the products from a query response as a GeoPandas GeoDataFrame
with the values in their appropriate Python types. | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L373-L391 | [
"def to_dataframe(products):\n \"\"\"Return the products from a query response as a Pandas DataFrame\n with the values in their appropriate Python types.\n \"\"\"\n try:\n import pandas as pd\n except ImportError:\n raise ImportError(\"to_dataframe requires the optional dependency Pandas.\")\n\n return pd.DataFrame.from_dict(products, orient='index')\n"
] | class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
show_progressbars=True, timeout=None):
self.session = requests.Session()
if user and password:
self.session.auth = (user, password)
self.api_url = api_url if api_url.endswith('/') else api_url + '/'
self.page_size = 100
self.user_agent = 'sentinelsat/' + sentinelsat_version
self.session.headers['User-Agent'] = self.user_agent
self.show_progressbars = show_progressbars
self.timeout = timeout
# For unit tests
self._last_query = None
self._last_response = None
def query(self, area=None, date=None, raw=None, area_relation='Intersects',
order_by=None, limit=None, offset=0, **keywords):
"""Query the OpenSearch API with the coordinates of an area, a date interval
and any other search keywords accepted by the API.
Parameters
----------
area : str, optional
The area of interest formatted as a Well-Known Text string.
date : tuple of (str or datetime) or str, optional
A time interval filter based on the Sensing Start Time of the products.
Expects a tuple of (start, end), e.g. ("NOW-1DAY", "NOW").
The timestamps can be either a Python datetime or a string in one of the
following formats:
- yyyyMMdd
- yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)
- yyyy-MM-ddThh:mm:ssZ
- NOW
- NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)
- NOW+<n>DAY(S)
- yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)
- NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit
Alternatively, an already fully formatted string such as "[NOW-1DAY TO NOW]" can be
used as well.
raw : str, optional
Additional query text that will be appended to the query.
area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
What relation to use for testing the AOI. Case insensitive.
- Intersects: true if the AOI and the footprint intersect (default)
- Contains: true if the AOI is inside the footprint
- IsWithin: true if the footprint is inside the AOI
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order,
respectively. Ascending order is used if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
**keywords
Additional keywords can be used to specify other query parameters,
e.g. `relativeorbitnumber=70`.
See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
for a full list.
Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.
`None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.
Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.
The time interval formats accepted by the `date` parameter can also be used with
any other parameters that expect time intervals (that is: 'beginposition', 'endposition',
'date', 'creationdate', and 'ingestiondate').
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
"""
query = self.format_query(area, date, raw, area_relation, **keywords)
self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
order_by, limit, offset, query)
formatted_order_by = _format_order_by(order_by)
response, count = self._load_query(query, formatted_order_by, limit, offset)
self.logger.info("Found %s products", count)
return _parse_opensearch_response(response)
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
"""Create a OpenSearch API query string.
"""
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts)
def query_raw(self, query, order_by=None, limit=None, offset=0):
    """Run a full-text query on the OpenSearch API, using the format specified in
    https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch

    DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be
    removed in the next major release.

    Parameters
    ----------
    query : str
        The query string.
    order_by: str, optional
        A comma-separated list of fields to order by (on server side).
        Prefix a field name with '+' or '-' to sort ascending or descending;
        ascending is the default when the prefix is omitted.
        Example: "cloudcoverpercentage, -beginposition".
    limit: int, optional
        Maximum number of products returned. Defaults to no limit.
    offset: int, optional
        The number of results to skip. Defaults to 0.

    Returns
    -------
    dict[string, dict]
        The matching products, keyed by product ID, with each product's
        attributes (a dictionary) as the value.
    """
    # Emit the deprecation notice, then delegate everything to query().
    warnings.warn(
        "query_raw() has been merged with query(). use query(raw=...) instead.",
        PendingDeprecationWarning
    )
    return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
    """Get the number of products matching a query.

    Accepted parameters are identical to :meth:`SentinelAPI.query()`.

    This is a significantly more efficient alternative to doing `len(api.query())`,
    which can take minutes to run for queries matching thousands of products.

    Returns
    -------
    int
        The number of products matching a query.
    """
    # These query() arguments are accepted for interface compatibility with
    # query(), but have no effect on a count and are therefore discarded.
    for ignored in ('order_by', 'limit', 'offset'):
        keywords.pop(ignored, None)

    query = self.format_query(area, date, raw, area_relation, **keywords)
    # limit=0 fetches no product entries, only the server-side total count.
    _, total_count = self._load_query(query, limit=0)
    return total_count
def _load_query(self, query, order_by=None, limit=None, offset=0):
    """Run `query` against the OpenSearch API, transparently following pagination.

    Returns a tuple of (list of product entries, total matching count).
    """
    # First page; also yields the server-side total result count.
    products, count = self._load_subquery(query, order_by, limit, offset)

    # repeat query until all results have been loaded
    max_offset = count
    if limit is not None:
        # Never page past the caller-requested limit
        max_offset = min(count, offset + limit)
    if max_offset > offset + self.page_size:
        # More than one page is required. The first self.page_size products
        # were already fetched above, hence initial=self.page_size.
        progress = self._tqdm(desc="Querying products",
                              initial=self.page_size,
                              total=max_offset - offset,
                              unit=' products')
        for new_offset in range(offset + self.page_size, max_offset, self.page_size):
            # Shrink the remaining limit by the number of products already requested
            new_limit = limit
            if limit is not None:
                new_limit = limit - new_offset + offset
            ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
            progress.update(len(ret))
            products += ret
        progress.close()
    return products, count
def _load_subquery(self, query, order_by=None, limit=None, offset=0):
    """Fetch a single page of query results plus the total result count."""
    # store last query (for testing)
    self._last_query = query
    self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)

    # load query results
    url = self._format_url(order_by, limit, offset)
    # POST the query text as form data; long queries would not fit in the URL
    response = self.session.post(url, {'q': query}, auth=self.session.auth,
                                 headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
                                 timeout=self.timeout)
    _check_scihub_response(response)

    # store last status code (for testing)
    self._last_response = response

    # parse response content
    try:
        json_feed = response.json()['feed']
        if json_feed['opensearch:totalResults'] is None:
            # We are using some unintended behavior of the server that a null is
            # returned as the total results value when the query string was incorrect.
            raise SentinelAPIError(
                'Invalid query string. Check the parameters and format.', response)
        total_results = int(json_feed['opensearch:totalResults'])
    except (ValueError, KeyError):
        # Missing 'feed'/'opensearch:totalResults' keys or a non-integer count
        raise SentinelAPIError('API response not valid. JSON decoding failed.', response)

    products = json_feed.get('entry', [])
    # this verification is necessary because if the query returns only
    # one product, self.products will be a dict not a list
    if isinstance(products, dict):
        products = [products]

    return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
@staticmethod
def to_geojson(products):
    """Convert an OpenSearch query response into a GeoJSON FeatureCollection.

    Each product becomes one Feature: its geometry is parsed from the WKT
    footprint and its properties are the remaining attributes, with datetime
    values serialized to ISO-like strings so the result is JSON serializable.
    """
    features = []
    for index, (product_id, attrs) in enumerate(products.items()):
        properties = dict(attrs)
        properties['id'] = product_id
        # Parse the WKT footprint into a geometry and drop the footprint fields
        geometry = geomet.wkt.loads(properties.pop('footprint'))
        del properties['gmlfootprint']
        # Fix "'datetime' is not JSON serializable"
        for key, val in properties.items():
            if isinstance(val, (date, datetime)):
                properties[key] = val.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        features.append(
            geojson.Feature(geometry=geometry, id=index, properties=properties)
        )
    return geojson.FeatureCollection(features)
@staticmethod
def to_dataframe(products):
"""Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index')
# NOTE: this method was previously (incorrectly) decorated with @staticmethod
# even though it takes `self` and is invoked as an instance method elsewhere in
# this class (from download() and check_files()); the decorator shifted every
# argument by one, so `self` received the product UUID. Decorator removed.
def get_product_odata(self, id, full=False):
    """Access OData API to get info about a product.

    Returns a dict containing the id, title, size, md5sum, date, footprint and download url
    of the product. The date field corresponds to the Start ContentDate value.

    If `full` is set to True, then the full, detailed metadata of the product is returned
    in addition to the above.

    Parameters
    ----------
    id : string
        The UUID of the product to query
    full : bool
        Whether to get the full metadata for the Product. False by default.

    Returns
    -------
    dict[str, Any]
        A dictionary with an item for each metadata attribute

    Notes
    -----
    For a full list of mappings between the OpenSearch (Solr) and OData attribute names
    see the following definition files:
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
    """
    url = urljoin(self.api_url, u"odata/v1/Products('{}')?$format=json".format(id))
    if full:
        # Request the complete, detailed metadata in the same response
        url += '&$expand=Attributes'
    response = self.session.get(url, auth=self.session.auth,
                                timeout=self.timeout)
    _check_scihub_response(response)
    values = _parse_odata_response(response.json()['d'])
    return values
def _trigger_offline_retrieval(self, url):
    """ Triggers retrieval of an offline product

    Trying to download an offline product triggers its retrieval from the long term archive.
    The returned HTTP status code conveys whether this was successful.

    Parameters
    ----------
    url : string
        URL for downloading the product

    Notes
    -----
    https://scihub.copernicus.eu/userguide/LongTermArchive
    """
    # Map LTA failure codes to their (log message, exception message) pair.
    # See https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
    failures = {
        503: ("Request not accepted",
              'Request for retrieval from LTA not accepted'),
        403: ("Requests exceed user quota",
              'Requests for retrieval from LTA exceed user quota'),
        # 500 should not happen
        500: ("Trying to download an offline product",
              'Trying to download an offline product'),
    }
    with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
        if r.status_code == 202:
            self.logger.info("Accepted for retrieval")
        elif r.status_code in failures:
            log_msg, err_msg = failures[r.status_code]
            self.logger.error(log_msg)
            raise SentinelAPILTAError(err_msg, r)
        return r.status_code
def download(self, id, directory_path='.', checksum=True):
    """Download a product.

    Uses the filename on the server for the downloaded file, e.g.
    "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

    Incomplete downloads are continued and complete files are skipped.

    Parameters
    ----------
    id : string
        UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
    directory_path : string, optional
        Where the file will be downloaded
    checksum : bool, optional
        If True, verify the downloaded file's integrity by checking its MD5 checksum.
        Throws InvalidChecksumError if the checksum does not match.
        Defaults to True.

    Returns
    -------
    product_info : dict
        Dictionary containing the product's info from get_product_info() as well as
        the path on disk.

    Raises
    ------
    InvalidChecksumError
        If the MD5 checksum does not match the checksum on the server.
    """
    product_info = self.get_product_odata(id)
    path = join(directory_path, product_info['title'] + '.zip')
    product_info['path'] = path
    product_info['downloaded_bytes'] = 0

    self.logger.info('Downloading %s to %s', id, path)

    if exists(path):
        # We assume that the product has been downloaded and is complete
        return product_info

    # An incomplete download triggers the retrieval from the LTA if the product is not online
    if not product_info['Online']:
        self.logger.warning(
            'Product %s is not online. Triggering retrieval from long term archive.',
            product_info['id'])
        self._trigger_offline_retrieval(product_info['url'])
        # Nothing downloaded yet; caller can retry once the product is online
        return product_info

    # Use a temporary file for downloading
    temp_path = path + '.incomplete'

    skip_download = False
    if exists(temp_path):
        if getsize(temp_path) > product_info['size']:
            # The partial file cannot possibly be valid -- start over
            self.logger.warning(
                "Existing incomplete file %s is larger than the expected final size"
                " (%s vs %s bytes). Deleting it.",
                str(temp_path), getsize(temp_path), product_info['size'])
            remove(temp_path)
        elif getsize(temp_path) == product_info['size']:
            if self._md5_compare(temp_path, product_info['md5']):
                # Fully downloaded and valid; only the final rename remains
                skip_download = True
            else:
                # Log a warning since this should never happen
                self.logger.warning(
                    "Existing incomplete file %s appears to be fully downloaded but "
                    "its checksum is incorrect. Deleting it.",
                    str(temp_path))
                remove(temp_path)
        else:
            # continue downloading
            self.logger.info(
                "Download will resume from existing incomplete file %s.", temp_path)
            pass

    if not skip_download:
        # Store the number of downloaded bytes for unit tests
        product_info['downloaded_bytes'] = self._download(
            product_info['url'], temp_path, self.session, product_info['size'])

    # Check integrity with MD5 checksum
    if checksum is True:
        if not self._md5_compare(temp_path, product_info['md5']):
            remove(temp_path)
            raise InvalidChecksumError('File corrupt: checksums do not match')

    # Download successful, rename the temporary file to its proper name
    shutil.move(temp_path, path)
    return product_info
def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
    """Download a list of products.

    Takes a list of product IDs as input. This means that the return value of query() can be
    passed directly to this method.

    File names on the server are used for the downloaded files, e.g.
    "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

    In case of interruptions or other exceptions, downloading will restart from where it left
    off. Downloading is attempted at most max_attempts times to avoid getting stuck with
    unrecoverable errors.

    Parameters
    ----------
    products : list
        List of product IDs
    directory_path : string
        Directory where the downloaded files will be downloaded
    max_attempts : int, optional
        Number of allowed retries before giving up downloading a product. Defaults to 10.
    checksum : bool, optional
        If True, verify the downloaded files' integrity by checking its MD5 checksum.
        Throws InvalidChecksumError if the checksum does not match.
        Defaults to True.

    Raises
    ------
    Raises the most recent downloading exception if all downloads failed.

    Returns
    -------
    dict[string, dict]
        A dictionary containing the return value from download() for each successfully
        downloaded product.
    dict[string, dict]
        A dictionary containing the product information for products whose retrieval
        from the long term archive was successfully triggered.
    set[string]
        The list of products that failed to download.
    """
    product_ids = list(products)
    self.logger.info("Will download %d products", len(product_ids))
    return_values = OrderedDict()
    last_exception = None
    for i, product_id in enumerate(products):
        # Retry each product up to max_attempts times
        for attempt_num in range(max_attempts):
            try:
                product_info = self.download(product_id, directory_path, checksum)
                return_values[product_id] = product_info
                break
            except (KeyboardInterrupt, SystemExit):
                # Never swallow user-initiated interruptions
                raise
            except InvalidChecksumError as e:
                # Corrupt download -- retry
                last_exception = e
                self.logger.warning(
                    "Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
            except SentinelAPILTAError as e:
                # LTA trigger errors are not retried
                last_exception = e
                self.logger.exception("There was an error retrieving %s from the LTA", product_id)
                break
            except Exception as e:
                last_exception = e
                self.logger.exception("There was an error downloading %s", product_id)
        self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
    failed = set(products) - set(return_values)
    # split up successfully processed products into downloaded and only triggered retrieval from the LTA
    triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
    downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
    if len(failed) == len(product_ids) and last_exception is not None:
        # Every single download failed -- surface the most recent error
        raise last_exception
    return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
def _query_names(self, names):
    """Find products by their names, e.g.
    S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.

    Note that duplicates exist on server, so multiple products can be returned for each name.

    Parameters
    ----------
    names : list[string]
        List of product names.

    Returns
    -------
    dict[string, dict[str, dict]]
        A dictionary mapping each name to a dictionary which contains the products with
        that name (with ID as the key).
    """
    # 40 names per query fits reasonably well inside the query limit
    chunk_size = 40
    found = {}
    for start in range(0, len(names), chunk_size):
        chunk = names[start:start + chunk_size]
        found.update(self.query(raw=" OR ".join(chunk)))

    # Group the found products by product name, preserving input order
    grouped = OrderedDict((name, dict()) for name in names)
    for product_id, metadata in found.items():
        grouped[metadata['identifier']][product_id] = metadata
    return grouped
def check_files(self, paths=None, ids=None, directory=None, delete=False):
    """Verify the integrity of product files on disk.

    Integrity is checked by comparing the size and checksum of the file with the respective
    values on the server.

    The input can be a list of products to check or a list of IDs and a directory.

    In cases where multiple products with different IDs exist on the server for given product
    name, the file is considered to be correct if any of them matches the file size and
    checksum. A warning is logged in such situations.

    The corrupt products' OData info is included in the return value to make it easier to
    re-download the products, if necessary.

    Parameters
    ----------
    paths : list[string]
        List of product file paths.
    ids : list[string]
        List of product IDs.
    directory : string
        Directory where the files are located, if checking based on product IDs.
    delete : bool
        Whether to delete corrupt products. Defaults to False.

    Returns
    -------
    dict[str, list[dict]]
        A dictionary listing the invalid or missing files. The dictionary maps the corrupt
        file paths to a list of OData dictionaries of matching products on the server (as
        returned by :meth:`SentinelAPI.get_product_odata()`).
    """
    if not ids and not paths:
        raise ValueError("Must provide either file paths or product IDs and a directory")
    if ids and not directory:
        raise ValueError("Directory value missing")
    paths = paths or []
    ids = ids or []

    def name_from_path(path):
        # The product name is the file name without its extension
        return splitext(basename(path))[0]

    # Get product IDs corresponding to the files on disk
    names = []
    if paths:
        names = list(map(name_from_path, paths))
        result = self._query_names(names)
        for product_dicts in result.values():
            ids += list(product_dicts)
    names_from_paths = set(names)
    ids = set(ids)

    # Collect the OData information for each product
    # Product name -> list of matching odata dicts
    product_infos = defaultdict(list)
    for id in ids:
        odata = self.get_product_odata(id)
        name = odata['title']
        product_infos[name].append(odata)

        # Collect a disk path for products that were given by ID only
        if name not in names_from_paths:
            paths.append(join(directory, name + '.zip'))

    # Now go over the list of products and check them
    corrupt = {}
    for path in paths:
        name = name_from_path(path)

        if len(product_infos[name]) > 1:
            self.logger.warning("{} matches multiple products on server".format(path))

        if not exists(path):
            # We will consider missing files as corrupt also
            self.logger.info("{} does not exist on disk".format(path))
            corrupt[path] = product_infos[name]
            continue

        is_fine = False
        for product_info in product_infos[name]:
            # The file is fine if it matches ANY of the same-named server products
            if (getsize(path) == product_info['size'] and
                    self._md5_compare(path, product_info['md5'])):
                is_fine = True
                break
        if not is_fine:
            self.logger.info("{} is corrupt".format(path))
            corrupt[path] = product_infos[name]
            if delete:
                remove(path)

    return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
    """Compare a given MD5 checksum with one calculated from a file."""
    digest = hashlib.md5()
    with closing(self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B",
                            unit_scale=True)) as progress:
        with open(file_path, "rb") as f:
            # Read the file in block_size pieces until EOF (empty bytes)
            for chunk in iter(lambda: f.read(block_size), b""):
                digest.update(chunk)
                progress.update(len(chunk))
    # Checksums are compared case-insensitively
    return digest.hexdigest().lower() == checksum.lower()
def _download(self, url, path, session, file_size):
    """Stream `url` to `path`, resuming from a partial file if one exists.

    Returns the number of bytes downloaded during this call (excluding any
    previously downloaded portion of the file).
    """
    headers = {}
    continuing = exists(path)
    if continuing:
        # Resume: ask the server for only the remaining byte range
        already_downloaded_bytes = getsize(path)
        headers = {'Range': 'bytes={}-'.format(already_downloaded_bytes)}
    else:
        already_downloaded_bytes = 0
    downloaded_bytes = 0
    with closing(session.get(url, stream=True, auth=session.auth,
                             headers=headers, timeout=self.timeout)) as r, \
            closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
                               unit_scale=True, initial=already_downloaded_bytes)) as progress:
        _check_scihub_response(r, test_json=False)
        chunk_size = 2 ** 20  # download in 1 MB chunks
        # Append when resuming, otherwise truncate/create
        mode = 'ab' if continuing else 'wb'
        with open(path, mode) as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    progress.update(len(chunk))
                    downloaded_bytes += len(chunk)
    # Return the number of bytes downloaded
    return downloaded_bytes
def _tqdm(self, **kwargs):
    """tqdm progressbar wrapper. May be overridden to customize progressbar behavior"""
    # Honor the instance-wide progressbar toggle
    kwargs['disable'] = not self.show_progressbars
    return tqdm(**kwargs)
|
sentinelsat/sentinelsat | sentinelsat/sentinel.py | SentinelAPI.get_product_odata | python | def get_product_odata(self, id, full=False):
url = urljoin(self.api_url, u"odata/v1/Products('{}')?$format=json".format(id))
if full:
url += '&$expand=Attributes'
response = self.session.get(url, auth=self.session.auth,
timeout=self.timeout)
_check_scihub_response(response)
values = _parse_odata_response(response.json()['d'])
return values | Access OData API to get info about a product.
Returns a dict containing the id, title, size, md5sum, date, footprint and download url
of the product. The date field corresponds to the Start ContentDate value.
If `full` is set to True, then the full, detailed metadata of the product is returned
in addition to the above.
Parameters
----------
id : string
The UUID of the product to query
full : bool
Whether to get the full metadata for the Product. False by default.
Returns
-------
dict[str, Any]
A dictionary with an item for each metadata attribute
Notes
-----
For a full list of mappings between the OpenSearch (Solr) and OData attribute names
see the following definition files:
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L393-L429 | [
"def _parse_odata_response(product):\n output = {\n 'id': product['Id'],\n 'title': product['Name'],\n 'size': int(product['ContentLength']),\n product['Checksum']['Algorithm'].lower(): product['Checksum']['Value'],\n 'date': _parse_odata_timestamp(product['ContentDate']['Start']),\n 'footprint': _parse_gml_footprint(product[\"ContentGeometry\"]),\n 'url': product['__metadata']['media_src'],\n 'Online': product.get('Online', True),\n 'Creation Date': _parse_odata_timestamp(product['CreationDate']),\n 'Ingestion Date': _parse_odata_timestamp(product['IngestionDate']),\n }\n # Parse the extended metadata, if provided\n converters = [int, float, _parse_iso_date]\n for attr in product['Attributes'].get('results', []):\n value = attr['Value']\n for f in converters:\n try:\n value = f(attr['Value'])\n break\n except ValueError:\n pass\n output[attr['Name']] = value\n return output\n",
"def _check_scihub_response(response, test_json=True):\n \"\"\"Check that the response from server has status code 2xx and that the response is valid JSON.\n \"\"\"\n # Prevent requests from needing to guess the encoding\n # SciHub appears to be using UTF-8 in all of their responses\n response.encoding = 'utf-8'\n try:\n response.raise_for_status()\n if test_json:\n response.json()\n except (requests.HTTPError, ValueError):\n msg = \"Invalid API response.\"\n try:\n msg = response.headers['cause-message']\n except:\n try:\n msg = response.json()['error']['message']['value']\n except:\n if not response.text.strip().startswith('{'):\n try:\n h = html2text.HTML2Text()\n h.ignore_images = True\n h.ignore_anchors = True\n msg = h.handle(response.text).strip()\n except:\n pass\n api_error = SentinelAPIError(msg, response)\n # Suppress \"During handling of the above exception...\" message\n # See PEP 409\n api_error.__cause__ = None\n raise api_error\n"
] | class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
show_progressbars=True, timeout=None):
self.session = requests.Session()
if user and password:
self.session.auth = (user, password)
self.api_url = api_url if api_url.endswith('/') else api_url + '/'
self.page_size = 100
self.user_agent = 'sentinelsat/' + sentinelsat_version
self.session.headers['User-Agent'] = self.user_agent
self.show_progressbars = show_progressbars
self.timeout = timeout
# For unit tests
self._last_query = None
self._last_response = None
def query(self, area=None, date=None, raw=None, area_relation='Intersects',
order_by=None, limit=None, offset=0, **keywords):
"""Query the OpenSearch API with the coordinates of an area, a date interval
and any other search keywords accepted by the API.
Parameters
----------
area : str, optional
The area of interest formatted as a Well-Known Text string.
date : tuple of (str or datetime) or str, optional
A time interval filter based on the Sensing Start Time of the products.
Expects a tuple of (start, end), e.g. ("NOW-1DAY", "NOW").
The timestamps can be either a Python datetime or a string in one of the
following formats:
- yyyyMMdd
- yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)
- yyyy-MM-ddThh:mm:ssZ
- NOW
- NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)
- NOW+<n>DAY(S)
- yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)
- NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit
Alternatively, an already fully formatted string such as "[NOW-1DAY TO NOW]" can be
used as well.
raw : str, optional
Additional query text that will be appended to the query.
area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
What relation to use for testing the AOI. Case insensitive.
- Intersects: true if the AOI and the footprint intersect (default)
- Contains: true if the AOI is inside the footprint
- IsWithin: true if the footprint is inside the AOI
order_by: str, optional
A comma-separated list of fields to order by (on server side).
Prefix the field name by '+' or '-' to sort in ascending or descending order,
respectively. Ascending order is used if prefix is omitted.
Example: "cloudcoverpercentage, -beginposition".
limit: int, optional
Maximum number of products returned. Defaults to no limit.
offset: int, optional
The number of results to skip. Defaults to 0.
**keywords
Additional keywords can be used to specify other query parameters,
e.g. `relativeorbitnumber=70`.
See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
for a full list.
Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.
`None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.
Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.
The time interval formats accepted by the `date` parameter can also be used with
any other parameters that expect time intervals (that is: 'beginposition', 'endposition',
'date', 'creationdate', and 'ingestiondate').
Returns
-------
dict[string, dict]
Products returned by the query as a dictionary with the product ID as the key and
the product's attributes (a dictionary) as the value.
"""
query = self.format_query(area, date, raw, area_relation, **keywords)
self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
order_by, limit, offset, query)
formatted_order_by = _format_order_by(order_by)
response, count = self._load_query(query, formatted_order_by, limit, offset)
self.logger.info("Found %s products", count)
return _parse_opensearch_response(response)
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
"""Create a OpenSearch API query string.
"""
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts)
def query_raw(self, query, order_by=None, limit=None, offset=0):
    """Run a full-text OpenSearch query string directly.

    DEPRECATED: use :meth:`query(raw=...) <.query>` instead; this method
    will be removed in the next major release.

    Parameters
    ----------
    query : str
        The query string.
    order_by: str, optional
        Comma-separated list of fields to sort by on the server side.
        Prefix a field with '+'/'-' for ascending/descending order
        (ascending if omitted), e.g. "cloudcoverpercentage, -beginposition".
    limit: int, optional
        Maximum number of products returned. Defaults to no limit.
    offset: int, optional
        The number of results to skip. Defaults to 0.

    Returns
    -------
    dict[string, dict]
        Matching products keyed by product ID, with the product's
        attributes (a dictionary) as the value.
    """
    # Steer callers toward the merged query() API before delegating.
    warnings.warn(
        "query_raw() has been merged with query(). use query(raw=...) instead.",
        PendingDeprecationWarning
    )
    return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
    """Return the number of products matching a query.

    Accepted parameters are identical to :meth:`SentinelAPI.query()`.

    Significantly more efficient than ``len(api.query())`` for queries
    matching thousands of products, since no product metadata is fetched.

    Returns
    -------
    int
        The number of products matching a query.
    """
    # Tolerate (and silently drop) pagination/ordering arguments so callers
    # can reuse the exact kwargs they would pass to query().
    for ignored in ('order_by', 'limit', 'offset'):
        keywords.pop(ignored, None)
    query = self.format_query(area, date, raw, area_relation, **keywords)
    # A zero-row request still reports the total hit count.
    return self._load_query(query, limit=0)[1]
def _load_query(self, query, order_by=None, limit=None, offset=0):
    """Fetch all requested result pages of `query`; return (products, total_count)."""
    # First page also tells us the server-side total number of matches.
    products, count = self._load_subquery(query, order_by, limit, offset)

    # repeat query until all results have been loaded
    max_offset = count
    if limit is not None:
        # Never page past the caller-requested window.
        max_offset = min(count, offset + limit)
    if max_offset > offset + self.page_size:
        # More than one page is needed: show paging progress.
        progress = self._tqdm(desc="Querying products",
                              initial=self.page_size,
                              total=max_offset - offset,
                              unit=' products')
        for new_offset in range(offset + self.page_size, max_offset, self.page_size):
            # Shrink the per-request limit so the final page is not overfetched.
            new_limit = limit
            if limit is not None:
                new_limit = limit - new_offset + offset
            ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
            progress.update(len(ret))
            products += ret
        progress.close()
    return products, count
def _load_subquery(self, query, order_by=None, limit=None, offset=0):
    """POST one result page of `query`; return (products_list, total_count)."""
    # store last query (for testing)
    self._last_query = query
    self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)

    # load query results
    url = self._format_url(order_by, limit, offset)
    # Query text is sent in the POST body, so its length is not capped by URL limits.
    response = self.session.post(url, {'q': query}, auth=self.session.auth,
                                 headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
                                 timeout=self.timeout)
    _check_scihub_response(response)

    # store last status code (for testing)
    self._last_response = response

    # parse response content
    try:
        json_feed = response.json()['feed']
        if json_feed['opensearch:totalResults'] is None:
            # We are using some unintended behavior of the server that a null is
            # returned as the total results value when the query string was incorrect.
            raise SentinelAPIError(
                'Invalid query string. Check the parameters and format.', response)
        total_results = int(json_feed['opensearch:totalResults'])
    except (ValueError, KeyError):
        # Covers both malformed JSON and a missing 'feed'/'opensearch:totalResults' key.
        raise SentinelAPIError('API response not valid. JSON decoding failed.', response)

    products = json_feed.get('entry', [])
    # this verification is necessary because if the query returns only
    # one product, self.products will be a dict not a list
    if isinstance(products, dict):
        products = [products]
    return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
@staticmethod
def to_geojson(products):
    """Convert a query response into a GeoJSON FeatureCollection.

    Values keep their Python types, except datetimes, which are rendered
    as ISO-8601 strings so the result is JSON serializable.
    """
    feature_list = []
    for i, (product_id, props) in enumerate(products.items()):
        attrs = dict(props)
        attrs['id'] = product_id
        geometry = geomet.wkt.loads(attrs.pop('footprint'))
        del attrs['gmlfootprint']
        # Fix "'datetime' is not JSON serializable"
        for key, val in attrs.items():
            if isinstance(val, (date, datetime)):
                attrs[key] = val.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        feature_list.append(
            geojson.Feature(geometry=geometry, id=i, properties=attrs)
        )
    return geojson.FeatureCollection(feature_list)
@staticmethod
def to_dataframe(products):
"""Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index')
@staticmethod
def to_geodataframe(products):
    """Convert a query response into a GeoPandas GeoDataFrame in WGS84."""
    try:
        import geopandas as gpd
        import shapely.wkt
    except ImportError:
        raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")

    crs = {'init': 'epsg:4326'}  # WGS84
    if len(products) == 0:
        # No rows: still return a frame carrying the CRS.
        return gpd.GeoDataFrame(crs=crs)

    df = SentinelAPI.to_dataframe(products)
    footprints = [shapely.wkt.loads(fp) for fp in df['footprint']]
    # remove useless columns
    df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)
    return gpd.GeoDataFrame(df, crs=crs, geometry=footprints)
def _trigger_offline_retrieval(self, url):
    """ Triggers retrieval of an offline product

    Trying to download an offline product triggers its retrieval from the long term archive.
    The returned HTTP status code conveys whether this was successful.

    Parameters
    ----------
    url : string
        URL for downloading the product

    Returns
    -------
    int
        HTTP status code of the retrieval request (202 means accepted).

    Raises
    ------
    SentinelAPILTAError
        If the request was rejected (503), exceeded the user quota (403),
        or hit an unexpected server error (500).

    Notes
    -----
    https://scihub.copernicus.eu/userguide/LongTermArchive
    """
    with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
        # check https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
        if r.status_code == 202:
            self.logger.info("Accepted for retrieval")
        elif r.status_code == 503:
            self.logger.error("Request not accepted")
            raise SentinelAPILTAError('Request for retrieval from LTA not accepted', r)
        elif r.status_code == 403:
            self.logger.error("Requests exceed user quota")
            raise SentinelAPILTAError('Requests for retrieval from LTA exceed user quota', r)
        elif r.status_code == 500:
            # should not happen
            self.logger.error("Trying to download an offline product")
            raise SentinelAPILTAError('Trying to download an offline product', r)
        return r.status_code
def download(self, id, directory_path='.', checksum=True):
    """Download a product.

    Uses the filename on the server for the downloaded file, e.g.
    "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

    Incomplete downloads are continued and complete files are skipped.

    Parameters
    ----------
    id : string
        UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
    directory_path : string, optional
        Where the file will be downloaded
    checksum : bool, optional
        If True, verify the downloaded file's integrity by checking its MD5 checksum.
        Throws InvalidChecksumError if the checksum does not match.
        Defaults to True.

    Returns
    -------
    product_info : dict
        Dictionary containing the product's info from get_product_info() as well as
        the path on disk.

    Raises
    ------
    InvalidChecksumError
        If the MD5 checksum does not match the checksum on the server.
    """
    product_info = self.get_product_odata(id)
    path = join(directory_path, product_info['title'] + '.zip')
    product_info['path'] = path
    product_info['downloaded_bytes'] = 0

    self.logger.info('Downloading %s to %s', id, path)

    if exists(path):
        # We assume that the product has been downloaded and is complete
        return product_info

    # An incomplete download triggers the retrieval from the LTA if the product is not online
    if not product_info['Online']:
        self.logger.warning(
            'Product %s is not online. Triggering retrieval from long term archive.',
            product_info['id'])
        self._trigger_offline_retrieval(product_info['url'])
        # Nothing downloadable yet; caller must retry once the product is online.
        return product_info

    # Use a temporary file for downloading
    temp_path = path + '.incomplete'

    skip_download = False
    if exists(temp_path):
        if getsize(temp_path) > product_info['size']:
            # An oversized partial file cannot be resumed safely.
            self.logger.warning(
                "Existing incomplete file %s is larger than the expected final size"
                " (%s vs %s bytes). Deleting it.",
                str(temp_path), getsize(temp_path), product_info['size'])
            remove(temp_path)
        elif getsize(temp_path) == product_info['size']:
            if self._md5_compare(temp_path, product_info['md5']):
                # Full file already on disk; only the final rename remains.
                skip_download = True
            else:
                # Log a warning since this should never happen
                self.logger.warning(
                    "Existing incomplete file %s appears to be fully downloaded but "
                    "its checksum is incorrect. Deleting it.",
                    str(temp_path))
                remove(temp_path)
        else:
            # continue downloading
            self.logger.info(
                "Download will resume from existing incomplete file %s.", temp_path)
            pass

    if not skip_download:
        # Store the number of downloaded bytes for unit tests
        product_info['downloaded_bytes'] = self._download(
            product_info['url'], temp_path, self.session, product_info['size'])

    # Check integrity with MD5 checksum
    if checksum is True:
        if not self._md5_compare(temp_path, product_info['md5']):
            remove(temp_path)
            raise InvalidChecksumError('File corrupt: checksums do not match')

    # Download successful, rename the temporary file to its proper name
    shutil.move(temp_path, path)
    return product_info
def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
    """Download a list of products.

    Takes a list of product IDs as input. This means that the return value of query() can be
    passed directly to this method.

    File names on the server are used for the downloaded files, e.g.
    "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

    In case of interruptions or other exceptions, downloading will restart from where it left
    off. Downloading is attempted at most max_attempts times to avoid getting stuck with
    unrecoverable errors.

    Parameters
    ----------
    products : list
        List of product IDs
    directory_path : string
        Directory where the downloaded files will be downloaded
    max_attempts : int, optional
        Number of allowed retries before giving up downloading a product. Defaults to 10.
    checksum : bool, optional
        If True, verify the downloaded files' integrity by checking its MD5 checksum.
        Throws InvalidChecksumError if the checksum does not match.
        Defaults to True.

    Raises
    ------
    Raises the most recent downloading exception if all downloads failed.

    Returns
    -------
    dict[string, dict]
        A dictionary containing the return value from download() for each successfully
        downloaded product.
    dict[string, dict]
        A dictionary containing the product information for products whose retrieval
        from the long term archive was successfully triggered.
    set[string]
        The list of products that failed to download.
    """
    product_ids = list(products)
    self.logger.info("Will download %d products", len(product_ids))
    return_values = OrderedDict()
    last_exception = None
    for i, product_id in enumerate(products):
        for attempt_num in range(max_attempts):
            try:
                product_info = self.download(product_id, directory_path, checksum)
                return_values[product_id] = product_info
                break
            except (KeyboardInterrupt, SystemExit):
                # Never swallow user-initiated interruptions.
                raise
            except InvalidChecksumError as e:
                # Corrupt download: retry up to max_attempts times.
                last_exception = e
                self.logger.warning(
                    "Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
            except SentinelAPILTAError as e:
                last_exception = e
                self.logger.exception("There was an error retrieving %s from the LTA", product_id)
                # The LTA rejected the request; retrying immediately will not help.
                break
            except Exception as e:
                last_exception = e
                self.logger.exception("There was an error downloading %s", product_id)
        self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
    failed = set(products) - set(return_values)
    # split up sucessfully processed products into downloaded and only triggered retrieval from the LTA
    triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
    downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
    if len(failed) == len(product_ids) and last_exception is not None:
        # Nothing succeeded at all: surface the most recent error to the caller.
        raise last_exception
    return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
def _query_names(self, names):
    """Find products by their names, e.g.
    S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.

    Note that duplicates exist on server, so multiple products can be returned for each name.

    Parameters
    ----------
    names : list[string]
        List of product names.

    Returns
    -------
    dict[string, dict[str, dict]]
        A dictionary mapping each name to a dictionary which contains the products with
        that name (with ID as the key).
    """

    def chunks(l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]

    products = {}
    # 40 names per query fits reasonably well inside the query limit
    for chunk in chunks(names, 40):
        # Names are OR-ed together into a single full-text query.
        query = " OR ".join(chunk)
        products.update(self.query(raw=query))

    # Group the products
    # Output preserves the order of the requested names, including names with no match.
    output = OrderedDict((name, dict()) for name in names)
    for id, metadata in products.items():
        name = metadata['identifier']
        output[name][id] = metadata
    return output
def check_files(self, paths=None, ids=None, directory=None, delete=False):
    """Verify the integrity of product files on disk.

    Integrity is checked by comparing the size and checksum of the file with the respective
    values on the server.

    The input can be a list of products to check or a list of IDs and a directory.

    In cases where multiple products with different IDs exist on the server for given product
    name, the file is considered to be correct if any of them matches the file size and
    checksum. A warning is logged in such situations.

    The corrupt products' OData info is included in the return value to make it easier to
    re-download the products, if necessary.

    Parameters
    ----------
    paths : list[string]
        List of product file paths.
    ids : list[string]
        List of product IDs.
    directory : string
        Directory where the files are located, if checking based on product IDs.
    delete : bool
        Whether to delete corrupt products. Defaults to False.

    Returns
    -------
    dict[str, list[dict]]
        A dictionary listing the invalid or missing files. The dictionary maps the corrupt
        file paths to a list of OData dictionaries of matching products on the server (as
        returned by :meth:`SentinelAPI.get_product_odata()`).
    """
    if not ids and not paths:
        raise ValueError("Must provide either file paths or product IDs and a directory")
    if ids and not directory:
        raise ValueError("Directory value missing")
    paths = paths or []
    ids = ids or []

    def name_from_path(path):
        # Product name is the file name without directory or extension.
        return splitext(basename(path))[0]

    # Get product IDs corresponding to the files on disk
    names = []
    if paths:
        names = list(map(name_from_path, paths))
        result = self._query_names(names)
        for product_dicts in result.values():
            ids += list(product_dicts)
    names_from_paths = set(names)
    ids = set(ids)

    # Collect the OData information for each product
    # Product name -> list of matching odata dicts
    product_infos = defaultdict(list)
    for id in ids:
        odata = self.get_product_odata(id)
        name = odata['title']
        product_infos[name].append(odata)

        # Collect
        # NOTE(review): for IDs given without a corresponding path, the expected
        # file location is derived from `directory`; this appends to the caller's
        # `paths` list in place when one was provided.
        if name not in names_from_paths:
            paths.append(join(directory, name + '.zip'))

    # Now go over the list of products and check them
    corrupt = {}
    for path in paths:
        name = name_from_path(path)

        if len(product_infos[name]) > 1:
            self.logger.warning("{} matches multiple products on server".format(path))

        if not exists(path):
            # We will consider missing files as corrupt also
            self.logger.info("{} does not exist on disk".format(path))
            corrupt[path] = product_infos[name]
            continue

        is_fine = False
        for product_info in product_infos[name]:
            # The file is valid if it matches ANY of the duplicate server products.
            if (getsize(path) == product_info['size'] and
                    self._md5_compare(path, product_info['md5'])):
                is_fine = True
                break
        if not is_fine:
            self.logger.info("{} is corrupt".format(path))
            corrupt[path] = product_infos[name]
            if delete:
                remove(path)

    return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
"""Compare a given MD5 checksum with one calculated from a file."""
with closing(self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B",
unit_scale=True)) as progress:
md5 = hashlib.md5()
with open(file_path, "rb") as f:
while True:
block_data = f.read(block_size)
if not block_data:
break
md5.update(block_data)
progress.update(len(block_data))
return md5.hexdigest().lower() == checksum.lower()
def _download(self, url, path, session, file_size):
    """Stream `url` to `path`, resuming a partial download if `path` exists.

    Returns the number of bytes downloaded during this call only
    (excluding any previously downloaded portion).
    """
    headers = {}
    continuing = exists(path)
    if continuing:
        already_downloaded_bytes = getsize(path)
        # Request only the remaining byte range from the server.
        headers = {'Range': 'bytes={}-'.format(already_downloaded_bytes)}
    else:
        already_downloaded_bytes = 0
    downloaded_bytes = 0
    with closing(session.get(url, stream=True, auth=session.auth,
                             headers=headers, timeout=self.timeout)) as r, \
            closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
                               unit_scale=True, initial=already_downloaded_bytes)) as progress:
        _check_scihub_response(r, test_json=False)
        chunk_size = 2 ** 20  # download in 1 MB chunks
        # Append when resuming so the earlier partial content is preserved.
        mode = 'ab' if continuing else 'wb'
        with open(path, mode) as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    progress.update(len(chunk))
                    downloaded_bytes += len(chunk)
    # Return the number of bytes downloaded
    return downloaded_bytes
def _tqdm(self, **kwargs):
    """tqdm progressbar wrapper. May be overridden to customize progressbar behavior"""
    # Progressbars are globally enabled/disabled via show_progressbars.
    options = dict(kwargs, disable=not self.show_progressbars)
    return tqdm(**options)
|
sentinelsat/sentinelsat | sentinelsat/sentinel.py | SentinelAPI._trigger_offline_retrieval | python | def _trigger_offline_retrieval(self, url):
with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
# check https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
if r.status_code == 202:
self.logger.info("Accepted for retrieval")
elif r.status_code == 503:
self.logger.error("Request not accepted")
raise SentinelAPILTAError('Request for retrieval from LTA not accepted', r)
elif r.status_code == 403:
self.logger.error("Requests exceed user quota")
raise SentinelAPILTAError('Requests for retrieval from LTA exceed user quota', r)
elif r.status_code == 500:
# should not happen
self.logger.error("Trying to download an offline product")
raise SentinelAPILTAError('Trying to download an offline product', r)
return r.status_code | Triggers retrieval of an offline product
Trying to download an offline product triggers its retrieval from the long term archive.
The returned HTTP status code conveys whether this was successful.
Parameters
----------
url : string
URL for downloading the product
Notes
-----
https://scihub.copernicus.eu/userguide/LongTermArchive | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L431-L461 | null | class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
             show_progressbars=True, timeout=None):
    """Create the API client and configure the underlying HTTP session."""
    self.session = requests.Session()
    # Credentials may both be falsy to fall back on ~/.netrc authentication.
    if user and password:
        self.session.auth = (user, password)
    # Normalize with a trailing slash so later urljoin() calls resolve correctly.
    self.api_url = api_url if api_url.endswith('/') else api_url + '/'
    # Maximum rows per request allowed on ApiHub.
    self.page_size = 100
    self.user_agent = 'sentinelsat/' + sentinelsat_version
    self.session.headers['User-Agent'] = self.user_agent
    self.show_progressbars = show_progressbars
    self.timeout = timeout
    # For unit tests
    self._last_query = None
    self._last_response = None
def query(self, area=None, date=None, raw=None, area_relation='Intersects',
          order_by=None, limit=None, offset=0, **keywords):
    """Query the OpenSearch API for products.

    Combines an area-of-interest filter, a sensing-date filter, raw query
    text and arbitrary additional search keywords into a single query.

    Parameters
    ----------
    area : str, optional
        Area of interest as a Well-Known Text string.
    date : tuple of (str or datetime) or str, optional
        (start, end) interval on the Sensing Start Time. Each bound may be
        a datetime or an OpenSearch date string (e.g. "yyyyMMdd", "NOW",
        "NOW-1DAY"); alternatively, the whole argument may be a fully
        formatted interval such as "[NOW-1DAY TO NOW]".
    raw : str, optional
        Additional query text that will be appended to the query.
    area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
        Relation used for testing the AOI, case insensitive:
        Intersects (default) - AOI and footprint intersect;
        Contains - AOI is inside the footprint;
        IsWithin - footprint is inside the AOI.
    order_by: str, optional
        Comma-separated list of fields to sort by on the server side.
        Prefix a field with '+'/'-' for ascending/descending order
        (ascending if omitted), e.g. "cloudcoverpercentage, -beginposition".
    limit: int, optional
        Maximum number of products returned. Defaults to no limit.
    offset: int, optional
        The number of results to skip. Defaults to 0.
    **keywords
        Any further OpenSearch query parameters, e.g.
        ``relativeorbitnumber=70``. Two-element tuples express ranges,
        e.g. ``cloudcoverpercentage=(0, 30)``; ``None`` makes a bound
        unlimited. The `date` interval formats also apply to the other
        date-type keywords ('beginposition', 'endposition', 'date',
        'creationdate', 'ingestiondate').

    Returns
    -------
    dict[string, dict]
        Matching products keyed by product ID, with the product's
        attributes (a dictionary) as the value.
    """
    full_query = self.format_query(area, date, raw, area_relation, **keywords)
    self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
                      order_by, limit, offset, full_query)
    server_order_by = _format_order_by(order_by)
    response, count = self._load_query(full_query, server_order_by, limit, offset)
    self.logger.info("Found %s products", count)
    return _parse_opensearch_response(response)
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
"""Create a OpenSearch API query string.
"""
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts)
def query_raw(self, query, order_by=None, limit=None, offset=0):
    """Run a full-text OpenSearch query string directly.

    DEPRECATED: use :meth:`query(raw=...) <.query>` instead; this method
    will be removed in the next major release.

    Parameters
    ----------
    query : str
        The query string.
    order_by: str, optional
        Comma-separated list of fields to sort by on the server side.
        Prefix a field with '+'/'-' for ascending/descending order
        (ascending if omitted), e.g. "cloudcoverpercentage, -beginposition".
    limit: int, optional
        Maximum number of products returned. Defaults to no limit.
    offset: int, optional
        The number of results to skip. Defaults to 0.

    Returns
    -------
    dict[string, dict]
        Matching products keyed by product ID, with the product's
        attributes (a dictionary) as the value.
    """
    # Steer callers toward the merged query() API before delegating.
    warnings.warn(
        "query_raw() has been merged with query(). use query(raw=...) instead.",
        PendingDeprecationWarning
    )
    return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
    """Return the number of products matching a query.

    Accepted parameters are identical to :meth:`SentinelAPI.query()`.

    Significantly more efficient than ``len(api.query())`` for queries
    matching thousands of products, since no product metadata is fetched.

    Returns
    -------
    int
        The number of products matching a query.
    """
    # Tolerate (and silently drop) pagination/ordering arguments so callers
    # can reuse the exact kwargs they would pass to query().
    for ignored in ('order_by', 'limit', 'offset'):
        keywords.pop(ignored, None)
    query = self.format_query(area, date, raw, area_relation, **keywords)
    # A zero-row request still reports the total hit count.
    return self._load_query(query, limit=0)[1]
def _load_query(self, query, order_by=None, limit=None, offset=0):
    """Fetch all requested result pages of `query`; return (products, total_count)."""
    # First page also tells us the server-side total number of matches.
    products, count = self._load_subquery(query, order_by, limit, offset)

    # repeat query until all results have been loaded
    max_offset = count
    if limit is not None:
        # Never page past the caller-requested window.
        max_offset = min(count, offset + limit)
    if max_offset > offset + self.page_size:
        # More than one page is needed: show paging progress.
        progress = self._tqdm(desc="Querying products",
                              initial=self.page_size,
                              total=max_offset - offset,
                              unit=' products')
        for new_offset in range(offset + self.page_size, max_offset, self.page_size):
            # Shrink the per-request limit so the final page is not overfetched.
            new_limit = limit
            if limit is not None:
                new_limit = limit - new_offset + offset
            ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
            progress.update(len(ret))
            products += ret
        progress.close()
    return products, count
def _load_subquery(self, query, order_by=None, limit=None, offset=0):
    """POST one result page of `query`; return (products_list, total_count)."""
    # store last query (for testing)
    self._last_query = query
    self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)

    # load query results
    url = self._format_url(order_by, limit, offset)
    # Query text is sent in the POST body, so its length is not capped by URL limits.
    response = self.session.post(url, {'q': query}, auth=self.session.auth,
                                 headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
                                 timeout=self.timeout)
    _check_scihub_response(response)

    # store last status code (for testing)
    self._last_response = response

    # parse response content
    try:
        json_feed = response.json()['feed']
        if json_feed['opensearch:totalResults'] is None:
            # We are using some unintended behavior of the server that a null is
            # returned as the total results value when the query string was incorrect.
            raise SentinelAPIError(
                'Invalid query string. Check the parameters and format.', response)
        total_results = int(json_feed['opensearch:totalResults'])
    except (ValueError, KeyError):
        # Covers both malformed JSON and a missing 'feed'/'opensearch:totalResults' key.
        raise SentinelAPIError('API response not valid. JSON decoding failed.', response)

    products = json_feed.get('entry', [])
    # this verification is necessary because if the query returns only
    # one product, self.products will be a dict not a list
    if isinstance(products, dict):
        products = [products]
    return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
@staticmethod
def to_geojson(products):
    """Convert a query response into a GeoJSON FeatureCollection.

    Values keep their Python types, except datetimes, which are rendered
    as ISO-8601 strings so the result is JSON serializable.
    """
    feature_list = []
    for i, (product_id, props) in enumerate(products.items()):
        attrs = dict(props)
        attrs['id'] = product_id
        geometry = geomet.wkt.loads(attrs.pop('footprint'))
        del attrs['gmlfootprint']
        # Fix "'datetime' is not JSON serializable"
        for key, val in attrs.items():
            if isinstance(val, (date, datetime)):
                attrs[key] = val.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        feature_list.append(
            geojson.Feature(geometry=geometry, id=i, properties=attrs)
        )
    return geojson.FeatureCollection(feature_list)
@staticmethod
def to_dataframe(products):
"""Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index')
    @staticmethod
    def to_geodataframe(products):
        """Return the products from a query response as a GeoPandas GeoDataFrame
        with the values in their appropriate Python types.
        """
        # GeoPandas and Shapely are optional dependencies, imported on demand.
        try:
            import geopandas as gpd
            import shapely.wkt
        except ImportError:
            raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")
        crs = {'init': 'epsg:4326'} # WGS84
        # An empty response still yields a CRS-tagged (but empty) GeoDataFrame.
        if len(products) == 0:
            return gpd.GeoDataFrame(crs=crs)
        df = SentinelAPI.to_dataframe(products)
        # Parse each WKT footprint into a Shapely geometry for the geometry column.
        geometry = [shapely.wkt.loads(fp) for fp in df['footprint']]
        # remove useless columns
        df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)
        return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
def get_product_odata(self, id, full=False):
"""Access OData API to get info about a product.
Returns a dict containing the id, title, size, md5sum, date, footprint and download url
of the product. The date field corresponds to the Start ContentDate value.
If `full` is set to True, then the full, detailed metadata of the product is returned
in addition to the above.
Parameters
----------
id : string
The UUID of the product to query
full : bool
Whether to get the full metadata for the Product. False by default.
Returns
-------
dict[str, Any]
A dictionary with an item for each metadata attribute
Notes
-----
For a full list of mappings between the OpenSearch (Solr) and OData attribute names
see the following definition files:
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
"""
url = urljoin(self.api_url, u"odata/v1/Products('{}')?$format=json".format(id))
if full:
url += '&$expand=Attributes'
response = self.session.get(url, auth=self.session.auth,
timeout=self.timeout)
_check_scihub_response(response)
values = _parse_odata_response(response.json()['d'])
return values
    def download(self, id, directory_path='.', checksum=True):
        """Download a product.

        Uses the filename on the server for the downloaded file, e.g.
        "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

        Incomplete downloads are continued and complete files are skipped.

        Parameters
        ----------
        id : string
            UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
        directory_path : string, optional
            Where the file will be downloaded
        checksum : bool, optional
            If True, verify the downloaded file's integrity by checking its MD5 checksum.
            Throws InvalidChecksumError if the checksum does not match.
            Defaults to True.

        Returns
        -------
        product_info : dict
            Dictionary containing the product's info from get_product_odata() as well as
            the path on disk.

        Raises
        ------
        InvalidChecksumError
            If the MD5 checksum does not match the checksum on the server.
        """
        product_info = self.get_product_odata(id)
        path = join(directory_path, product_info['title'] + '.zip')
        product_info['path'] = path
        product_info['downloaded_bytes'] = 0
        self.logger.info('Downloading %s to %s', id, path)
        if exists(path):
            # We assume that the product has been downloaded and is complete
            return product_info
        # An incomplete download triggers the retrieval from the LTA if the product is not online
        if not product_info['Online']:
            self.logger.warning(
                'Product %s is not online. Triggering retrieval from long term archive.',
                product_info['id'])
            self._trigger_offline_retrieval(product_info['url'])
            return product_info
        # Use a temporary file for downloading
        temp_path = path + '.incomplete'
        skip_download = False
        if exists(temp_path):
            if getsize(temp_path) > product_info['size']:
                # A leftover larger than the expected final size cannot be a
                # valid partial download; start over from scratch.
                self.logger.warning(
                    "Existing incomplete file %s is larger than the expected final size"
                    " (%s vs %s bytes). Deleting it.",
                    str(temp_path), getsize(temp_path), product_info['size'])
                remove(temp_path)
            elif getsize(temp_path) == product_info['size']:
                if self._md5_compare(temp_path, product_info['md5']):
                    # Temp file is already complete and valid; only the final
                    # rename remains to be done.
                    skip_download = True
                else:
                    # Log a warning since this should never happen
                    self.logger.warning(
                        "Existing incomplete file %s appears to be fully downloaded but "
                        "its checksum is incorrect. Deleting it.",
                        str(temp_path))
                    remove(temp_path)
            else:
                # continue downloading
                self.logger.info(
                    "Download will resume from existing incomplete file %s.", temp_path)
                pass
        if not skip_download:
            # Store the number of downloaded bytes for unit tests
            product_info['downloaded_bytes'] = self._download(
                product_info['url'], temp_path, self.session, product_info['size'])
        # Check integrity with MD5 checksum
        if checksum is True:
            if not self._md5_compare(temp_path, product_info['md5']):
                remove(temp_path)
                raise InvalidChecksumError('File corrupt: checksums do not match')
        # Download successful, rename the temporary file to its proper name
        shutil.move(temp_path, path)
        return product_info
    def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
        """Download a list of products.

        Takes a list of product IDs as input. This means that the return value of query() can be
        passed directly to this method.

        File names on the server are used for the downloaded files, e.g.
        "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

        In case of interruptions or other exceptions, downloading will restart from where it left
        off. Downloading is attempted at most max_attempts times to avoid getting stuck with
        unrecoverable errors.

        Parameters
        ----------
        products : list
            List of product IDs
        directory_path : string
            Directory where the downloaded files will be downloaded
        max_attempts : int, optional
            Number of allowed retries before giving up downloading a product. Defaults to 10.
        checksum : bool, optional
            If True, verify the downloaded files' integrity by checking its MD5 checksum.
            Throws InvalidChecksumError if the checksum does not match.
            Defaults to True.

        Raises
        ------
        Raises the most recent downloading exception if all downloads failed.

        Returns
        -------
        dict[string, dict]
            A dictionary containing the return value from download() for each successfully
            downloaded product.
        dict[string, dict]
            A dictionary containing the product information for products whose retrieval
            from the long term archive was successfully triggered.
        set[string]
            The list of products that failed to download.
        """
        product_ids = list(products)
        self.logger.info("Will download %d products", len(product_ids))
        return_values = OrderedDict()
        # Remember the last failure so it can be re-raised if nothing succeeded.
        last_exception = None
        for i, product_id in enumerate(products):
            for attempt_num in range(max_attempts):
                try:
                    product_info = self.download(product_id, directory_path, checksum)
                    return_values[product_id] = product_info
                    break
                except (KeyboardInterrupt, SystemExit):
                    # Never swallow user interruption; propagate immediately.
                    raise
                except InvalidChecksumError as e:
                    last_exception = e
                    self.logger.warning(
                        "Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
                except SentinelAPILTAError as e:
                    last_exception = e
                    self.logger.exception("There was an error retrieving %s from the LTA", product_id)
                    # LTA errors break out of the retry loop immediately.
                    break
                except Exception as e:
                    last_exception = e
                    self.logger.exception("There was an error downloading %s", product_id)
            self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
        failed = set(products) - set(return_values)
        # split up successfully processed products into downloaded and only triggered retrieval from the LTA
        triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
        downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
        if len(failed) == len(product_ids) and last_exception is not None:
            raise last_exception
        return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
def _query_names(self, names):
"""Find products by their names, e.g.
S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.
Note that duplicates exist on server, so multiple products can be returned for each name.
Parameters
----------
names : list[string]
List of product names.
Returns
-------
dict[string, dict[str, dict]]
A dictionary mapping each name to a dictionary which contains the products with
that name (with ID as the key).
"""
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
products = {}
# 40 names per query fits reasonably well inside the query limit
for chunk in chunks(names, 40):
query = " OR ".join(chunk)
products.update(self.query(raw=query))
# Group the products
output = OrderedDict((name, dict()) for name in names)
for id, metadata in products.items():
name = metadata['identifier']
output[name][id] = metadata
return output
    def check_files(self, paths=None, ids=None, directory=None, delete=False):
        """Verify the integrity of product files on disk.

        Integrity is checked by comparing the size and checksum of the file with the respective
        values on the server.

        The input can be a list of products to check or a list of IDs and a directory.

        In cases where multiple products with different IDs exist on the server for given product
        name, the file is considered to be correct if any of them matches the file size and
        checksum. A warning is logged in such situations.

        The corrupt products' OData info is included in the return value to make it easier to
        re-download the products, if necessary.

        Parameters
        ----------
        paths : list[string]
            List of product file paths.
        ids : list[string]
            List of product IDs.
        directory : string
            Directory where the files are located, if checking based on product IDs.
        delete : bool
            Whether to delete corrupt products. Defaults to False.

        Returns
        -------
        dict[str, list[dict]]
            A dictionary listing the invalid or missing files. The dictionary maps the corrupt
            file paths to a list of OData dictionaries of matching products on the server (as
            returned by :meth:`SentinelAPI.get_product_odata()`).
        """
        if not ids and not paths:
            raise ValueError("Must provide either file paths or product IDs and a directory")
        if ids and not directory:
            raise ValueError("Directory value missing")
        paths = paths or []
        ids = ids or []
        def name_from_path(path):
            # The product name is the file name without directory or extension.
            return splitext(basename(path))[0]
        # Get product IDs corresponding to the files on disk
        names = []
        if paths:
            names = list(map(name_from_path, paths))
            result = self._query_names(names)
            for product_dicts in result.values():
                ids += list(product_dicts)
        names_from_paths = set(names)
        ids = set(ids)
        # Collect the OData information for each product
        # Product name -> list of matching odata dicts
        product_infos = defaultdict(list)
        for id in ids:
            odata = self.get_product_odata(id)
            name = odata['title']
            product_infos[name].append(odata)
            # Collect
            # NOTE(review): when checking by IDs, `paths` (possibly the
            # caller's own list) is extended in place here -- confirm callers
            # tolerate that mutation.
            if name not in names_from_paths:
                paths.append(join(directory, name + '.zip'))
        # Now go over the list of products and check them
        corrupt = {}
        for path in paths:
            name = name_from_path(path)
            if len(product_infos[name]) > 1:
                self.logger.warning("{} matches multiple products on server".format(path))
            if not exists(path):
                # We will consider missing files as corrupt also
                self.logger.info("{} does not exist on disk".format(path))
                corrupt[path] = product_infos[name]
                continue
            is_fine = False
            # Accept the file if it matches ANY product carrying this name.
            for product_info in product_infos[name]:
                if (getsize(path) == product_info['size'] and
                        self._md5_compare(path, product_info['md5'])):
                    is_fine = True
                    break
            if not is_fine:
                self.logger.info("{} is corrupt".format(path))
                corrupt[path] = product_infos[name]
                if delete:
                    remove(path)
        return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
"""Compare a given MD5 checksum with one calculated from a file."""
with closing(self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B",
unit_scale=True)) as progress:
md5 = hashlib.md5()
with open(file_path, "rb") as f:
while True:
block_data = f.read(block_size)
if not block_data:
break
md5.update(block_data)
progress.update(len(block_data))
return md5.hexdigest().lower() == checksum.lower()
    def _download(self, url, path, session, file_size):
        """Stream ``url`` to ``path``, resuming a partial file if one exists.

        Returns the number of bytes downloaded during this call (excluding
        any previously downloaded portion of the file).
        """
        headers = {}
        continuing = exists(path)
        if continuing:
            # Resume via an HTTP Range request starting after the bytes we have.
            already_downloaded_bytes = getsize(path)
            headers = {'Range': 'bytes={}-'.format(already_downloaded_bytes)}
        else:
            already_downloaded_bytes = 0
        downloaded_bytes = 0
        with closing(session.get(url, stream=True, auth=session.auth,
                                 headers=headers, timeout=self.timeout)) as r, \
                closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
                                   unit_scale=True, initial=already_downloaded_bytes)) as progress:
            _check_scihub_response(r, test_json=False)
            chunk_size = 2 ** 20  # download in 1 MB chunks
            # Append when resuming, truncate otherwise.
            mode = 'ab' if continuing else 'wb'
            with open(path, mode) as f:
                for chunk in r.iter_content(chunk_size=chunk_size):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
                        progress.update(len(chunk))
                        downloaded_bytes += len(chunk)
        # Return the number of bytes downloaded
        return downloaded_bytes
def _tqdm(self, **kwargs):
"""tqdm progressbar wrapper. May be overridden to customize progressbar behavior"""
kwargs.update({'disable': not self.show_progressbars})
return tqdm(**kwargs)
|
sentinelsat/sentinelsat | sentinelsat/sentinel.py | SentinelAPI.download | python | def download(self, id, directory_path='.', checksum=True):
product_info = self.get_product_odata(id)
path = join(directory_path, product_info['title'] + '.zip')
product_info['path'] = path
product_info['downloaded_bytes'] = 0
self.logger.info('Downloading %s to %s', id, path)
if exists(path):
# We assume that the product has been downloaded and is complete
return product_info
# An incomplete download triggers the retrieval from the LTA if the product is not online
if not product_info['Online']:
self.logger.warning(
'Product %s is not online. Triggering retrieval from long term archive.',
product_info['id'])
self._trigger_offline_retrieval(product_info['url'])
return product_info
# Use a temporary file for downloading
temp_path = path + '.incomplete'
skip_download = False
if exists(temp_path):
if getsize(temp_path) > product_info['size']:
self.logger.warning(
"Existing incomplete file %s is larger than the expected final size"
" (%s vs %s bytes). Deleting it.",
str(temp_path), getsize(temp_path), product_info['size'])
remove(temp_path)
elif getsize(temp_path) == product_info['size']:
if self._md5_compare(temp_path, product_info['md5']):
skip_download = True
else:
# Log a warning since this should never happen
self.logger.warning(
"Existing incomplete file %s appears to be fully downloaded but "
"its checksum is incorrect. Deleting it.",
str(temp_path))
remove(temp_path)
else:
# continue downloading
self.logger.info(
"Download will resume from existing incomplete file %s.", temp_path)
pass
if not skip_download:
# Store the number of downloaded bytes for unit tests
product_info['downloaded_bytes'] = self._download(
product_info['url'], temp_path, self.session, product_info['size'])
# Check integrity with MD5 checksum
if checksum is True:
if not self._md5_compare(temp_path, product_info['md5']):
remove(temp_path)
raise InvalidChecksumError('File corrupt: checksums do not match')
# Download successful, rename the temporary file to its proper name
shutil.move(temp_path, path)
return product_info | Download a product.
Uses the filename on the server for the downloaded file, e.g.
"S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".
Incomplete downloads are continued and complete files are skipped.
Parameters
----------
id : string
UUID of the product, e.g. 'a8dd0cfd-613e-45ce-868c-d79177b916ed'
directory_path : string, optional
Where the file will be downloaded
checksum : bool, optional
If True, verify the downloaded file's integrity by checking its MD5 checksum.
Throws InvalidChecksumError if the checksum does not match.
Defaults to True.
Returns
-------
product_info : dict
Dictionary containing the product's info from get_product_odata() as well as
the path on disk.
Raises
------
InvalidChecksumError
If the MD5 checksum does not match the checksum on the server. | train | https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L463-L552 | [
"def get_product_odata(self, id, full=False):\n \"\"\"Access OData API to get info about a product.\n\n Returns a dict containing the id, title, size, md5sum, date, footprint and download url\n of the product. The date field corresponds to the Start ContentDate value.\n\n If `full` is set to True, then the full, detailed metadata of the product is returned\n in addition to the above.\n\n Parameters\n ----------\n id : string\n The UUID of the product to query\n full : bool\n Whether to get the full metadata for the Product. False by default.\n\n Returns\n -------\n dict[str, Any]\n A dictionary with an item for each metadata attribute\n\n Notes\n -----\n For a full list of mappings between the OpenSearch (Solr) and OData attribute names\n see the following definition files:\n https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl\n https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl\n https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl\n \"\"\"\n url = urljoin(self.api_url, u\"odata/v1/Products('{}')?$format=json\".format(id))\n if full:\n url += '&$expand=Attributes'\n response = self.session.get(url, auth=self.session.auth,\n timeout=self.timeout)\n _check_scihub_response(response)\n values = _parse_odata_response(response.json()['d'])\n return values\n",
"def _trigger_offline_retrieval(self, url):\n \"\"\" Triggers retrieval of an offline product\n\n Trying to download an offline product triggers its retrieval from the long term archive.\n The returned HTTP status code conveys whether this was successful.\n\n Parameters\n ----------\n url : string\n URL for downloading the product\n\n Notes\n -----\n https://scihub.copernicus.eu/userguide/LongTermArchive\n\n \"\"\"\n with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:\n # check https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes\n if r.status_code == 202:\n self.logger.info(\"Accepted for retrieval\")\n elif r.status_code == 503:\n self.logger.error(\"Request not accepted\")\n raise SentinelAPILTAError('Request for retrieval from LTA not accepted', r)\n elif r.status_code == 403:\n self.logger.error(\"Requests exceed user quota\")\n raise SentinelAPILTAError('Requests for retrieval from LTA exceed user quota', r)\n elif r.status_code == 500:\n # should not happen\n self.logger.error(\"Trying to download an offline product\")\n raise SentinelAPILTAError('Trying to download an offline product', r)\n return r.status_code\n"
] | class SentinelAPI:
"""Class to connect to Copernicus Open Access Hub, search and download imagery.
Parameters
----------
user : string
username for DataHub
set to None to use ~/.netrc
password : string
password for DataHub
set to None to use ~/.netrc
api_url : string, optional
URL of the DataHub
defaults to 'https://scihub.copernicus.eu/apihub'
show_progressbars : bool
Whether progressbars should be shown or not, e.g. during download. Defaults to True.
timeout : float or tuple, optional
How long to wait for DataHub response (in seconds).
Tuple (connect, read) allowed.
Attributes
----------
session : requests.Session
Session to connect to DataHub
api_url : str
URL to the DataHub
page_size : int
Number of results per query page.
Current value: 100 (maximum allowed on ApiHub)
timeout : float or tuple
How long to wait for DataHub response (in seconds).
"""
logger = logging.getLogger('sentinelsat.SentinelAPI')
    def __init__(self, user, password, api_url='https://scihub.copernicus.eu/apihub/',
                 show_progressbars=True, timeout=None):
        self.session = requests.Session()
        # Credentials may be omitted entirely, in which case requests falls
        # back to ~/.netrc (see the class docstring).
        if user and password:
            self.session.auth = (user, password)
        # Normalize the API URL to always end with a slash so urljoin()-based
        # path building works as expected.
        self.api_url = api_url if api_url.endswith('/') else api_url + '/'
        # Maximum number of rows per OpenSearch request (ApiHub limit).
        self.page_size = 100
        self.user_agent = 'sentinelsat/' + sentinelsat_version
        self.session.headers['User-Agent'] = self.user_agent
        self.show_progressbars = show_progressbars
        self.timeout = timeout
        # For unit tests
        self._last_query = None
        self._last_response = None
    def query(self, area=None, date=None, raw=None, area_relation='Intersects',
              order_by=None, limit=None, offset=0, **keywords):
        """Query the OpenSearch API with the coordinates of an area, a date interval
        and any other search keywords accepted by the API.

        Parameters
        ----------
        area : str, optional
            The area of interest formatted as a Well-Known Text string.
        date : tuple of (str or datetime) or str, optional
            A time interval filter based on the Sensing Start Time of the products.
            Expects a tuple of (start, end), e.g. ("NOW-1DAY", "NOW").
            The timestamps can be either a Python datetime or a string in one of the
            following formats:

                - yyyyMMdd
                - yyyy-MM-ddThh:mm:ss.SSSZ (ISO-8601)
                - yyyy-MM-ddThh:mm:ssZ
                - NOW
                - NOW-<n>DAY(S) (or HOUR(S), MONTH(S), etc.)
                - NOW+<n>DAY(S)
                - yyyy-MM-ddThh:mm:ssZ-<n>DAY(S)
                - NOW/DAY (or HOUR, MONTH etc.) - rounds the value to the given unit

            Alternatively, an already fully formatted string such as "[NOW-1DAY TO NOW]" can be
            used as well.
        raw : str, optional
            Additional query text that will be appended to the query.
        area_relation : {'Intersects', 'Contains', 'IsWithin'}, optional
            What relation to use for testing the AOI. Case insensitive.

                - Intersects: true if the AOI and the footprint intersect (default)
                - Contains: true if the AOI is inside the footprint
                - IsWithin: true if the footprint is inside the AOI
        order_by: str, optional
            A comma-separated list of fields to order by (on server side).
            Prefix the field name by '+' or '-' to sort in ascending or descending order,
            respectively. Ascending order is used if prefix is omitted.
            Example: "cloudcoverpercentage, -beginposition".
        limit: int, optional
            Maximum number of products returned. Defaults to no limit.
        offset: int, optional
            The number of results to skip. Defaults to 0.
        **keywords
            Additional keywords can be used to specify other query parameters,
            e.g. `relativeorbitnumber=70`.
            See https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch
            for a full list.

            Range values can be passed as two-element tuples, e.g. `cloudcoverpercentage=(0, 30)`.
            `None` can be used in range values for one-sided ranges, e.g. `orbitnumber=(16302, None)`.
            Ranges with no bounds (`orbitnumber=(None, None)`) will not be included in the query.

            The time interval formats accepted by the `date` parameter can also be used with
            any other parameters that expect time intervals (that is: 'beginposition', 'endposition',
            'date', 'creationdate', and 'ingestiondate').

        Returns
        -------
        dict[string, dict]
            Products returned by the query as a dictionary with the product ID as the key and
            the product's attributes (a dictionary) as the value.
        """
        # Build the full-text query string from the structured arguments.
        query = self.format_query(area, date, raw, area_relation, **keywords)
        self.logger.debug("Running query: order_by=%s, limit=%s, offset=%s, query=%s",
                          order_by, limit, offset, query)
        # Translate the user-facing order_by string into the request syntax.
        formatted_order_by = _format_order_by(order_by)
        response, count = self._load_query(query, formatted_order_by, limit, offset)
        self.logger.info("Found %s products", count)
        return _parse_opensearch_response(response)
@staticmethod
def format_query(area=None, date=None, raw=None, area_relation='Intersects',
**keywords):
"""Create a OpenSearch API query string.
"""
if area_relation.lower() not in {"intersects", "contains", "iswithin"}:
raise ValueError("Incorrect AOI relation provided ({})".format(area_relation))
# Check for duplicate keywords
kw_lower = set(x.lower() for x in keywords)
if (len(kw_lower) != len(keywords) or
(date is not None and 'beginposition' in kw_lower) or
(area is not None and 'footprint' in kw_lower)):
raise ValueError("Query contains duplicate keywords. Note that query keywords are case-insensitive.")
query_parts = []
if date is not None:
keywords['beginPosition'] = date
for attr, value in sorted(keywords.items()):
# Escape spaces, where appropriate
if isinstance(value, string_types):
value = value.strip()
if not any(value.startswith(s[0]) and value.endswith(s[1]) for s in ['[]', '{}', '//', '()']):
value = re.sub(r'\s', r'\ ', value, re.M)
# Handle date keywords
# Keywords from https://github.com/SentinelDataHub/DataHubSystem/search?q=text/date+iso8601
date_attrs = ['beginposition', 'endposition', 'date', 'creationdate', 'ingestiondate']
if attr.lower() in date_attrs:
# Automatically format date-type attributes
if isinstance(value, string_types) and ' TO ' in value:
# This is a string already formatted as a date interval,
# e.g. '[NOW-1DAY TO NOW]'
pass
elif not isinstance(value, string_types) and len(value) == 2:
value = (format_query_date(value[0]), format_query_date(value[1]))
else:
raise ValueError("Date-type query parameter '{}' expects a two-element tuple "
"of str or datetime objects. Received {}".format(attr, value))
# Handle ranged values
if isinstance(value, (list, tuple)):
# Handle value ranges
if len(value) == 2:
# Allow None to be used as a unlimited bound
value = ['*' if x is None else x for x in value]
if all(x == '*' for x in value):
continue
value = '[{} TO {}]'.format(*value)
else:
raise ValueError("Invalid number of elements in list. Expected 2, received "
"{}".format(len(value)))
query_parts.append('{}:{}'.format(attr, value))
if raw:
query_parts.append(raw)
if area is not None:
query_parts.append('footprint:"{}({})"'.format(area_relation, area))
return ' '.join(query_parts)
    def query_raw(self, query, order_by=None, limit=None, offset=0):
        """
        Do a full-text query on the OpenSearch API using the format specified in
        https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch

        DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release.

        Parameters
        ----------
        query : str
            The query string.
        order_by: str, optional
            A comma-separated list of fields to order by (on server side).
            Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively.
            Ascending order is used, if prefix is omitted.
            Example: "cloudcoverpercentage, -beginposition".
        limit: int, optional
            Maximum number of products returned. Defaults to no limit.
        offset: int, optional
            The number of results to skip. Defaults to 0.

        Returns
        -------
        dict[string, dict]
            Products returned by the query as a dictionary with the product ID as the key and
            the product's attributes (a dictionary) as the value.
        """
        # Kept only for backward compatibility; warn and delegate to query().
        warnings.warn(
            "query_raw() has been merged with query(). use query(raw=...) instead.",
            PendingDeprecationWarning
        )
        return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def count(self, area=None, date=None, raw=None, area_relation='Intersects', **keywords):
"""Get the number of products matching a query.
Accepted parameters are identical to :meth:`SentinelAPI.query()`.
This is a significantly more efficient alternative to doing `len(api.query())`,
which can take minutes to run for queries matching thousands of products.
Returns
-------
int
The number of products matching a query.
"""
for kw in ['order_by', 'limit', 'offset']:
# Allow these function arguments to be included for compatibility with query(),
# but ignore them.
if kw in keywords:
del keywords[kw]
query = self.format_query(area, date, raw, area_relation, **keywords)
_, total_count = self._load_query(query, limit=0)
return total_count
    def _load_query(self, query, order_by=None, limit=None, offset=0):
        """Run ``query``, paging through results until ``limit`` (or every
        match) has been fetched. Returns (product list, total match count)."""
        products, count = self._load_subquery(query, order_by, limit, offset)

        # repeat query until all results have been loaded
        max_offset = count
        if limit is not None:
            max_offset = min(count, offset + limit)
        if max_offset > offset + self.page_size:
            progress = self._tqdm(desc="Querying products",
                                  initial=self.page_size,
                                  total=max_offset - offset,
                                  unit=' products')
            for new_offset in range(offset + self.page_size, max_offset, self.page_size):
                new_limit = limit
                if limit is not None:
                    # Shrink the remaining limit by what has been fetched so far.
                    new_limit = limit - new_offset + offset
                ret = self._load_subquery(query, order_by, new_limit, new_offset)[0]
                progress.update(len(ret))
                products += ret
            progress.close()
        return products, count
    def _load_subquery(self, query, order_by=None, limit=None, offset=0):
        """POST a single page of the search and return (products, total count)."""
        # store last query (for testing)
        self._last_query = query
        self.logger.debug("Sub-query: offset=%s, limit=%s", offset, limit)

        # load query results
        url = self._format_url(order_by, limit, offset)
        # The query string is passed as POST form data under the 'q' key.
        response = self.session.post(url, {'q': query}, auth=self.session.auth,
                                     headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
                                     timeout=self.timeout)
        _check_scihub_response(response)

        # store last status code (for testing)
        self._last_response = response

        # parse response content
        try:
            json_feed = response.json()['feed']
            if json_feed['opensearch:totalResults'] is None:
                # We are using some unintended behavior of the server that a null is
                # returned as the total results value when the query string was incorrect.
                raise SentinelAPIError(
                    'Invalid query string. Check the parameters and format.', response)
            total_results = int(json_feed['opensearch:totalResults'])
        except (ValueError, KeyError):
            raise SentinelAPIError('API response not valid. JSON decoding failed.', response)

        products = json_feed.get('entry', [])
        # this verification is necessary because if the query returns only
        # one product, self.products will be a dict not a list
        if isinstance(products, dict):
            products = [products]

        return products, total_results
def _format_url(self, order_by=None, limit=None, offset=0):
if limit is None:
limit = self.page_size
limit = min(limit, self.page_size)
url = 'search?format=json&rows={}'.format(limit)
url += '&start={}'.format(offset)
if order_by:
url += '&orderby={}'.format(order_by)
return urljoin(self.api_url, url)
    @staticmethod
    def to_geojson(products):
        """Return the products from a query response as a GeoJSON with the values in their
        appropriate Python types.
        """
        feature_list = []
        for i, (product_id, props) in enumerate(products.items()):
            # Copy first so the caller's dictionary is not mutated.
            props = props.copy()
            props['id'] = product_id
            # Parse the WKT footprint into a GeoJSON-style geometry mapping.
            poly = geomet.wkt.loads(props['footprint'])
            del props['footprint']
            del props['gmlfootprint']
            # Fix "'datetime' is not JSON serializable"
            for k, v in props.items():
                if isinstance(v, (date, datetime)):
                    props[k] = v.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
            feature_list.append(
                geojson.Feature(geometry=poly, id=i, properties=props)
            )
        return geojson.FeatureCollection(feature_list)
@staticmethod
def to_dataframe(products):
"""Return the products from a query response as a Pandas DataFrame
with the values in their appropriate Python types.
"""
try:
import pandas as pd
except ImportError:
raise ImportError("to_dataframe requires the optional dependency Pandas.")
return pd.DataFrame.from_dict(products, orient='index')
    @staticmethod
    def to_geodataframe(products):
        """Return the products from a query response as a GeoPandas GeoDataFrame
        with the values in their appropriate Python types.
        """
        try:
            import geopandas as gpd
            import shapely.wkt
        except ImportError:
            raise ImportError("to_geodataframe requires the optional dependencies GeoPandas and Shapely.")
        crs = {'init': 'epsg:4326'}  # WGS84
        # An empty response still yields a CRS-tagged (but empty) GeoDataFrame.
        if len(products) == 0:
            return gpd.GeoDataFrame(crs=crs)
        df = SentinelAPI.to_dataframe(products)
        # WKT footprints become the Shapely geometry column.
        geometry = [shapely.wkt.loads(fp) for fp in df['footprint']]
        # remove useless columns
        df.drop(['footprint', 'gmlfootprint'], axis=1, inplace=True)
        return gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
def get_product_odata(self, id, full=False):
    """Access OData API to get info about a product.

    Returns a dict containing the id, title, size, md5sum, date, footprint and
    download url of the product. The date field corresponds to the Start
    ContentDate value. If ``full`` is True, the complete detailed metadata of
    the product is returned as well.

    Parameters
    ----------
    id : string
        The UUID of the product to query
    full : bool
        Whether to get the full metadata for the Product. False by default.

    Returns
    -------
    dict[str, Any]
        A dictionary with an item for each metadata attribute

    Notes
    -----
    For a full list of mappings between the OpenSearch (Solr) and OData attribute names
    see the following definition files:
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-1/src/main/resources/META-INF/sentinel-1.owl
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-2/src/main/resources/META-INF/sentinel-2.owl
    https://github.com/SentinelDataHub/DataHubSystem/blob/master/addon/sentinel-3/src/main/resources/META-INF/sentinel-3.owl
    """
    path = u"odata/v1/Products('{}')?$format=json".format(id)
    if full:
        # Expanding Attributes pulls in the full detailed metadata.
        path += '&$expand=Attributes'
    response = self.session.get(urljoin(self.api_url, path),
                                auth=self.session.auth, timeout=self.timeout)
    _check_scihub_response(response)
    return _parse_odata_response(response.json()['d'])
def _trigger_offline_retrieval(self, url):
    """Trigger retrieval of an offline product from the long term archive.

    Attempting to download an offline product triggers its retrieval from
    the LTA; the HTTP status code conveys whether that succeeded.

    Parameters
    ----------
    url : string
        URL for downloading the product

    Notes
    -----
    https://scihub.copernicus.eu/userguide/LongTermArchive
    """
    # Failure status codes and their log / exception messages; see
    # https://scihub.copernicus.eu/userguide/LongTermArchive#HTTP_Status_codes
    failures = {
        503: ("Request not accepted",
              'Request for retrieval from LTA not accepted'),
        403: ("Requests exceed user quota",
              'Requests for retrieval from LTA exceed user quota'),
        # 500 should not happen
        500: ("Trying to download an offline product",
              'Trying to download an offline product'),
    }
    with self.session.get(url, auth=self.session.auth, timeout=self.timeout) as r:
        status = r.status_code
        if status == 202:
            self.logger.info("Accepted for retrieval")
        elif status in failures:
            log_msg, exc_msg = failures[status]
            self.logger.error(log_msg)
            raise SentinelAPILTAError(exc_msg, r)
        return status
def download_all(self, products, directory_path='.', max_attempts=10, checksum=True):
    """Download a list of products.

    Takes a list of product IDs as input. This means that the return value of query() can be
    passed directly to this method.

    File names on the server are used for the downloaded files, e.g.
    "S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.zip".

    In case of interruptions or other exceptions, downloading will restart from where it left
    off. Downloading is attempted at most max_attempts times to avoid getting stuck with
    unrecoverable errors.

    Parameters
    ----------
    products : list
        List of product IDs
    directory_path : string
        Directory where the downloaded files will be downloaded
    max_attempts : int, optional
        Number of allowed retries before giving up downloading a product. Defaults to 10.
    checksum : bool, optional
        If True, verify the downloaded files' integrity by checking its MD5 checksum.
        Throws InvalidChecksumError if the checksum does not match.
        Defaults to True.

    Raises
    ------
    Raises the most recent downloading exception if all downloads failed.

    Returns
    -------
    dict[string, dict]
        A dictionary containing the return value from download() for each successfully
        downloaded product.
    dict[string, dict]
        A dictionary containing the product information for products whose retrieval
        from the long term archive was successfully triggered.
    set[string]
        The list of products that failed to download.
    """
    product_ids = list(products)
    self.logger.info("Will download %d products", len(product_ids))
    return_values = OrderedDict()
    last_exception = None
    for i, product_id in enumerate(products):
        # Retry each product up to max_attempts times before moving on.
        for attempt_num in range(max_attempts):
            try:
                product_info = self.download(product_id, directory_path, checksum)
                return_values[product_id] = product_info
                break
            except (KeyboardInterrupt, SystemExit):
                raise
            except InvalidChecksumError as e:
                # Corrupt download: no break, so the attempt budget allows a retry.
                last_exception = e
                self.logger.warning(
                    "Invalid checksum. The downloaded file for '%s' is corrupted.", product_id)
            except SentinelAPILTAError as e:
                # LTA errors are not retried — breaking here gives up on this product.
                last_exception = e
                self.logger.exception("There was an error retrieving %s from the LTA", product_id)
                break
            except Exception as e:
                last_exception = e
                self.logger.exception("There was an error downloading %s", product_id)
        self.logger.info("%s/%s products downloaded", i + 1, len(product_ids))
    failed = set(products) - set(return_values)
    # split up sucessfully processed products into downloaded and only triggered retrieval from the LTA
    triggered = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is False])
    downloaded = OrderedDict([(k, v) for k, v in return_values.items() if v['Online'] is True])
    if len(failed) == len(product_ids) and last_exception is not None:
        # Everything failed: surface the most recent error to the caller.
        raise last_exception
    return downloaded, triggered, failed
@staticmethod
def get_products_size(products):
"""Return the total file size in GB of all products in the OpenSearch response."""
size_total = 0
for title, props in products.items():
size_product = props["size"]
size_value = float(size_product.split(" ")[0])
size_unit = str(size_product.split(" ")[1])
if size_unit == "MB":
size_value /= 1024.
if size_unit == "KB":
size_value /= 1024. * 1024.
size_total += size_value
return round(size_total, 2)
@staticmethod
def check_query_length(query):
"""Determine whether a query to the OpenSearch API is too long.
The length of a query string is limited to approximately 3938 characters but
any special characters (that is, not alphanumeric or -_.*) will take up more space.
Parameters
----------
query : str
The query string
Returns
-------
float
Ratio of the query length to the maximum length
"""
# The server uses the Java's URLEncoder implementation internally, which we are replicating here
effective_length = len(quote_plus(query, safe="-_.*").replace('~', '%7E'))
return effective_length / 3938
def _query_names(self, names):
    """Find products by their names, e.g.
    S1A_EW_GRDH_1SDH_20141003T003840_20141003T003920_002658_002F54_4DD1.

    Note that duplicates exist on server, so multiple products can be returned for each name.

    Parameters
    ----------
    names : list[string]
        List of product names.

    Returns
    -------
    dict[string, dict[str, dict]]
        A dictionary mapping each name to a dictionary which contains the products with
        that name (with ID as the key).
    """
    # 40 names per query fits reasonably well inside the query limit
    batch_size = 40
    found = {}
    for start in range(0, len(names), batch_size):
        batch = names[start:start + batch_size]
        found.update(self.query(raw=" OR ".join(batch)))

    # Group the matching products under their product name.
    grouped = OrderedDict((name, dict()) for name in names)
    for uuid, metadata in found.items():
        grouped[metadata['identifier']][uuid] = metadata
    return grouped
def check_files(self, paths=None, ids=None, directory=None, delete=False):
    """Verify the integrity of product files on disk.

    Integrity is checked by comparing the size and checksum of the file with the respective
    values on the server.

    The input can be a list of products to check or a list of IDs and a directory.

    In cases where multiple products with different IDs exist on the server for given product
    name, the file is considered to be correct if any of them matches the file size and
    checksum. A warning is logged in such situations.

    The corrupt products' OData info is included in the return value to make it easier to
    re-download the products, if necessary.

    Parameters
    ----------
    paths : list[string]
        List of product file paths.
    ids : list[string]
        List of product IDs.
    directory : string
        Directory where the files are located, if checking based on product IDs.
    delete : bool
        Whether to delete corrupt products. Defaults to False.

    Returns
    -------
    dict[str, list[dict]]
        A dictionary listing the invalid or missing files. The dictionary maps the corrupt
        file paths to a list of OData dictionaries of matching products on the server (as
        returned by :meth:`SentinelAPI.get_product_odata()`).
    """
    if not ids and not paths:
        raise ValueError("Must provide either file paths or product IDs and a directory")
    if ids and not directory:
        raise ValueError("Directory value missing")
    paths = paths or []
    ids = ids or []

    def name_from_path(path):
        # Product name is the file name without its directory or extension.
        return splitext(basename(path))[0]

    # Get product IDs corresponding to the files on disk
    names = []
    if paths:
        names = list(map(name_from_path, paths))
        result = self._query_names(names)
        for product_dicts in result.values():
            ids += list(product_dicts)
    names_from_paths = set(names)
    ids = set(ids)

    # Collect the OData information for each product
    # Product name -> list of matching odata dicts
    product_infos = defaultdict(list)
    for id in ids:
        odata = self.get_product_odata(id)
        name = odata['title']
        product_infos[name].append(odata)

        # For IDs given without a corresponding path, the file is expected
        # to be "<name>.zip" inside `directory`.
        if name not in names_from_paths:
            paths.append(join(directory, name + '.zip'))

    # Now go over the list of products and check them
    corrupt = {}
    for path in paths:
        name = name_from_path(path)

        if len(product_infos[name]) > 1:
            self.logger.warning("{} matches multiple products on server".format(path))

        if not exists(path):
            # We will consider missing files as corrupt also
            self.logger.info("{} does not exist on disk".format(path))
            corrupt[path] = product_infos[name]
            continue

        # The file is fine if it matches ANY of the candidate products.
        is_fine = False
        for product_info in product_infos[name]:
            if (getsize(path) == product_info['size'] and
                    self._md5_compare(path, product_info['md5'])):
                is_fine = True
                break
        if not is_fine:
            self.logger.info("{} is corrupt".format(path))
            corrupt[path] = product_infos[name]
            if delete:
                remove(path)

    return corrupt
def _md5_compare(self, file_path, checksum, block_size=2 ** 13):
"""Compare a given MD5 checksum with one calculated from a file."""
with closing(self._tqdm(desc="MD5 checksumming", total=getsize(file_path), unit="B",
unit_scale=True)) as progress:
md5 = hashlib.md5()
with open(file_path, "rb") as f:
while True:
block_data = f.read(block_size)
if not block_data:
break
md5.update(block_data)
progress.update(len(block_data))
return md5.hexdigest().lower() == checksum.lower()
def _download(self, url, path, session, file_size):
    """Stream ``url`` into ``path``, resuming a partial download if one exists.

    Returns the number of bytes fetched during this call (excluding any
    previously downloaded portion of the file).
    """
    resuming = exists(path)
    offset = getsize(path) if resuming else 0
    # Ask the server to skip the bytes we already have on disk.
    headers = {'Range': 'bytes={}-'.format(offset)} if resuming else {}
    fetched = 0
    with closing(session.get(url, stream=True, auth=session.auth,
                             headers=headers, timeout=self.timeout)) as r, \
            closing(self._tqdm(desc="Downloading", total=file_size, unit="B",
                               unit_scale=True, initial=offset)) as progress:
        _check_scihub_response(r, test_json=False)
        chunk_size = 2 ** 20  # download in 1 MB chunks
        with open(path, 'ab' if resuming else 'wb') as f:
            for chunk in r.iter_content(chunk_size=chunk_size):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    progress.update(len(chunk))
                    fetched += len(chunk)
    # Return the number of bytes downloaded
    return fetched
def _tqdm(self, **kwargs):
    """tqdm progressbar wrapper. May be overridden to customize progressbar behavior"""
    # Progress bars are fully suppressed when show_progressbars is off.
    kwargs['disable'] = not self.show_progressbars
    return tqdm(**kwargs)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.