repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
lightning-viz/lightning-python | lightning/types/utils.py | check_1d | python | def check_1d(x, name):
x = asarray(x)
if size(x) == 1:
x = asarray([x])
if x.ndim == 2:
raise Exception("Property: %s must be one-dimensional" % name)
x = x.flatten()
return x | Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...] | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/utils.py#L137-L149 | null | from numpy import asarray, array, ndarray, vstack, newaxis, nonzero, concatenate, \
transpose, atleast_2d, size, isscalar, meshgrid, where, zeros, ones
from matplotlib.path import Path
import ast
def add_property(d, prop, name, **kwargs):
if prop is not None:
p = check_property(prop, name, **kwargs)
d[name] = p
return d
def check_property(prop, name, **kwargs):
"""
Check and parse a property with either a specific checking function
or a generic parser
"""
checkers = {
'color': check_color,
'alpha': check_alpha,
'size': check_size,
'thickness': check_thickness,
'index': check_index,
'coordinates': check_coordinates,
'colormap': check_colormap,
'bins': check_bins,
'spec': check_spec
}
if name in checkers:
return checkers[name](prop, **kwargs)
elif isinstance(prop, list) or isinstance(prop, ndarray) or isscalar(prop):
return check_1d(prop, name)
else:
return prop
def check_coordinates(co, xy=None):
"""
Check and parse coordinates as either a single coordinate list [[r,c],[r,c]] or a
list of coordinates for multiple regions [[[r0,c0],[r0,c0]], [[r1,c1],[r1,c1]]]
"""
if isinstance(co, ndarray):
co = co.tolist()
if not (isinstance(co[0][0], list) or isinstance(co[0][0], tuple)):
co = [co]
if xy is not True:
co = map(lambda p: asarray(p)[:, ::-1].tolist(), co)
return co
def check_bins(b):
return b
def check_color(c):
"""
Check and parse color specs as either a single [r,g,b] or a list of
[[r,g,b],[r,g,b]...]
"""
c = asarray(c)
if c.ndim == 1:
c = c.flatten()
c = c[newaxis, :]
if c.shape[1] != 3:
raise Exception("Color must have three values per point")
elif c.ndim == 2:
if c.shape[1] != 3:
raise Exception("Color array must have three values per point")
return c
def check_colormap(cmap):
"""
Check if cmap is one of the colorbrewer maps
"""
names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',
'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',
'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])
if cmap not in names:
raise Exception("Invalid cmap '%s', must be one of %s" % (cmap, names))
else:
return cmap
def check_size(s):
"""
Check and parse size specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "size")
if any(map(lambda d: d <= 0, s)):
raise Exception('Size cannot be 0 or negative')
return s
def check_thickness(s):
"""
Check and parse thickness specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "thickness")
if any(map(lambda d: d <= 0, s)):
raise Exception('Thickness cannot be 0 or negative')
return s
def check_index(i):
"""
Checks and parses an index spec, must be a one-dimensional array [i0, i1, ...]
"""
i = asarray(i)
if (i.ndim > 1) or (size(i) < 1):
raise Exception("Index must be one-dimensional and non-singleton")
return i
def check_alpha(a):
"""
Check and parse alpha specs as either a single [a] or a list of [a,a,a,...]
"""
a = check_1d(a, "alpha")
if any(map(lambda d: d <= 0, a)):
raise Exception('Alpha cannot be 0 or negative')
return a
def check_spec(spec):
try:
import altair
if type(spec) == altair.api.Viz:
spec = spec.to_dict()
except ImportError:
pass
if type(spec) == str:
import ast
spec = ast.literal_eval(spec)
return spec
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
if x.ndim > 1 or y.ndim > 1:
raise Exception('x and y vectors must be one-dimensional')
if size(x) != size(y):
raise Exception('x and y vectors must be the same length')
points = vstack([x, y]).T
return points
def vecs_to_points_three(x, y, z):
x = asarray(x)
y = asarray(y)
z = asarray(z)
if x.ndim > 1 or y.ndim > 1 or z.ndim > 1:
raise Exception('x, y, and z vectors must be one-dimensional')
if (size(x) != size(y)) or (size(x) != size(z)) or (size(y) != size(z)):
raise Exception('x, y, and z vectors must be the same length')
points = vstack([x, y, z]).T
return points
def mat_to_array(mat):
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
return mat
def mat_to_links(mat):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
return links
def parse_nodes(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
nodes = list(range(0, len(data)))
else:
nodes = list(range(0, int(max(max(data[:, 0]), max(data[:, 1])) + 1)))
return nodes
def parse_links(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
links = mat_to_links(data)
else:
if len(data[0]) == 2:
links = concatenate((data, ones((len(data), 1))), axis=1)
elif len(data[0]) == 3:
links = data
else:
raise ValueError("Too many entries per link, must be 2 or 3, got %g" % len(data[0]))
return links
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
im = asarray(im)
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
def list_to_regions(reg):
if isinstance(reg, str):
return [reg]
if isinstance(reg, list):
checktwo = all(map(lambda x: len(x) == 2, reg))
checkthree = all(map(lambda x: len(x) == 3, reg))
if not (checktwo or checkthree):
raise Exception("All region names must be two letters (for US) or three letters (for world)")
return reg
def polygon_to_mask(coords, dims, z=None):
"""
Given a list of pairs of points which define a polygon, return a binary
mask covering the interior of the polygon with dimensions dim
"""
bounds = array(coords).astype('int')
path = Path(bounds)
grid = meshgrid(range(dims[1]), range(dims[0]))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
mask = path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')
if z is not None:
if len(dims) < 3:
raise Exception('Dims must have three-dimensions for embedding z-index')
if z >= dims[2]:
raise Exception('Z-index %g exceeds third dimension %g' % (z, dims[2]))
tmp = zeros(dims)
tmp[:, :, z] = mask
mask = tmp
return mask
def polygon_to_points(coords, z=None):
"""
Given a list of pairs of points which define a polygon,
return a list of points interior to the polygon
"""
bounds = array(coords).astype('int')
bmax = bounds.max(0)
bmin = bounds.min(0)
path = Path(bounds)
grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')
points = where(points)
points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()
if z is not None:
points = map(lambda p: [p[0], p[1], z], points)
return points |
lightning-viz/lightning-python | lightning/types/utils.py | polygon_to_mask | python | def polygon_to_mask(coords, dims, z=None):
bounds = array(coords).astype('int')
path = Path(bounds)
grid = meshgrid(range(dims[1]), range(dims[0]))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
mask = path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')
if z is not None:
if len(dims) < 3:
raise Exception('Dims must have three-dimensions for embedding z-index')
if z >= dims[2]:
raise Exception('Z-index %g exceeds third dimension %g' % (z, dims[2]))
tmp = zeros(dims)
tmp[:, :, z] = mask
mask = tmp
return mask | Given a list of pairs of points which define a polygon, return a binary
mask covering the interior of the polygon with dimensions dim | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/utils.py#L295-L318 | null | from numpy import asarray, array, ndarray, vstack, newaxis, nonzero, concatenate, \
transpose, atleast_2d, size, isscalar, meshgrid, where, zeros, ones
from matplotlib.path import Path
import ast
def add_property(d, prop, name, **kwargs):
if prop is not None:
p = check_property(prop, name, **kwargs)
d[name] = p
return d
def check_property(prop, name, **kwargs):
"""
Check and parse a property with either a specific checking function
or a generic parser
"""
checkers = {
'color': check_color,
'alpha': check_alpha,
'size': check_size,
'thickness': check_thickness,
'index': check_index,
'coordinates': check_coordinates,
'colormap': check_colormap,
'bins': check_bins,
'spec': check_spec
}
if name in checkers:
return checkers[name](prop, **kwargs)
elif isinstance(prop, list) or isinstance(prop, ndarray) or isscalar(prop):
return check_1d(prop, name)
else:
return prop
def check_coordinates(co, xy=None):
"""
Check and parse coordinates as either a single coordinate list [[r,c],[r,c]] or a
list of coordinates for multiple regions [[[r0,c0],[r0,c0]], [[r1,c1],[r1,c1]]]
"""
if isinstance(co, ndarray):
co = co.tolist()
if not (isinstance(co[0][0], list) or isinstance(co[0][0], tuple)):
co = [co]
if xy is not True:
co = map(lambda p: asarray(p)[:, ::-1].tolist(), co)
return co
def check_bins(b):
return b
def check_color(c):
"""
Check and parse color specs as either a single [r,g,b] or a list of
[[r,g,b],[r,g,b]...]
"""
c = asarray(c)
if c.ndim == 1:
c = c.flatten()
c = c[newaxis, :]
if c.shape[1] != 3:
raise Exception("Color must have three values per point")
elif c.ndim == 2:
if c.shape[1] != 3:
raise Exception("Color array must have three values per point")
return c
def check_colormap(cmap):
"""
Check if cmap is one of the colorbrewer maps
"""
names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',
'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',
'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])
if cmap not in names:
raise Exception("Invalid cmap '%s', must be one of %s" % (cmap, names))
else:
return cmap
def check_size(s):
"""
Check and parse size specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "size")
if any(map(lambda d: d <= 0, s)):
raise Exception('Size cannot be 0 or negative')
return s
def check_thickness(s):
"""
Check and parse thickness specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "thickness")
if any(map(lambda d: d <= 0, s)):
raise Exception('Thickness cannot be 0 or negative')
return s
def check_index(i):
"""
Checks and parses an index spec, must be a one-dimensional array [i0, i1, ...]
"""
i = asarray(i)
if (i.ndim > 1) or (size(i) < 1):
raise Exception("Index must be one-dimensional and non-singleton")
return i
def check_alpha(a):
"""
Check and parse alpha specs as either a single [a] or a list of [a,a,a,...]
"""
a = check_1d(a, "alpha")
if any(map(lambda d: d <= 0, a)):
raise Exception('Alpha cannot be 0 or negative')
return a
def check_1d(x, name):
"""
Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]
"""
x = asarray(x)
if size(x) == 1:
x = asarray([x])
if x.ndim == 2:
raise Exception("Property: %s must be one-dimensional" % name)
x = x.flatten()
return x
def check_spec(spec):
try:
import altair
if type(spec) == altair.api.Viz:
spec = spec.to_dict()
except ImportError:
pass
if type(spec) == str:
import ast
spec = ast.literal_eval(spec)
return spec
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
if x.ndim > 1 or y.ndim > 1:
raise Exception('x and y vectors must be one-dimensional')
if size(x) != size(y):
raise Exception('x and y vectors must be the same length')
points = vstack([x, y]).T
return points
def vecs_to_points_three(x, y, z):
x = asarray(x)
y = asarray(y)
z = asarray(z)
if x.ndim > 1 or y.ndim > 1 or z.ndim > 1:
raise Exception('x, y, and z vectors must be one-dimensional')
if (size(x) != size(y)) or (size(x) != size(z)) or (size(y) != size(z)):
raise Exception('x, y, and z vectors must be the same length')
points = vstack([x, y, z]).T
return points
def mat_to_array(mat):
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
return mat
def mat_to_links(mat):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
return links
def parse_nodes(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
nodes = list(range(0, len(data)))
else:
nodes = list(range(0, int(max(max(data[:, 0]), max(data[:, 1])) + 1)))
return nodes
def parse_links(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
links = mat_to_links(data)
else:
if len(data[0]) == 2:
links = concatenate((data, ones((len(data), 1))), axis=1)
elif len(data[0]) == 3:
links = data
else:
raise ValueError("Too many entries per link, must be 2 or 3, got %g" % len(data[0]))
return links
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
im = asarray(im)
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
def list_to_regions(reg):
if isinstance(reg, str):
return [reg]
if isinstance(reg, list):
checktwo = all(map(lambda x: len(x) == 2, reg))
checkthree = all(map(lambda x: len(x) == 3, reg))
if not (checktwo or checkthree):
raise Exception("All region names must be two letters (for US) or three letters (for world)")
return reg
def polygon_to_points(coords, z=None):
"""
Given a list of pairs of points which define a polygon,
return a list of points interior to the polygon
"""
bounds = array(coords).astype('int')
bmax = bounds.max(0)
bmin = bounds.min(0)
path = Path(bounds)
grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')
points = where(points)
points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()
if z is not None:
points = map(lambda p: [p[0], p[1], z], points)
return points |
lightning-viz/lightning-python | lightning/types/utils.py | polygon_to_points | python | def polygon_to_points(coords, z=None):
bounds = array(coords).astype('int')
bmax = bounds.max(0)
bmin = bounds.min(0)
path = Path(bounds)
grid = meshgrid(range(bmin[0], bmax[0]+1), range(bmin[1], bmax[1]+1))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
points = path.contains_points(grid_flat).reshape(grid[0].shape).astype('int')
points = where(points)
points = (vstack([points[0], points[1]]).T + bmin[-1::-1]).tolist()
if z is not None:
points = map(lambda p: [p[0], p[1], z], points)
return points | Given a list of pairs of points which define a polygon,
return a list of points interior to the polygon | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/types/utils.py#L321-L344 | null | from numpy import asarray, array, ndarray, vstack, newaxis, nonzero, concatenate, \
transpose, atleast_2d, size, isscalar, meshgrid, where, zeros, ones
from matplotlib.path import Path
import ast
def add_property(d, prop, name, **kwargs):
if prop is not None:
p = check_property(prop, name, **kwargs)
d[name] = p
return d
def check_property(prop, name, **kwargs):
"""
Check and parse a property with either a specific checking function
or a generic parser
"""
checkers = {
'color': check_color,
'alpha': check_alpha,
'size': check_size,
'thickness': check_thickness,
'index': check_index,
'coordinates': check_coordinates,
'colormap': check_colormap,
'bins': check_bins,
'spec': check_spec
}
if name in checkers:
return checkers[name](prop, **kwargs)
elif isinstance(prop, list) or isinstance(prop, ndarray) or isscalar(prop):
return check_1d(prop, name)
else:
return prop
def check_coordinates(co, xy=None):
"""
Check and parse coordinates as either a single coordinate list [[r,c],[r,c]] or a
list of coordinates for multiple regions [[[r0,c0],[r0,c0]], [[r1,c1],[r1,c1]]]
"""
if isinstance(co, ndarray):
co = co.tolist()
if not (isinstance(co[0][0], list) or isinstance(co[0][0], tuple)):
co = [co]
if xy is not True:
co = map(lambda p: asarray(p)[:, ::-1].tolist(), co)
return co
def check_bins(b):
return b
def check_color(c):
"""
Check and parse color specs as either a single [r,g,b] or a list of
[[r,g,b],[r,g,b]...]
"""
c = asarray(c)
if c.ndim == 1:
c = c.flatten()
c = c[newaxis, :]
if c.shape[1] != 3:
raise Exception("Color must have three values per point")
elif c.ndim == 2:
if c.shape[1] != 3:
raise Exception("Color array must have three values per point")
return c
def check_colormap(cmap):
"""
Check if cmap is one of the colorbrewer maps
"""
names = set(['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu',
'PuBuGn', 'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd',
'Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3', 'Lightning'])
if cmap not in names:
raise Exception("Invalid cmap '%s', must be one of %s" % (cmap, names))
else:
return cmap
def check_size(s):
"""
Check and parse size specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "size")
if any(map(lambda d: d <= 0, s)):
raise Exception('Size cannot be 0 or negative')
return s
def check_thickness(s):
"""
Check and parse thickness specs as either a single [s] or a list of [s,s,s,...]
"""
s = check_1d(s, "thickness")
if any(map(lambda d: d <= 0, s)):
raise Exception('Thickness cannot be 0 or negative')
return s
def check_index(i):
"""
Checks and parses an index spec, must be a one-dimensional array [i0, i1, ...]
"""
i = asarray(i)
if (i.ndim > 1) or (size(i) < 1):
raise Exception("Index must be one-dimensional and non-singleton")
return i
def check_alpha(a):
"""
Check and parse alpha specs as either a single [a] or a list of [a,a,a,...]
"""
a = check_1d(a, "alpha")
if any(map(lambda d: d <= 0, a)):
raise Exception('Alpha cannot be 0 or negative')
return a
def check_1d(x, name):
"""
Check and parse a one-dimensional spec as either a single [x] or a list of [x,x,x...]
"""
x = asarray(x)
if size(x) == 1:
x = asarray([x])
if x.ndim == 2:
raise Exception("Property: %s must be one-dimensional" % name)
x = x.flatten()
return x
def check_spec(spec):
try:
import altair
if type(spec) == altair.api.Viz:
spec = spec.to_dict()
except ImportError:
pass
if type(spec) == str:
import ast
spec = ast.literal_eval(spec)
return spec
def array_to_lines(data):
data = asarray(data)
return data
def vecs_to_points(x, y):
x = asarray(x)
y = asarray(y)
if x.ndim > 1 or y.ndim > 1:
raise Exception('x and y vectors must be one-dimensional')
if size(x) != size(y):
raise Exception('x and y vectors must be the same length')
points = vstack([x, y]).T
return points
def vecs_to_points_three(x, y, z):
x = asarray(x)
y = asarray(y)
z = asarray(z)
if x.ndim > 1 or y.ndim > 1 or z.ndim > 1:
raise Exception('x, y, and z vectors must be one-dimensional')
if (size(x) != size(y)) or (size(x) != size(z)) or (size(y) != size(z)):
raise Exception('x, y, and z vectors must be the same length')
points = vstack([x, y, z]).T
return points
def mat_to_array(mat):
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
return mat
def mat_to_links(mat):
# get nonzero entries as list with the source, target, and value as columns
mat = asarray(mat)
if mat.ndim < 2:
raise Exception('Matrix input must be two-dimensional')
inds = nonzero(mat)
links = concatenate((transpose(nonzero(mat)), atleast_2d(mat[inds]).T), axis=1)
return links
def parse_nodes(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
nodes = list(range(0, len(data)))
else:
nodes = list(range(0, int(max(max(data[:, 0]), max(data[:, 1])) + 1)))
return nodes
def parse_links(data):
data = asarray(data)
if data.shape[0] == data.shape[1]:
links = mat_to_links(data)
else:
if len(data[0]) == 2:
links = concatenate((data, ones((len(data), 1))), axis=1)
elif len(data[0]) == 3:
links = data
else:
raise ValueError("Too many entries per link, must be 2 or 3, got %g" % len(data[0]))
return links
def array_to_im(im):
from matplotlib.pyplot import imsave
from matplotlib.pyplot import cm
import io
im = asarray(im)
imfile = io.BytesIO()
if im.ndim == 3:
# if 3D, show as RGB
imsave(imfile, im, format="png")
else:
# if 2D, show as grayscale
imsave(imfile, im, format="png", cmap=cm.gray)
if im.ndim > 3:
raise Exception("Images must be 2 or 3 dimensions")
return imfile.getvalue()
def list_to_regions(reg):
if isinstance(reg, str):
return [reg]
if isinstance(reg, list):
checktwo = all(map(lambda x: len(x) == 2, reg))
checkthree = all(map(lambda x: len(x) == 3, reg))
if not (checktwo or checkthree):
raise Exception("All region names must be two letters (for US) or three letters (for world)")
return reg
def polygon_to_mask(coords, dims, z=None):
"""
Given a list of pairs of points which define a polygon, return a binary
mask covering the interior of the polygon with dimensions dim
"""
bounds = array(coords).astype('int')
path = Path(bounds)
grid = meshgrid(range(dims[1]), range(dims[0]))
grid_flat = zip(grid[0].ravel(), grid[1].ravel())
mask = path.contains_points(grid_flat).reshape(dims[0:2]).astype('int')
if z is not None:
if len(dims) < 3:
raise Exception('Dims must have three-dimensions for embedding z-index')
if z >= dims[2]:
raise Exception('Z-index %g exceeds third dimension %g' % (z, dims[2]))
tmp = zeros(dims)
tmp[:, :, z] = mask
mask = tmp
return mask
|
lightning-viz/lightning-python | lightning/visualization.py | VisualizationLocal.save_html | python | def save_html(self, filename=None, overwrite=False):
if filename is None:
raise ValueError('Please provide a filename, e.g. viz.save_html(filename="viz.html").')
import os
base = self._html
js = self.load_embed()
if os.path.exists(filename):
if overwrite is False:
raise ValueError("File '%s' exists. To ovewrite call save_html with overwrite=True."
% os.path.abspath(filename))
else:
os.remove(filename)
with open(filename, "wb") as f:
f.write(base.encode('utf-8'))
f.write('<script>' + js.encode('utf-8') + '</script>') | Save self-contained html to a file.
Parameters
----------
filename : str
The filename to save to | train | https://github.com/lightning-viz/lightning-python/blob/68563e1da82d162d204069d7586f7c695b8bd4a6/lightning/visualization.py#L173-L197 | [
"def load_embed():\n import os\n location = os.path.join(os.path.dirname(__file__), 'lib/embed.js')\n import codecs\n return codecs.open(location, \"r\", \"utf-8\").read()\n"
] | class VisualizationLocal(object):
def __init__(self, html):
self._html = html
@classmethod
def _create(cls, data=None, images=None, type=None, options=None):
import base64
from jinja2 import Template, escape
t = Template(cls.load_template())
options = escape(json.dumps(options))
random_id = 'A' + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(9))
fields = {'viz': type, 'options': options, 'viz_id': random_id}
if images:
bytes = ['data:image/png;base64,' + base64.b64encode(img) + ',' for img in images]
fields['images'] = escape(json.dumps(bytes))
else:
data = escape(json.dumps(data))
fields['data'] = data
html = t.render(**fields)
viz = cls(html)
return viz
def get_html(self):
"""
Return html for this local visualization.
Assumes that Javascript has already been embedded,
to be used for rendering in notebooks.
"""
return self._html
@staticmethod
def load_template():
import os
location = os.path.join(os.path.dirname(__file__), 'lib/template.html')
return open(location).read()
@staticmethod
def load_embed():
import os
location = os.path.join(os.path.dirname(__file__), 'lib/embed.js')
import codecs
return codecs.open(location, "r", "utf-8").read()
|
michaeljoseph/changes | changes/__init__.py | initialise | python | def initialise():
global settings, project_settings
# Global changes settings
settings = Changes.load()
# Project specific settings
project_settings = Project.load(GitHubRepository(auth_token=settings.auth_token)) | Detects, prompts and initialises the project.
Stores project and tool configuration in the `changes` module. | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/__init__.py#L21-L33 | [
"def load(cls):\n tool_config_path = Path(\n str(\n os.environ.get(\n 'CHANGES_CONFIG_FILE',\n expanduser('~/.changes')\n if not compat.IS_WINDOWS\n else expandvars(r'%APPDATA%\\\\.changes'),\n )\n )\n )\n\n too... | """Generates a github changelog, tags and uploads your python library"""
from datetime import date
from pathlib import Path
from changes.config import Changes, Project
from changes.models import Release, ReleaseType
from changes.models.repository import GitHubRepository
__version__ = '0.7.0'
__url__ = 'https://github.com/michaeljoseph/changes'
__author__ = 'Michael Joseph'
__email__ = 'michaeljoseph@gmail.com'
from .cli import main # noqa
settings = None
project_settings = None
def release_from_pull_requests():
global project_settings
repository = project_settings.repository
pull_requests = repository.pull_requests_since_latest_version
labels = set(
[
label_name
for pull_request in pull_requests
for label_name in pull_request.label_names
]
)
descriptions = [
'\n'.join([pull_request.title, pull_request.description])
for pull_request in pull_requests
]
bumpversion_part, release_type, proposed_version = determine_release(
repository.latest_version, descriptions, labels
)
releases_directory = Path(project_settings.releases_directory)
if not releases_directory.exists():
releases_directory.mkdir(parents=True)
release = Release(
release_date=date.today().isoformat(),
version=str(proposed_version),
bumpversion_part=bumpversion_part,
release_type=release_type,
)
release_files = [release_file for release_file in releases_directory.glob('*.md')]
if release_files:
release_file = release_files[0]
release.release_file_path = Path(project_settings.releases_directory).joinpath(
release_file.name
)
release.description = release_file.read_text()
return release
def determine_release(latest_version, descriptions, labels):
if 'BREAKING CHANGE' in descriptions:
return 'major', ReleaseType.BREAKING_CHANGE, latest_version.next_major()
elif 'enhancement' in labels:
return 'minor', ReleaseType.FEATURE, latest_version.next_minor()
elif 'bug' in labels:
return 'patch', ReleaseType.FIX, latest_version.next_patch()
else:
return None, ReleaseType.NO_CHANGE, latest_version
|
michaeljoseph/changes | changes/packaging.py | build_distributions | python | def build_distributions(context):
rmtree('dist', ignore_errors=True)
build_package_command = 'python setup.py clean sdist bdist_wheel'
result = shell.dry_run(build_package_command, context.dry_run)
packages = Path('dist').files() if not context.dry_run else "nothing"
if not result:
raise Exception('Error building packages: %s' % result)
else:
log.info('Built %s' % ', '.join(packages))
return packages | Builds package distributions | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/packaging.py#L10-L22 | [
"def dry_run(command, dry_run):\n \"\"\"Executes a shell command unless the dry run option is set\"\"\"\n if not dry_run:\n cmd_parts = command.split(' ')\n # http://plumbum.readthedocs.org/en/latest/local_commands.html#run-and-popen\n return local[cmd_parts[0]](cmd_parts[1:])\n else:\... | import logging
from pathlib import Path
from shutil import rmtree
from changes import shell, util, venv, verification
log = logging.getLogger(__name__)
# tox
def install_package(context):
"""Attempts to install the sdist and wheel."""
if not context.dry_run and build_distributions(context):
with util.mktmpdir() as tmp_dir:
venv.create_venv(tmp_dir=tmp_dir)
for distribution in Path('dist').files():
try:
venv.install(distribution, tmp_dir)
log.info('Successfully installed %s', distribution)
if context.test_command and verification.run_test_command(context):
log.info(
'Successfully ran test command: %s', context.test_command
)
except Exception as e:
raise Exception(
'Error installing distribution %s' % distribution, e
)
else:
log.info('Dry run, skipping installation')
# twine
def upload_package(context):
"""Uploads your project packages to pypi with twine."""
if not context.dry_run and build_distributions(context):
upload_args = 'twine upload '
upload_args += ' '.join(Path('dist').files())
if context.pypi:
upload_args += ' -r %s' % context.pypi
upload_result = shell.dry_run(upload_args, context.dry_run)
if not context.dry_run and not upload_result:
raise Exception('Error uploading: %s' % upload_result)
else:
log.info(
'Successfully uploaded %s:%s', context.module_name, context.new_version
)
else:
log.info('Dry run, skipping package upload')
def install_from_pypi(context):
"""Attempts to install your package from pypi."""
tmp_dir = venv.create_venv()
install_cmd = '%s/bin/pip install %s' % (tmp_dir, context.module_name)
package_index = 'pypi'
if context.pypi:
install_cmd += '-i %s' % context.pypi
package_index = context.pypi
try:
result = shell.dry_run(install_cmd, context.dry_run)
if not context.dry_run and not result:
log.error(
'Failed to install %s from %s', context.module_name, package_index
)
else:
log.info(
'Successfully installed %s from %s', context.module_name, package_index
)
except Exception as e:
error_msg = 'Error installing %s from %s' % (context.module_name, package_index)
log.exception(error_msg)
raise Exception(error_msg, e)
|
michaeljoseph/changes | changes/packaging.py | install_package | python | def install_package(context):
if not context.dry_run and build_distributions(context):
with util.mktmpdir() as tmp_dir:
venv.create_venv(tmp_dir=tmp_dir)
for distribution in Path('dist').files():
try:
venv.install(distribution, tmp_dir)
log.info('Successfully installed %s', distribution)
if context.test_command and verification.run_test_command(context):
log.info(
'Successfully ran test command: %s', context.test_command
)
except Exception as e:
raise Exception(
'Error installing distribution %s' % distribution, e
)
else:
log.info('Dry run, skipping installation') | Attempts to install the sdist and wheel. | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/packaging.py#L26-L45 | [
"def build_distributions(context):\n \"\"\"Builds package distributions\"\"\"\n rmtree('dist', ignore_errors=True)\n\n build_package_command = 'python setup.py clean sdist bdist_wheel'\n result = shell.dry_run(build_package_command, context.dry_run)\n packages = Path('dist').files() if not context.dr... | import logging
from pathlib import Path
from shutil import rmtree
from changes import shell, util, venv, verification
log = logging.getLogger(__name__)
def build_distributions(context):
    """Builds package distributions (sdist and wheel).

    Removes any stale ``dist/`` directory first, then runs the
    setup.py build and returns the list of built distribution paths.

    Raises:
        Exception: if the build command fails.
    """
    rmtree('dist', ignore_errors=True)

    build_package_command = 'python setup.py clean sdist bdist_wheel'
    result = shell.dry_run(build_package_command, context.dry_run)
    # Bug fix: in a dry run nothing is written to dist/, so use a
    # one-item placeholder *list*; the previous bare string made
    # ', '.join(packages) iterate characters ("n, o, t, h, i, n, g").
    packages = Path('dist').files() if not context.dry_run else ['nothing']

    if not result:
        raise Exception('Error building packages: %s' % result)
    else:
        log.info('Built %s' % ', '.join(packages))
        return packages
# tox
# twine
def upload_package(context):
    """Uploads your project packages to pypi with twine."""
    # Nothing to do on a dry run (build_distributions is not invoked
    # either, matching the original short-circuit).
    if context.dry_run or not build_distributions(context):
        log.info('Dry run, skipping package upload')
        return

    command_parts = ['twine upload ']
    command_parts.append(' '.join(Path('dist').files()))
    if context.pypi:
        command_parts.append(' -r %s' % context.pypi)

    upload_result = shell.dry_run(''.join(command_parts), context.dry_run)
    if not context.dry_run and not upload_result:
        raise Exception('Error uploading: %s' % upload_result)
    else:
        log.info(
            'Successfully uploaded %s:%s', context.module_name, context.new_version
        )
def install_from_pypi(context):
"""Attempts to install your package from pypi."""
tmp_dir = venv.create_venv()
install_cmd = '%s/bin/pip install %s' % (tmp_dir, context.module_name)
package_index = 'pypi'
if context.pypi:
install_cmd += '-i %s' % context.pypi
package_index = context.pypi
try:
result = shell.dry_run(install_cmd, context.dry_run)
if not context.dry_run and not result:
log.error(
'Failed to install %s from %s', context.module_name, package_index
)
else:
log.info(
'Successfully installed %s from %s', context.module_name, package_index
)
except Exception as e:
error_msg = 'Error installing %s from %s' % (context.module_name, package_index)
log.exception(error_msg)
raise Exception(error_msg, e)
|
michaeljoseph/changes | changes/packaging.py | upload_package | python | def upload_package(context):
if not context.dry_run and build_distributions(context):
upload_args = 'twine upload '
upload_args += ' '.join(Path('dist').files())
if context.pypi:
upload_args += ' -r %s' % context.pypi
upload_result = shell.dry_run(upload_args, context.dry_run)
if not context.dry_run and not upload_result:
raise Exception('Error uploading: %s' % upload_result)
else:
log.info(
'Successfully uploaded %s:%s', context.module_name, context.new_version
)
else:
log.info('Dry run, skipping package upload') | Uploads your project packages to pypi with twine. | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/packaging.py#L49-L66 | [
"def dry_run(command, dry_run):\n \"\"\"Executes a shell command unless the dry run option is set\"\"\"\n if not dry_run:\n cmd_parts = command.split(' ')\n # http://plumbum.readthedocs.org/en/latest/local_commands.html#run-and-popen\n return local[cmd_parts[0]](cmd_parts[1:])\n else:\... | import logging
from pathlib import Path
from shutil import rmtree
from changes import shell, util, venv, verification
log = logging.getLogger(__name__)
def build_distributions(context):
"""Builds package distributions"""
rmtree('dist', ignore_errors=True)
build_package_command = 'python setup.py clean sdist bdist_wheel'
result = shell.dry_run(build_package_command, context.dry_run)
packages = Path('dist').files() if not context.dry_run else "nothing"
if not result:
raise Exception('Error building packages: %s' % result)
else:
log.info('Built %s' % ', '.join(packages))
return packages
# tox
def install_package(context):
"""Attempts to install the sdist and wheel."""
if not context.dry_run and build_distributions(context):
with util.mktmpdir() as tmp_dir:
venv.create_venv(tmp_dir=tmp_dir)
for distribution in Path('dist').files():
try:
venv.install(distribution, tmp_dir)
log.info('Successfully installed %s', distribution)
if context.test_command and verification.run_test_command(context):
log.info(
'Successfully ran test command: %s', context.test_command
)
except Exception as e:
raise Exception(
'Error installing distribution %s' % distribution, e
)
else:
log.info('Dry run, skipping installation')
# twine
def install_from_pypi(context):
"""Attempts to install your package from pypi."""
tmp_dir = venv.create_venv()
install_cmd = '%s/bin/pip install %s' % (tmp_dir, context.module_name)
package_index = 'pypi'
if context.pypi:
install_cmd += '-i %s' % context.pypi
package_index = context.pypi
try:
result = shell.dry_run(install_cmd, context.dry_run)
if not context.dry_run and not result:
log.error(
'Failed to install %s from %s', context.module_name, package_index
)
else:
log.info(
'Successfully installed %s from %s', context.module_name, package_index
)
except Exception as e:
error_msg = 'Error installing %s from %s' % (context.module_name, package_index)
log.exception(error_msg)
raise Exception(error_msg, e)
|
michaeljoseph/changes | changes/packaging.py | install_from_pypi | python | def install_from_pypi(context):
tmp_dir = venv.create_venv()
install_cmd = '%s/bin/pip install %s' % (tmp_dir, context.module_name)
package_index = 'pypi'
if context.pypi:
install_cmd += '-i %s' % context.pypi
package_index = context.pypi
try:
result = shell.dry_run(install_cmd, context.dry_run)
if not context.dry_run and not result:
log.error(
'Failed to install %s from %s', context.module_name, package_index
)
else:
log.info(
'Successfully installed %s from %s', context.module_name, package_index
)
except Exception as e:
error_msg = 'Error installing %s from %s' % (context.module_name, package_index)
log.exception(error_msg)
raise Exception(error_msg, e) | Attempts to install your package from pypi. | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/packaging.py#L69-L94 | [
"def dry_run(command, dry_run):\n \"\"\"Executes a shell command unless the dry run option is set\"\"\"\n if not dry_run:\n cmd_parts = command.split(' ')\n # http://plumbum.readthedocs.org/en/latest/local_commands.html#run-and-popen\n return local[cmd_parts[0]](cmd_parts[1:])\n else:\... | import logging
from pathlib import Path
from shutil import rmtree
from changes import shell, util, venv, verification
log = logging.getLogger(__name__)
def build_distributions(context):
"""Builds package distributions"""
rmtree('dist', ignore_errors=True)
build_package_command = 'python setup.py clean sdist bdist_wheel'
result = shell.dry_run(build_package_command, context.dry_run)
packages = Path('dist').files() if not context.dry_run else "nothing"
if not result:
raise Exception('Error building packages: %s' % result)
else:
log.info('Built %s' % ', '.join(packages))
return packages
# tox
def install_package(context):
    """Attempts to install the sdist and wheel."""
    # Guard clause: dry runs (and failed builds) skip installation.
    if context.dry_run or not build_distributions(context):
        log.info('Dry run, skipping installation')
        return

    with util.mktmpdir() as tmp_dir:
        venv.create_venv(tmp_dir=tmp_dir)
        for dist_path in Path('dist').files():
            try:
                venv.install(dist_path, tmp_dir)
                log.info('Successfully installed %s', dist_path)
                tests_passed = (
                    context.test_command and verification.run_test_command(context)
                )
                if tests_passed:
                    log.info(
                        'Successfully ran test command: %s', context.test_command
                    )
            except Exception as e:
                raise Exception(
                    'Error installing distribution %s' % dist_path, e
                )
# twine
def upload_package(context):
"""Uploads your project packages to pypi with twine."""
if not context.dry_run and build_distributions(context):
upload_args = 'twine upload '
upload_args += ' '.join(Path('dist').files())
if context.pypi:
upload_args += ' -r %s' % context.pypi
upload_result = shell.dry_run(upload_args, context.dry_run)
if not context.dry_run and not upload_result:
raise Exception('Error uploading: %s' % upload_result)
else:
log.info(
'Successfully uploaded %s:%s', context.module_name, context.new_version
)
else:
log.info('Dry run, skipping package upload')
|
michaeljoseph/changes | changes/probe.py | report_and_raise | python | def report_and_raise(probe_name, probe_result, failure_msg):
log.info('%s? %s' % (probe_name, probe_result))
if not probe_result:
raise exceptions.ProbeException(failure_msg)
else:
return True | Logs the probe result and raises on failure | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/probe.py#L30-L36 | null | import logging
from os.path import exists
from plumbum import local
from plumbum.cmd import git
from plumbum.commands import CommandNotFound
from changes import attributes, exceptions
log = logging.getLogger(__name__)
TOOLS = ['git', 'diff', 'python']
TEST_RUNNERS = ['pytest', 'nose', 'tox']
README_EXTENSIONS = [
'.md',
'.rst',
'.txt',
'' '.wiki',
'.rdoc',
'.org',
'.pod',
'.creole',
'.textile',
]
def has_setup():
"""`setup.py`"""
return report_and_raise(
'Has a setup.py', exists('setup.py'), 'Your project needs a setup.py'
)
def has_binary(command):
try:
local.which(command)
return True
except CommandNotFound:
log.info('%s does not exist' % command)
return False
def has_tools():
return any([has_binary(tool) for tool in TOOLS])
def has_test_runner():
return any([has_binary(runner) for runner in TEST_RUNNERS])
def has_changelog():
"""CHANGELOG.md"""
return report_and_raise(
'CHANGELOG.md', exists('CHANGELOG.md'), 'Create a CHANGELOG.md file'
)
def has_readme():
"""README"""
return report_and_raise(
'README',
any([exists('README{}'.format(ext)) for ext in README_EXTENSIONS]),
'Create a (valid) README',
)
def has_metadata(python_module):
    """`<module_name>/__init__.py` with `__version__` and `__url__`"""
    init_path = '{}/__init__.py'.format(python_module)
    has_metadata = (
        exists(init_path)
        and attributes.has_attribute(python_module, '__version__')
        and attributes.has_attribute(python_module, '__url__')
    )
    return report_and_raise(
        'Has module metadata',
        has_metadata,
        # Bug fix: interpolate the module name; previously the raw '%s'
        # placeholder leaked verbatim into the failure message.
        'Your %s/__init__.py must contain __version__ and __url__ attributes'
        % python_module,
    )
def has_signing_key(context):
return 'signingkey' in git('config', '-l')
def probe_project(python_module):
    """
    Check if the project meets `changes` requirements.
    Complain and exit otherwise.
    """
    log.info('Checking project for changes requirements.')
    # Run each probe in order, stopping at the first failure — the same
    # short-circuit behaviour as the original and-chain.  Probes that
    # go through report_and_raise raise on failure anyway.
    probes = (
        has_tools,
        has_setup,
        lambda: has_metadata(python_module),
        has_test_runner,
        has_readme,
        has_changelog,
    )
    for probe in probes:
        if not probe():
            return False
    return True
|
michaeljoseph/changes | changes/probe.py | has_metadata | python | def has_metadata(python_module):
init_path = '{}/__init__.py'.format(python_module)
has_metadata = (
exists(init_path)
and attributes.has_attribute(python_module, '__version__')
and attributes.has_attribute(python_module, '__url__')
)
return report_and_raise(
'Has module metadata',
has_metadata,
'Your %s/__init__.py must contain __version__ and __url__ attributes',
) | `<module_name>/__init__.py` with `__version__` and `__url__` | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/probe.py#L79-L91 | [
"def has_attribute(module_name, attribute_name):\n \"\"\"Is this attribute present?\"\"\"\n init_file = '%s/__init__.py' % module_name\n return any(\n [attribute_name in init_line for init_line in open(init_file).readlines()]\n )\n",
"def report_and_raise(probe_name, probe_result, failure_msg):... | import logging
from os.path import exists
from plumbum import local
from plumbum.cmd import git
from plumbum.commands import CommandNotFound
from changes import attributes, exceptions
log = logging.getLogger(__name__)
TOOLS = ['git', 'diff', 'python']
TEST_RUNNERS = ['pytest', 'nose', 'tox']
README_EXTENSIONS = [
'.md',
'.rst',
'.txt',
'' '.wiki',
'.rdoc',
'.org',
'.pod',
'.creole',
'.textile',
]
def report_and_raise(probe_name, probe_result, failure_msg):
    """Logs the probe result and raises on failure"""
    log.info('%s? %s' % (probe_name, probe_result))
    if probe_result:
        return True
    raise exceptions.ProbeException(failure_msg)
def has_setup():
    """`setup.py`"""
    setup_exists = exists('setup.py')
    return report_and_raise(
        'Has a setup.py', setup_exists, 'Your project needs a setup.py'
    )
def has_binary(command):
    """True when *command* resolves on the PATH."""
    try:
        local.which(command)
    except CommandNotFound:
        log.info('%s does not exist' % command)
        return False
    return True
def has_tools():
    """True when at least one of the required tools is installed."""
    return any(has_binary(tool) for tool in TOOLS)
def has_test_runner():
    """True when at least one supported test runner is installed."""
    return any(has_binary(runner) for runner in TEST_RUNNERS)
def has_changelog():
"""CHANGELOG.md"""
return report_and_raise(
'CHANGELOG.md', exists('CHANGELOG.md'), 'Create a CHANGELOG.md file'
)
def has_readme():
"""README"""
return report_and_raise(
'README',
any([exists('README{}'.format(ext)) for ext in README_EXTENSIONS]),
'Create a (valid) README',
)
def has_signing_key(context):
return 'signingkey' in git('config', '-l')
def probe_project(python_module):
"""
Check if the project meets `changes` requirements.
Complain and exit otherwise.
"""
log.info('Checking project for changes requirements.')
return (
has_tools()
and has_setup()
and has_metadata(python_module)
and has_test_runner()
and has_readme()
and has_changelog()
)
|
michaeljoseph/changes | changes/probe.py | probe_project | python | def probe_project(python_module):
log.info('Checking project for changes requirements.')
return (
has_tools()
and has_setup()
and has_metadata(python_module)
and has_test_runner()
and has_readme()
and has_changelog()
) | Check if the project meets `changes` requirements.
Complain and exit otherwise. | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/probe.py#L98-L111 | [
"def has_setup():\n \"\"\"`setup.py`\"\"\"\n return report_and_raise(\n 'Has a setup.py', exists('setup.py'), 'Your project needs a setup.py'\n )\n",
"def has_tools():\n return any([has_binary(tool) for tool in TOOLS])\n",
"def has_test_runner():\n return any([has_binary(runner) for runner... | import logging
from os.path import exists
from plumbum import local
from plumbum.cmd import git
from plumbum.commands import CommandNotFound
from changes import attributes, exceptions
log = logging.getLogger(__name__)
TOOLS = ['git', 'diff', 'python']
TEST_RUNNERS = ['pytest', 'nose', 'tox']
README_EXTENSIONS = [
'.md',
'.rst',
'.txt',
'' '.wiki',
'.rdoc',
'.org',
'.pod',
'.creole',
'.textile',
]
def report_and_raise(probe_name, probe_result, failure_msg):
"""Logs the probe result and raises on failure"""
log.info('%s? %s' % (probe_name, probe_result))
if not probe_result:
raise exceptions.ProbeException(failure_msg)
else:
return True
def has_setup():
"""`setup.py`"""
return report_and_raise(
'Has a setup.py', exists('setup.py'), 'Your project needs a setup.py'
)
def has_binary(command):
try:
local.which(command)
return True
except CommandNotFound:
log.info('%s does not exist' % command)
return False
def has_tools():
return any([has_binary(tool) for tool in TOOLS])
def has_test_runner():
return any([has_binary(runner) for runner in TEST_RUNNERS])
def has_changelog():
"""CHANGELOG.md"""
return report_and_raise(
'CHANGELOG.md', exists('CHANGELOG.md'), 'Create a CHANGELOG.md file'
)
def has_readme():
"""README"""
return report_and_raise(
'README',
any([exists('README{}'.format(ext)) for ext in README_EXTENSIONS]),
'Create a (valid) README',
)
def has_metadata(python_module):
"""`<module_name>/__init__.py` with `__version__` and `__url__`"""
init_path = '{}/__init__.py'.format(python_module)
has_metadata = (
exists(init_path)
and attributes.has_attribute(python_module, '__version__')
and attributes.has_attribute(python_module, '__url__')
)
return report_and_raise(
'Has module metadata',
has_metadata,
'Your %s/__init__.py must contain __version__ and __url__ attributes',
)
def has_signing_key(context):
return 'signingkey' in git('config', '-l')
|
michaeljoseph/changes | changes/flow.py | publish | python | def publish(context):
commit_version_change(context)
if context.github:
# github token
project_settings = project_config(context.module_name)
if not project_settings['gh_token']:
click.echo('You need a GitHub token for changes to create a release.')
click.pause(
'Press [enter] to launch the GitHub "New personal access '
'token" page, to create a token for changes.'
)
click.launch('https://github.com/settings/tokens/new')
project_settings['gh_token'] = click.prompt('Enter your changes token')
store_settings(context.module_name, project_settings)
description = click.prompt('Describe this release')
upload_url = create_github_release(
context, project_settings['gh_token'], description
)
upload_release_distributions(
context,
project_settings['gh_token'],
build_distributions(context),
upload_url,
)
click.pause('Press [enter] to review and update your new release')
click.launch(
'{0}/releases/tag/{1}'.format(context.repo_url, context.new_version)
)
else:
tag_and_push(context) | Publishes the project | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/flow.py#L25-L60 | [
"def project_config():\n \"\"\"Deprecated\"\"\"\n project_name = curdir\n\n config_path = Path(join(project_name, PROJECT_CONFIG_FILE))\n\n if not exists(config_path):\n store_settings(DEFAULTS.copy())\n return DEFAULTS\n\n return toml.load(io.open(config_path)) or {}\n",
"def store_s... | import logging
import click
from changes.changelog import generate_changelog
from changes.config import project_config, store_settings
from changes.packaging import (
build_distributions,
install_from_pypi,
install_package,
upload_package,
)
from changes.vcs import (
commit_version_change,
create_github_release,
tag_and_push,
upload_release_distributions,
)
from changes.verification import run_tests
from changes.version import increment_version
log = logging.getLogger(__name__)
def perform_release(context):
    """Executes the release process."""
    try:
        run_tests()

        if not context.skip_changelog:
            generate_changelog(context)

        # Release pipeline, executed in order; any failure is logged
        # (not re-raised), matching the original behaviour.
        for step in (
            increment_version,
            build_distributions,
            install_package,
            upload_package,
            install_from_pypi,
            publish,
        ):
            step(context)
    except Exception:
        log.exception('Error releasing')
|
michaeljoseph/changes | changes/flow.py | perform_release | python | def perform_release(context):
try:
run_tests()
if not context.skip_changelog:
generate_changelog(context)
increment_version(context)
build_distributions(context)
install_package(context)
upload_package(context)
install_from_pypi(context)
publish(context)
except Exception:
log.exception('Error releasing') | Executes the release process. | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/flow.py#L63-L83 | [
"def generate_changelog(context):\n \"\"\"Generates an automatic changelog from your commit messages.\"\"\"\n\n changelog_content = [\n '\\n## [%s](%s/compare/%s...%s)\\n\\n'\n % (\n context.new_version,\n context.repo_url,\n context.current_version,\n ... | import logging
import click
from changes.changelog import generate_changelog
from changes.config import project_config, store_settings
from changes.packaging import (
build_distributions,
install_from_pypi,
install_package,
upload_package,
)
from changes.vcs import (
commit_version_change,
create_github_release,
tag_and_push,
upload_release_distributions,
)
from changes.verification import run_tests
from changes.version import increment_version
log = logging.getLogger(__name__)
def publish(context):
    """Publishes the project"""
    commit_version_change(context)

    if not context.github:
        tag_and_push(context)
        return

    # github token
    project_settings = project_config(context.module_name)
    gh_token = project_settings['gh_token']
    if not gh_token:
        click.echo('You need a GitHub token for changes to create a release.')
        click.pause(
            'Press [enter] to launch the GitHub "New personal access '
            'token" page, to create a token for changes.'
        )
        click.launch('https://github.com/settings/tokens/new')
        gh_token = click.prompt('Enter your changes token')
        project_settings['gh_token'] = gh_token
        store_settings(context.module_name, project_settings)

    description = click.prompt('Describe this release')
    upload_url = create_github_release(context, gh_token, description)
    upload_release_distributions(
        context,
        gh_token,
        build_distributions(context),
        upload_url,
    )

    click.pause('Press [enter] to review and update your new release')
    click.launch(
        '{0}/releases/tag/{1}'.format(context.repo_url, context.new_version)
    )
|
michaeljoseph/changes | changes/attributes.py | extract_attribute | python | def extract_attribute(module_name, attribute_name):
with open('%s/__init__.py' % module_name) as input_file:
for line in input_file:
if line.startswith(attribute_name):
return ast.literal_eval(line.split('=')[1].strip()) | Extract metatdata property from a module | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/attributes.py#L12-L17 | null | import ast
import logging
import tempfile
from pathlib import Path
from plumbum.cmd import diff
log = logging.getLogger(__name__)
# TODO: leverage bumpversion
def replace_attribute(module_name, attribute_name, new_value, dry_run=True):
"""Update a metadata attribute"""
init_file = '%s/__init__.py' % module_name
_, tmp_file = tempfile.mkstemp()
with open(init_file) as input_file:
with open(tmp_file, 'w') as output_file:
for line in input_file:
if line.startswith(attribute_name):
line = "%s = '%s'\n" % (attribute_name, new_value)
output_file.write(line)
if not dry_run:
Path(tmp_file).copy(init_file)
else:
log.info(diff(tmp_file, init_file, retcode=None))
def has_attribute(module_name, attribute_name):
"""Is this attribute present?"""
init_file = '%s/__init__.py' % module_name
return any(
[attribute_name in init_line for init_line in open(init_file).readlines()]
)
|
michaeljoseph/changes | changes/attributes.py | replace_attribute | python | def replace_attribute(module_name, attribute_name, new_value, dry_run=True):
init_file = '%s/__init__.py' % module_name
_, tmp_file = tempfile.mkstemp()
with open(init_file) as input_file:
with open(tmp_file, 'w') as output_file:
for line in input_file:
if line.startswith(attribute_name):
line = "%s = '%s'\n" % (attribute_name, new_value)
output_file.write(line)
if not dry_run:
Path(tmp_file).copy(init_file)
else:
log.info(diff(tmp_file, init_file, retcode=None)) | Update a metadata attribute | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/attributes.py#L20-L36 | null | import ast
import logging
import tempfile
from pathlib import Path
from plumbum.cmd import diff
log = logging.getLogger(__name__)
# TODO: leverage bumpversion
def extract_attribute(module_name, attribute_name):
    """Extract a metadata property from a module's __init__.py.

    Returns the literal value of the first matching assignment, or
    None when the attribute is not present.
    """
    init_path = '%s/__init__.py' % module_name
    with open(init_path) as init_file:
        for line in init_file:
            if not line.startswith(attribute_name):
                continue
            pieces = line.split('=')
            return ast.literal_eval(pieces[1].strip())
def has_attribute(module_name, attribute_name):
    """Is this attribute present?"""
    init_file = '%s/__init__.py' % module_name
    # Fix: close the file deterministically; the original left the
    # handle open and relied on garbage collection.
    with open(init_file) as f:
        return any(attribute_name in line for line in f)
|
michaeljoseph/changes | changes/attributes.py | has_attribute | python | def has_attribute(module_name, attribute_name):
init_file = '%s/__init__.py' % module_name
return any(
[attribute_name in init_line for init_line in open(init_file).readlines()]
) | Is this attribute present? | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/attributes.py#L39-L44 | null | import ast
import logging
import tempfile
from pathlib import Path
from plumbum.cmd import diff
log = logging.getLogger(__name__)
# TODO: leverage bumpversion
def extract_attribute(module_name, attribute_name):
"""Extract metatdata property from a module"""
with open('%s/__init__.py' % module_name) as input_file:
for line in input_file:
if line.startswith(attribute_name):
return ast.literal_eval(line.split('=')[1].strip())
def replace_attribute(module_name, attribute_name, new_value, dry_run=True):
    """Update a metadata attribute in ``<module_name>/__init__.py``.

    Rewrites the module's ``__init__.py`` into a temporary file,
    substituting ``attribute_name = '<new_value>'`` for any line that
    starts with the attribute name.  When ``dry_run`` is set, a diff is
    logged instead of copying the result over the original file.
    """
    init_file = '%s/__init__.py' % module_name
    # Fix: NamedTemporaryFile closes its OS-level handle on exit;
    # the previous `_, tmp_file = tempfile.mkstemp()` leaked the raw
    # file descriptor returned by mkstemp.
    with tempfile.NamedTemporaryFile('w', delete=False) as output_file:
        tmp_file = output_file.name
        with open(init_file) as input_file:
            for line in input_file:
                if line.startswith(attribute_name):
                    line = "%s = '%s'\n" % (attribute_name, new_value)
                output_file.write(line)

    if not dry_run:
        Path(tmp_file).copy(init_file)
    else:
        log.info(diff(tmp_file, init_file, retcode=None))
|
def choose_labels(alternatives):
    """
    Prompt the user select several labels from the provided alternatives.
    At least one label must be selected.

    :param list alternatives: Sequence of options that are available to select from
    :return: Several selected labels
    """
    if not alternatives:
        raise ValueError
    if not isinstance(alternatives, list):
        raise TypeError

    # Number the alternatives from 1, with '0' first as the "done"
    # sentinel (same ordering as OrderedDict + move_to_end).
    input_terminator = '0'
    choice_map = OrderedDict()
    choice_map[input_terminator] = '<done>'
    for number, label in enumerate(alternatives, 1):
        choice_map['{}'.format(number)] = label

    choice_indexes = choice_map.keys()
    menu_lines = ['{} - {}'.format(index, label) for index, label in choice_map.items()]
    prompt = '\n'.join(
        (
            'Select labels:',
            '\n'.join(menu_lines),
            'Choose from {}'.format(', '.join(choice_indexes)),
        )
    )

    user_choices = set()
    user_choice = None
    while user_choice != input_terminator:
        if user_choices:
            note('Selected labels: [{}]'.format(', '.join(user_choices)))

        user_choice = click.prompt(
            prompt, type=click.Choice(choice_indexes), default=input_terminator
        )
        if user_choice != input_terminator:
            # NOTE(review): this membership test compares an index
            # string against stored label values, so it is almost
            # always true; the set add below dedups regardless.
            if user_choice not in user_choices:
                user_choices.add(choice_map[user_choice])
        elif not user_choices:
            error('Please select at least one label')
            user_choice = None

    return user_choices
At least one label must be selected.
:param list alternatives: Sequence of options that are available to select from
:return: Several selected labels | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/prompt.py#L8-L63 | [
"def error(message):\n echo(message, 'error')\n",
"def note(message):\n echo(message, 'note')\n"
] | from collections import OrderedDict
import click
from changes.commands import error, note
|
michaeljoseph/changes | changes/cli.py | work_in | python | def work_in(dirname=None):
curdir = os.getcwd()
try:
if dirname is not None:
os.chdir(dirname)
requests_cache.configure(expire_after=60 * 10 * 10)
changes.initialise()
yield
finally:
os.chdir(curdir) | Context manager version of os.chdir. When exited, returns to the working
directory prior to entering. | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/cli.py#L20-L36 | [
"def initialise():\n \"\"\"\n Detects, prompts and initialises the project.\n\n Stores project and tool configuration in the `changes` module.\n \"\"\"\n global settings, project_settings\n\n # Global changes settings\n settings = Changes.load()\n\n # Project specific settings\n project_s... | import contextlib
import os
import click
import requests_cache
import changes
from changes.commands import (
publish as publish_command,
stage as stage_command,
status as status_command,
)
from . import __version__
VERSION = 'changes {}'.format(__version__)
@contextlib.contextmanager
def print_version(context, param, value):
if not value or context.resilient_parsing:
return
click.echo(VERSION)
context.exit()
@click.option(
'--dry-run',
help='Prints (instead of executing) the operations to be performed.',
is_flag=True,
default=False,
)
@click.option('--verbose', help='Enables verbose output.', is_flag=True, default=False)
@click.version_option(__version__, '-V', '--version', message=VERSION)
@click.group(context_settings=dict(help_option_names=[u'-h', u'--help']))
def main(dry_run, verbose):
"""Ch-ch-changes"""
@click.command()
@click.argument('repo_directory', required=False)
def status(repo_directory):
"""
Shows current project release status.
"""
repo_directory = repo_directory if repo_directory else '.'
with work_in(repo_directory):
status_command.status()
main.add_command(status)
@click.command()
@click.option('--draft', help='Enables verbose output.', is_flag=True, default=False)
@click.option(
'--discard',
help='Discards the changes made to release files',
is_flag=True,
default=False,
)
@click.argument('repo_directory', default='.', required=False)
@click.argument('release_name', required=False)
@click.argument('release_description', required=False)
def stage(draft, discard, repo_directory, release_name, release_description):
"""
Stages a release
"""
with work_in(repo_directory):
if discard:
stage_command.discard(release_name, release_description)
else:
stage_command.stage(draft, release_name, release_description)
main.add_command(stage)
@click.command()
@click.argument('repo_directory', default='.', required=False)
def publish(repo_directory):
"""
Publishes a release
"""
with work_in(repo_directory):
publish_command.publish()
main.add_command(publish)
|
michaeljoseph/changes | changes/cli.py | stage | python | def stage(draft, discard, repo_directory, release_name, release_description):
with work_in(repo_directory):
if discard:
stage_command.discard(release_name, release_description)
else:
stage_command.stage(draft, release_name, release_description) | Stages a release | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/cli.py#L85-L93 | [
"def discard(release_name='', release_description=''):\n repository = changes.project_settings.repository\n\n release = changes.release_from_pull_requests()\n if release.version == str(repository.latest_version):\n info('No staged release to discard')\n return\n\n info('Discarding currentl... | import contextlib
import os
import click
import requests_cache
import changes
from changes.commands import (
publish as publish_command,
stage as stage_command,
status as status_command,
)
from . import __version__
VERSION = 'changes {}'.format(__version__)
@contextlib.contextmanager
def work_in(dirname=None):
    """
    Context manager version of os.chdir. When exited, returns to the working
    directory prior to entering.
    """
    original_dir = os.getcwd()
    try:
        if dirname is not None:
            os.chdir(dirname)
        # Cache HTTP responses for ten minutes and load project state
        # before handing control back to the caller.
        requests_cache.configure(expire_after=60 * 10 * 10)
        changes.initialise()
        yield
    finally:
        os.chdir(original_dir)
def print_version(context, param, value):
    """Click eager-option callback: print the version string and exit."""
    if value and not context.resilient_parsing:
        click.echo(VERSION)
        context.exit()
@click.option(
'--dry-run',
help='Prints (instead of executing) the operations to be performed.',
is_flag=True,
default=False,
)
@click.option('--verbose', help='Enables verbose output.', is_flag=True, default=False)
@click.version_option(__version__, '-V', '--version', message=VERSION)
@click.group(context_settings=dict(help_option_names=[u'-h', u'--help']))
def main(dry_run, verbose):
"""Ch-ch-changes"""
@click.command()
@click.argument('repo_directory', required=False)
def status(repo_directory):
    """
    Shows current project release status.
    """
    target_directory = repo_directory or '.'
    with work_in(target_directory):
        status_command.status()
main.add_command(status)
@click.command()
@click.option('--draft', help='Enables verbose output.', is_flag=True, default=False)
@click.option(
'--discard',
help='Discards the changes made to release files',
is_flag=True,
default=False,
)
@click.argument('repo_directory', default='.', required=False)
@click.argument('release_name', required=False)
@click.argument('release_description', required=False)
main.add_command(stage)
@click.command()
@click.argument('repo_directory', default='.', required=False)
def publish(repo_directory):
"""
Publishes a release
"""
with work_in(repo_directory):
publish_command.publish()
main.add_command(publish)
|
def generate_changelog(context):
    """Generates an automatic changelog from your commit messages."""
    changelog_content = [
        '\n## [%s](%s/compare/%s...%s)\n\n'
        % (
            context.new_version,
            context.repo_url,
            context.current_version,
            context.new_version,
        )
    ]

    git_log_content = None
    git_log = 'log --oneline --no-merges --no-color'.split(' ')
    try:
        git_log_tag = git_log + ['%s..master' % context.current_version]
        git_log_content = git(git_log_tag)
        log.debug('content: %s' % git_log_content)
    except Exception:
        # Initial release: the previous tag does not exist, so fall
        # back to logging the whole history.  (Logger.warn is a
        # deprecated alias; use warning.)
        log.warning('Error diffing previous version, initial release')
        git_log_content = git(git_log)

    git_log_content = replace_sha_with_commit_link(context.repo_url, git_log_content)

    # turn change log entries into markdown bullet points
    # (plain loop instead of a side-effecting list comprehension)
    if git_log_content:
        for line in git_log_content[:-1]:
            if line:
                changelog_content.append('* %s\n' % line)

    write_new_changelog(
        context.repo_url, 'CHANGELOG.md', changelog_content, dry_run=context.dry_run
    )
    log.info('Added content to CHANGELOG.md')

    context.changelog_content = changelog_content
"def write_new_changelog(repo_url, filename, content_lines, dry_run=True):\n heading_and_newline = '# [Changelog](%s/releases)\\n' % repo_url\n\n with io.open(filename, 'r+') as f:\n existing = f.readlines()\n\n output = existing[2:]\n output.insert(0, '\\n')\n\n for index, line in enumerate(c... | import io
import logging
import re
from plumbum.cmd import git
log = logging.getLogger(__name__)
def write_new_changelog(repo_url, filename, content_lines, dry_run=True):
heading_and_newline = '# [Changelog](%s/releases)\n' % repo_url
with io.open(filename, 'r+') as f:
existing = f.readlines()
output = existing[2:]
output.insert(0, '\n')
for index, line in enumerate(content_lines):
output.insert(0, content_lines[len(content_lines) - index - 1])
output.insert(0, heading_and_newline)
output = ''.join(output)
if not dry_run:
with io.open(filename, 'w+') as f:
f.write(output)
else:
log.info('New changelog:\n%s', ''.join(content_lines))
def replace_sha_with_commit_link(repo_url, git_log_content):
git_log_content = git_log_content.split('\n')
for index, line in enumerate(git_log_content):
# http://stackoverflow.com/a/468378/5549
sha1_re = re.match(r'^[0-9a-f]{5,40}\b', line)
if sha1_re:
sha1 = sha1_re.group()
new_line = line.replace(sha1, '[%s](%s/commit/%s)' % (sha1, repo_url, sha1))
log.debug('old line: %s\nnew line: %s', line, new_line)
git_log_content[index] = new_line
return git_log_content
|
michaeljoseph/changes | changes/util.py | extract | python | def extract(dictionary, keys):
return dict((k, dictionary[k]) for k in keys if k in dictionary) | Extract only the specified keys from a dict
:param dictionary: source dictionary
:param keys: list of keys to extract
:return dict: extracted dictionary | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/util.py#L6-L14 | null | import contextlib
import tempfile
from shutil import rmtree
def extract_arguments(arguments, long_keys, key_prefix='--'):
"""
:param arguments: dict of command line arguments
"""
long_arguments = extract(arguments, long_keys)
return dict(
[(key.replace(key_prefix, ''), value) for key, value in long_arguments.items()]
)
@contextlib.contextmanager
def mktmpdir():
tmp_dir = tempfile.mkdtemp()
try:
yield tmp_dir
finally:
rmtree(tmp_dir)
|
michaeljoseph/changes | changes/util.py | extract_arguments | python | def extract_arguments(arguments, long_keys, key_prefix='--'):
long_arguments = extract(arguments, long_keys)
return dict(
[(key.replace(key_prefix, ''), value) for key, value in long_arguments.items()]
) | :param arguments: dict of command line arguments | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/util.py#L17-L25 | [
"def extract(dictionary, keys):\n \"\"\"\n Extract only the specified keys from a dict\n\n :param dictionary: source dictionary\n :param keys: list of keys to extract\n :return dict: extracted dictionary\n \"\"\"\n return dict((k, dictionary[k]) for k in keys if k in dictionary)\n"
] | import contextlib
import tempfile
from shutil import rmtree
def extract(dictionary, keys):
"""
Extract only the specified keys from a dict
:param dictionary: source dictionary
:param keys: list of keys to extract
:return dict: extracted dictionary
"""
return dict((k, dictionary[k]) for k in keys if k in dictionary)
@contextlib.contextmanager
def mktmpdir():
tmp_dir = tempfile.mkdtemp()
try:
yield tmp_dir
finally:
rmtree(tmp_dir)
|
michaeljoseph/changes | changes/vcs.py | tag_and_push | python | def tag_and_push(context):
tag_option = '--annotate'
if probe.has_signing_key(context):
tag_option = '--sign'
shell.dry_run(
TAG_TEMPLATE % (tag_option, context.new_version, context.new_version),
context.dry_run,
)
shell.dry_run('git push --tags', context.dry_run) | Tags your git repo with the new version number | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/vcs.py#L30-L41 | [
"def dry_run(command, dry_run):\n \"\"\"Executes a shell command unless the dry run option is set\"\"\"\n if not dry_run:\n cmd_parts = command.split(' ')\n # http://plumbum.readthedocs.org/en/latest/local_commands.html#run-and-popen\n return local[cmd_parts[0]](cmd_parts[1:])\n else:\... | import io
import logging
import click
import requests
from uritemplate import expand
from changes import probe, shell
log = logging.getLogger(__name__)
COMMIT_TEMPLATE = 'git commit --message="%s" %s/__init__.py CHANGELOG.md'
TAG_TEMPLATE = 'git tag %s %s --message="%s"'
EXT_TO_MIME_TYPE = {
'.gz': 'application/x-gzip',
'.whl': 'application/zip',
'.zip': 'application/zip',
}
def commit_version_change(context):
# TODO: signed commits?
shell.dry_run(
COMMIT_TEMPLATE % (context.new_version, context.module_name), context.dry_run
)
shell.dry_run('git push', context.dry_run)
def create_github_release(context, gh_token, description):
params = {
'tag_name': context.new_version,
'name': description,
'body': ''.join(context.changelog_content),
'prerelease': True,
}
response = requests.post(
'https://api.github.com/repos/{owner}/{repo}/releases'.format(
owner=context.owner, repo=context.repo
),
auth=(gh_token, 'x-oauth-basic'),
json=params,
).json()
click.echo('Created release {response}'.format(response=response))
return response['upload_url']
def upload_release_distributions(context, gh_token, distributions, upload_url):
for distribution in distributions:
click.echo(
'Uploading {distribution} to {upload_url}'.format(
distribution=distribution, upload_url=upload_url
)
)
response = requests.post(
expand(upload_url, dict(name=distribution.name)),
auth=(gh_token, 'x-oauth-basic'),
headers={'content-type': EXT_TO_MIME_TYPE[distribution.ext]},
data=io.open(distribution, mode='rb'),
verify=False,
)
click.echo('Upload response: {response}'.format(response=response))
|
michaeljoseph/changes | changes/shell.py | dry_run | python | def dry_run(command, dry_run):
if not dry_run:
cmd_parts = command.split(' ')
# http://plumbum.readthedocs.org/en/latest/local_commands.html#run-and-popen
return local[cmd_parts[0]](cmd_parts[1:])
else:
log.info('Dry run of %s, skipping' % command)
return True | Executes a shell command unless the dry run option is set | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/shell.py#L8-L16 | null | import logging
from plumbum import local
log = logging.getLogger(__name__)
|
michaeljoseph/changes | changes/config.py | project_config | python | def project_config():
project_name = curdir
config_path = Path(join(project_name, PROJECT_CONFIG_FILE))
if not exists(config_path):
store_settings(DEFAULTS.copy())
return DEFAULTS
return toml.load(io.open(config_path)) or {} | Deprecated | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/config.py#L195-L205 | [
"def store_settings(settings):\n pass\n"
] | import io
import os
from os.path import curdir, exists, expanduser, expandvars, join
from pathlib import Path
import attr
import click
import inflection
import toml
from changes import compat, prompt
from changes.models import BumpVersion
from .commands import debug, info, note
AUTH_TOKEN_ENVVAR = 'GITHUB_AUTH_TOKEN'
# via https://github.com/jakubroztocil/httpie/blob/6bdfc7a/httpie/config.py#L9
DEFAULT_CONFIG_FILE = str(
os.environ.get(
'CHANGES_CONFIG_FILE',
expanduser('~/.changes')
if not compat.IS_WINDOWS
else expandvars(r'%APPDATA%\\.changes'),
)
)
PROJECT_CONFIG_FILE = '.changes.toml'
DEFAULT_RELEASES_DIRECTORY = 'docs/releases'
@attr.s
class Changes(object):
auth_token = attr.ib()
@classmethod
def load(cls):
tool_config_path = Path(
str(
os.environ.get(
'CHANGES_CONFIG_FILE',
expanduser('~/.changes')
if not compat.IS_WINDOWS
else expandvars(r'%APPDATA%\\.changes'),
)
)
)
tool_settings = None
if tool_config_path.exists():
tool_settings = Changes(**(toml.load(tool_config_path.open())['changes']))
# envvar takes precedence over config file settings
auth_token = os.environ.get(AUTH_TOKEN_ENVVAR)
if auth_token:
info('Found Github Auth Token in the environment')
tool_settings = Changes(auth_token=auth_token)
elif not (tool_settings and tool_settings.auth_token):
while not auth_token:
info('No auth token found, asking for it')
# to interact with the Git*H*ub API
note('You need a Github Auth Token for changes to create a release.')
click.pause(
'Press [enter] to launch the GitHub "New personal access '
'token" page, to create a token for changes.'
)
click.launch('https://github.com/settings/tokens/new')
auth_token = click.prompt('Enter your changes token')
if not tool_settings:
tool_settings = Changes(auth_token=auth_token)
tool_config_path.write_text(
toml.dumps({'changes': attr.asdict(tool_settings)})
)
return tool_settings
@attr.s
class Project(object):
releases_directory = attr.ib()
repository = attr.ib(default=None)
bumpversion = attr.ib(default=None)
labels = attr.ib(default=attr.Factory(dict))
@classmethod
def load(cls, repository):
changes_project_config_path = Path(PROJECT_CONFIG_FILE)
project_settings = None
if changes_project_config_path.exists():
# releases_directory, labels
project_settings = Project(
**(toml.load(changes_project_config_path.open())['changes'])
)
if not project_settings:
releases_directory = Path(
click.prompt(
'Enter the directory to store your releases notes',
DEFAULT_RELEASES_DIRECTORY,
type=click.Path(exists=True, dir_okay=True),
)
)
if not releases_directory.exists():
debug(
'Releases directory {} not found, creating it.'.format(
releases_directory
)
)
releases_directory.mkdir(parents=True)
project_settings = Project(
releases_directory=str(releases_directory),
labels=configure_labels(repository.labels),
)
# write config file
changes_project_config_path.write_text(
toml.dumps({'changes': attr.asdict(project_settings)})
)
project_settings.repository = repository
project_settings.bumpversion = BumpVersion.load(repository.latest_version)
return project_settings
def configure_labels(github_labels):
labels_keyed_by_name = {}
for label in github_labels:
labels_keyed_by_name[label['name']] = label
# TODO: streamlined support for github defaults: enhancement, bug
changelog_worthy_labels = prompt.choose_labels(
[properties['name'] for _, properties in labels_keyed_by_name.items()]
)
# TODO: apply description transform in labels_prompt function
described_labels = {}
# auto-generate label descriptions
for label_name in changelog_worthy_labels:
label_properties = labels_keyed_by_name[label_name]
# Auto-generate description as pluralised titlecase label name
label_properties['description'] = inflection.pluralize(
inflection.titleize(label_name)
)
described_labels[label_name] = label_properties
return described_labels
# TODO: borg legacy
DEFAULTS = {
'changelog': 'CHANGELOG.md',
'readme': 'README.md',
'github_auth_token': None,
}
class Config:
"""Deprecated"""
test_command = None
pypi = None
skip_changelog = None
changelog_content = None
repo = None
def __init__(
self,
module_name,
dry_run,
debug,
no_input,
requirements,
new_version,
current_version,
repo_url,
version_prefix,
):
self.module_name = module_name
# module_name => project_name => curdir
self.dry_run = dry_run
self.debug = debug
self.no_input = no_input
self.requirements = requirements
self.new_version = (
version_prefix + new_version if version_prefix else new_version
)
self.current_version = current_version
def store_settings(settings):
pass
|
michaeljoseph/changes | changes/version.py | increment | python | def increment(version, major=False, minor=False, patch=True):
version = semantic_version.Version(version)
if major:
version.major += 1
version.minor = 0
version.patch = 0
elif minor:
version.minor += 1
version.patch = 0
elif patch:
version.patch += 1
return str(version) | Increment a semantic version
:param version: str of the version to increment
:param major: bool specifying major level version increment
:param minor: bool specifying minor level version increment
:param patch: bool specifying patch level version increment
:return: str of the incremented version | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/version.py#L35-L56 | null | from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import click
import semantic_version
from changes import attributes
log = logging.getLogger(__name__)
def current_version(module_name):
return attributes.extract_attribute(module_name, '__version__')
def get_new_version(
module_name, current_version, no_input, major=False, minor=False, patch=False
):
proposed_new_version = increment(
current_version, major=major, minor=minor, patch=patch
)
if no_input:
new_version = proposed_new_version
else:
new_version = click.prompt(
'What is the release version for "{0}" '.format(module_name),
default=proposed_new_version,
)
return new_version.strip()
def increment_version(context):
"""Increments the __version__ attribute of your module's __init__."""
attributes.replace_attribute(
context.module_name, '__version__', context.new_version, dry_run=context.dry_run
)
log.info(
'Bumped version from %s to %s' % (context.current_version, context.new_version)
)
|
michaeljoseph/changes | changes/version.py | increment_version | python | def increment_version(context):
attributes.replace_attribute(
context.module_name, '__version__', context.new_version, dry_run=context.dry_run
)
log.info(
'Bumped version from %s to %s' % (context.current_version, context.new_version)
) | Increments the __version__ attribute of your module's __init__. | train | https://github.com/michaeljoseph/changes/blob/a8beb409671c58cdf28ee913bad0a5c7d5374ade/changes/version.py#L59-L67 | [
"def replace_attribute(module_name, attribute_name, new_value, dry_run=True):\n \"\"\"Update a metadata attribute\"\"\"\n init_file = '%s/__init__.py' % module_name\n _, tmp_file = tempfile.mkstemp()\n\n with open(init_file) as input_file:\n with open(tmp_file, 'w') as output_file:\n f... | from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import click
import semantic_version
from changes import attributes
log = logging.getLogger(__name__)
def current_version(module_name):
return attributes.extract_attribute(module_name, '__version__')
def get_new_version(
module_name, current_version, no_input, major=False, minor=False, patch=False
):
proposed_new_version = increment(
current_version, major=major, minor=minor, patch=patch
)
if no_input:
new_version = proposed_new_version
else:
new_version = click.prompt(
'What is the release version for "{0}" '.format(module_name),
default=proposed_new_version,
)
return new_version.strip()
def increment(version, major=False, minor=False, patch=True):
"""
Increment a semantic version
:param version: str of the version to increment
:param major: bool specifying major level version increment
:param minor: bool specifying minor level version increment
:param patch: bool specifying patch level version increment
:return: str of the incremented version
"""
version = semantic_version.Version(version)
if major:
version.major += 1
version.minor = 0
version.patch = 0
elif minor:
version.minor += 1
version.patch = 0
elif patch:
version.patch += 1
return str(version)
|
gpennington/PyMarvel | marvel/creator.py | Creator.get_comics | python | def get_comics(self, *args, **kwargs):
from .comic import Comic, ComicDataWrapper
return self.get_related_resource(Comic, ComicDataWrapper, args, kwargs) | Returns a full ComicDataWrapper object for this creator.
/creators/{creatorId}/comics
:returns: ComicDataWrapper -- A new request to API. Contains full results set. | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/creator.py#L140-L149 | [
"def get_related_resource(_self, _Class, _ClassDataWrapper, *args, **kwargs):\n \"\"\"\n Takes a related resource Class \n and returns the related resource DataWrapper.\n For Example: Given a Character instance, return\n a ComicsDataWrapper related to that character.\n /character/{characterId}/com... | class Creator(MarvelObject):
"""
Creator object
Takes a dict of creator attrs
"""
_resource_url = 'creators'
@property
def id(self):
return int(self.dict['id'])
@property
def firstName(self):
return self.dict['firstName']
@property
def middleName(self):
return self.dict['middleName']
@property
def lastName(self):
return self.dict['lastName']
@property
def suffix(self):
return self.dict['suffix']
@property
def fullName(self):
return self.dict['fullName']
@property
def modified(self):
return str_to_datetime(self.dict['modified'])
@property
def modified_raw(self):
return self.dict['modified']
@property
def resourceURI(self):
return self.dict['resourceURI']
@property
def urls(self):
return self.dict['urls']
"""
@property
def wiki(self):
for item in self.dict['urls']:
if item['type'] == 'wiki':
return item['url']
return None
@property
def detail(self):
for item in self.dict['urls']:
if item['type'] == 'detail':
return item['url']
return None
"""
@property
def thumbnail(self):
return "%s.%s" % (self.dict['thumbnail']['path'], self.dict['thumbnail']['extension'] )
@property
def series(self):
"""
Returns SeriesList object
"""
from .series import SeriesList
return SeriesList(self.marvel, self.dict['series'])
@property
def stories(self):
"""
Returns StoryList object
"""
from .story import StoryList
return StoryList(self.marvel, self.dict['stories'])
@property
def comics(self):
from .comic import ComicList
"""
Returns ComicList object
"""
return ComicList(self.marvel, self.dict['comics'])
@property
def events(self):
"""
Returns EventList object
"""
from .event import EventList
return EventList(self.marvel, self.dict['events'])
def get_comics(self, *args, **kwargs):
"""
Returns a full ComicDataWrapper object for this creator.
/creators/{creatorId}/comics
:returns: ComicDataWrapper -- A new request to API. Contains full results set.
"""
from .comic import Comic, ComicDataWrapper
return self.get_related_resource(Comic, ComicDataWrapper, args, kwargs)
def get_events(self, *args, **kwargs):
"""
Returns a full EventDataWrapper object for this creator.
/creators/{creatorId}/events
:returns: EventDataWrapper -- A new request to API. Contains full results set.
"""
from .event import Event, EventDataWrapper
return self.get_related_resource(Event, EventDataWrapper, args, kwargs)
def get_series(self, *args, **kwargs):
"""
Returns a full SeriesDataWrapper object for this creator.
/creators/{creatorId}/series
:returns: SeriesDataWrapper -- A new request to API. Contains full results set.
"""
from .series import Series, SeriesDataWrapper
return self.get_related_resource(Series, SeriesDataWrapper, args, kwargs)
def get_stories(self, *args, **kwargs):
"""
Returns a full StoryDataWrapper object for this creator.
/creators/{creatorId}/stories
:returns: StoriesDataWrapper -- A new request to API. Contains full results set.
"""
from .story import Story, StoryDataWrapper
return self.get_related_resource(Story, StoryDataWrapper, args, kwargs)
|
gpennington/PyMarvel | marvel/creator.py | Creator.get_events | python | def get_events(self, *args, **kwargs):
from .event import Event, EventDataWrapper
return self.get_related_resource(Event, EventDataWrapper, args, kwargs) | Returns a full EventDataWrapper object for this creator.
/creators/{creatorId}/events
:returns: EventDataWrapper -- A new request to API. Contains full results set. | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/creator.py#L151-L160 | [
"def get_related_resource(_self, _Class, _ClassDataWrapper, *args, **kwargs):\n \"\"\"\n Takes a related resource Class \n and returns the related resource DataWrapper.\n For Example: Given a Character instance, return\n a ComicsDataWrapper related to that character.\n /character/{characterId}/com... | class Creator(MarvelObject):
"""
Creator object
Takes a dict of creator attrs
"""
_resource_url = 'creators'
@property
def id(self):
return int(self.dict['id'])
@property
def firstName(self):
return self.dict['firstName']
@property
def middleName(self):
return self.dict['middleName']
@property
def lastName(self):
return self.dict['lastName']
@property
def suffix(self):
return self.dict['suffix']
@property
def fullName(self):
return self.dict['fullName']
@property
def modified(self):
return str_to_datetime(self.dict['modified'])
@property
def modified_raw(self):
return self.dict['modified']
@property
def resourceURI(self):
return self.dict['resourceURI']
@property
def urls(self):
return self.dict['urls']
"""
@property
def wiki(self):
for item in self.dict['urls']:
if item['type'] == 'wiki':
return item['url']
return None
@property
def detail(self):
for item in self.dict['urls']:
if item['type'] == 'detail':
return item['url']
return None
"""
@property
def thumbnail(self):
return "%s.%s" % (self.dict['thumbnail']['path'], self.dict['thumbnail']['extension'] )
@property
def series(self):
"""
Returns SeriesList object
"""
from .series import SeriesList
return SeriesList(self.marvel, self.dict['series'])
@property
def stories(self):
"""
Returns StoryList object
"""
from .story import StoryList
return StoryList(self.marvel, self.dict['stories'])
@property
def comics(self):
from .comic import ComicList
"""
Returns ComicList object
"""
return ComicList(self.marvel, self.dict['comics'])
@property
def events(self):
"""
Returns EventList object
"""
from .event import EventList
return EventList(self.marvel, self.dict['events'])
def get_comics(self, *args, **kwargs):
"""
Returns a full ComicDataWrapper object for this creator.
/creators/{creatorId}/comics
:returns: ComicDataWrapper -- A new request to API. Contains full results set.
"""
from .comic import Comic, ComicDataWrapper
return self.get_related_resource(Comic, ComicDataWrapper, args, kwargs)
def get_series(self, *args, **kwargs):
"""
Returns a full SeriesDataWrapper object for this creator.
/creators/{creatorId}/series
:returns: SeriesDataWrapper -- A new request to API. Contains full results set.
"""
from .series import Series, SeriesDataWrapper
return self.get_related_resource(Series, SeriesDataWrapper, args, kwargs)
def get_stories(self, *args, **kwargs):
"""
Returns a full StoryDataWrapper object for this creator.
/creators/{creatorId}/stories
:returns: StoriesDataWrapper -- A new request to API. Contains full results set.
"""
from .story import Story, StoryDataWrapper
return self.get_related_resource(Story, StoryDataWrapper, args, kwargs)
|
gpennington/PyMarvel | marvel/creator.py | Creator.get_series | python | def get_series(self, *args, **kwargs):
from .series import Series, SeriesDataWrapper
return self.get_related_resource(Series, SeriesDataWrapper, args, kwargs) | Returns a full SeriesDataWrapper object for this creator.
/creators/{creatorId}/series
:returns: SeriesDataWrapper -- A new request to API. Contains full results set. | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/creator.py#L162-L171 | [
"def get_related_resource(_self, _Class, _ClassDataWrapper, *args, **kwargs):\n \"\"\"\n Takes a related resource Class \n and returns the related resource DataWrapper.\n For Example: Given a Character instance, return\n a ComicsDataWrapper related to that character.\n /character/{characterId}/com... | class Creator(MarvelObject):
"""
Creator object
Takes a dict of creator attrs
"""
_resource_url = 'creators'
@property
def id(self):
return int(self.dict['id'])
@property
def firstName(self):
return self.dict['firstName']
@property
def middleName(self):
return self.dict['middleName']
@property
def lastName(self):
return self.dict['lastName']
@property
def suffix(self):
return self.dict['suffix']
@property
def fullName(self):
return self.dict['fullName']
@property
def modified(self):
return str_to_datetime(self.dict['modified'])
@property
def modified_raw(self):
return self.dict['modified']
@property
def resourceURI(self):
return self.dict['resourceURI']
@property
def urls(self):
return self.dict['urls']
"""
@property
def wiki(self):
for item in self.dict['urls']:
if item['type'] == 'wiki':
return item['url']
return None
@property
def detail(self):
for item in self.dict['urls']:
if item['type'] == 'detail':
return item['url']
return None
"""
@property
def thumbnail(self):
return "%s.%s" % (self.dict['thumbnail']['path'], self.dict['thumbnail']['extension'] )
@property
def series(self):
"""
Returns SeriesList object
"""
from .series import SeriesList
return SeriesList(self.marvel, self.dict['series'])
@property
def stories(self):
"""
Returns StoryList object
"""
from .story import StoryList
return StoryList(self.marvel, self.dict['stories'])
@property
def comics(self):
from .comic import ComicList
"""
Returns ComicList object
"""
return ComicList(self.marvel, self.dict['comics'])
@property
def events(self):
"""
Returns EventList object
"""
from .event import EventList
return EventList(self.marvel, self.dict['events'])
def get_comics(self, *args, **kwargs):
"""
Returns a full ComicDataWrapper object for this creator.
/creators/{creatorId}/comics
:returns: ComicDataWrapper -- A new request to API. Contains full results set.
"""
from .comic import Comic, ComicDataWrapper
return self.get_related_resource(Comic, ComicDataWrapper, args, kwargs)
def get_events(self, *args, **kwargs):
"""
Returns a full EventDataWrapper object for this creator.
/creators/{creatorId}/events
:returns: EventDataWrapper -- A new request to API. Contains full results set.
"""
from .event import Event, EventDataWrapper
return self.get_related_resource(Event, EventDataWrapper, args, kwargs)
def get_stories(self, *args, **kwargs):
"""
Returns a full StoryDataWrapper object for this creator.
/creators/{creatorId}/stories
:returns: StoriesDataWrapper -- A new request to API. Contains full results set.
"""
from .story import Story, StoryDataWrapper
return self.get_related_resource(Story, StoryDataWrapper, args, kwargs)
|
gpennington/PyMarvel | marvel/creator.py | Creator.get_stories | python | def get_stories(self, *args, **kwargs):
from .story import Story, StoryDataWrapper
return self.get_related_resource(Story, StoryDataWrapper, args, kwargs) | Returns a full StoryDataWrapper object for this creator.
/creators/{creatorId}/stories
:returns: StoriesDataWrapper -- A new request to API. Contains full results set. | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/creator.py#L173-L182 | [
"def get_related_resource(_self, _Class, _ClassDataWrapper, *args, **kwargs):\n \"\"\"\n Takes a related resource Class \n and returns the related resource DataWrapper.\n For Example: Given a Character instance, return\n a ComicsDataWrapper related to that character.\n /character/{characterId}/com... | class Creator(MarvelObject):
"""
Creator object
Takes a dict of creator attrs
"""
_resource_url = 'creators'
@property
def id(self):
return int(self.dict['id'])
@property
def firstName(self):
return self.dict['firstName']
@property
def middleName(self):
return self.dict['middleName']
@property
def lastName(self):
return self.dict['lastName']
@property
def suffix(self):
return self.dict['suffix']
@property
def fullName(self):
return self.dict['fullName']
@property
def modified(self):
return str_to_datetime(self.dict['modified'])
@property
def modified_raw(self):
return self.dict['modified']
@property
def resourceURI(self):
return self.dict['resourceURI']
@property
def urls(self):
return self.dict['urls']
"""
@property
def wiki(self):
for item in self.dict['urls']:
if item['type'] == 'wiki':
return item['url']
return None
@property
def detail(self):
for item in self.dict['urls']:
if item['type'] == 'detail':
return item['url']
return None
"""
@property
def thumbnail(self):
return "%s.%s" % (self.dict['thumbnail']['path'], self.dict['thumbnail']['extension'] )
@property
def series(self):
"""
Returns SeriesList object
"""
from .series import SeriesList
return SeriesList(self.marvel, self.dict['series'])
@property
def stories(self):
"""
Returns StoryList object
"""
from .story import StoryList
return StoryList(self.marvel, self.dict['stories'])
@property
def comics(self):
from .comic import ComicList
"""
Returns ComicList object
"""
return ComicList(self.marvel, self.dict['comics'])
@property
def events(self):
"""
Returns EventList object
"""
from .event import EventList
return EventList(self.marvel, self.dict['events'])
def get_comics(self, *args, **kwargs):
"""
Returns a full ComicDataWrapper object for this creator.
/creators/{creatorId}/comics
:returns: ComicDataWrapper -- A new request to API. Contains full results set.
"""
from .comic import Comic, ComicDataWrapper
return self.get_related_resource(Comic, ComicDataWrapper, args, kwargs)
def get_events(self, *args, **kwargs):
"""
Returns a full EventDataWrapper object for this creator.
/creators/{creatorId}/events
:returns: EventDataWrapper -- A new request to API. Contains full results set.
"""
from .event import Event, EventDataWrapper
return self.get_related_resource(Event, EventDataWrapper, args, kwargs)
def get_series(self, *args, **kwargs):
"""
Returns a full SeriesDataWrapper object for this creator.
/creators/{creatorId}/series
:returns: SeriesDataWrapper -- A new request to API. Contains full results set.
"""
from .series import Series, SeriesDataWrapper
return self.get_related_resource(Series, SeriesDataWrapper, args, kwargs)
|
gpennington/PyMarvel | marvel/core.py | MarvelObject.list_to_instance_list | python | def list_to_instance_list(_self, _list, _Class):
items = []
for item in _list:
items.append(_Class(_self.marvel, item))
return items | Takes a list of resource dicts and returns a list
of resource instances, defined by the _Class param.
:param _self: Original resource calling the method
:type _self: core.MarvelObject
:param _list: List of dicts describing a Resource.
:type _list: list
:param _Class: The Resource class to create a list of (Comic, Creator, etc).
:type _Class: core.MarvelObject
:returns: list -- List of Resource instances (Comic, Creator, etc). | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/core.py#L39-L56 | null | class MarvelObject(object):
"""
Base class for all Marvel API classes
"""
def __init__(self, marvel, dict):
self.marvel = marvel
self.dict = dict
def __unicode__(self):
"""
:returns: str -- Name or Title of Resource
"""
try:
return self.dict['name']
except:
return self.dict['title']
def to_dict(self):
"""
:returns: dict -- Dictionary representation of the Resource
"""
return self.dict
@classmethod
def resource_url(cls):
"""
:returns: str -- Resource URL
"""
return cls._resource_url
def list_to_instance_list(_self, _list, _Class):
"""
Takes a list of resource dicts and returns a list
of resource instances, defined by the _Class param.
:param _self: Original resource calling the method
:type _self: core.MarvelObject
:param _list: List of dicts describing a Resource.
:type _list: list
:param _Class: The Resource class to create a list of (Comic, Creator, etc).
:type _Class: core.MarvelObject
:returns: list -- List of Resource instances (Comic, Creator, etc).
"""
items = []
for item in _list:
items.append(_Class(_self.marvel, item))
return items
def get_related_resource(_self, _Class, _ClassDataWrapper, *args, **kwargs):
"""
Takes a related resource Class
and returns the related resource DataWrapper.
For Example: Given a Character instance, return
a ComicsDataWrapper related to that character.
/character/{characterId}/comics
:param _Class: The Resource class retrieve
:type _Class: core.MarvelObject
:param _ClassDataWrapper: The Resource response object
:type _Class: core.MarvelObject
:param kwargs: dict of query params for the API
:type kwargs: dict
:returns: DataWrapper -- DataWrapper for requested Resource
"""
url = "%s/%s/%s" % (_self.resource_url(), _self.id, _Class.resource_url())
response = json.loads(_self.marvel._call(url, _self.marvel._params(kwargs)).text)
return _ClassDataWrapper(_self.marvel, response)
|
gpennington/PyMarvel | marvel/core.py | MarvelObject.get_related_resource | python | def get_related_resource(_self, _Class, _ClassDataWrapper, *args, **kwargs):
url = "%s/%s/%s" % (_self.resource_url(), _self.id, _Class.resource_url())
response = json.loads(_self.marvel._call(url, _self.marvel._params(kwargs)).text)
return _ClassDataWrapper(_self.marvel, response) | Takes a related resource Class
and returns the related resource DataWrapper.
For Example: Given a Character instance, return
a ComicsDataWrapper related to that character.
/character/{characterId}/comics
:param _Class: The Resource class retrieve
:type _Class: core.MarvelObject
:param _ClassDataWrapper: The Resource response object
:type _Class: core.MarvelObject
:param kwargs: dict of query params for the API
:type kwargs: dict
:returns: DataWrapper -- DataWrapper for requested Resource | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/core.py#L58-L77 | [
"def resource_url(cls):\n \"\"\"\n :returns: str -- Resource URL\n \"\"\"\n return cls._resource_url\n"
] | class MarvelObject(object):
"""
Base class for all Marvel API classes
"""
def __init__(self, marvel, dict):
self.marvel = marvel
self.dict = dict
def __unicode__(self):
"""
:returns: str -- Name or Title of Resource
"""
try:
return self.dict['name']
except:
return self.dict['title']
def to_dict(self):
"""
:returns: dict -- Dictionary representation of the Resource
"""
return self.dict
@classmethod
def resource_url(cls):
"""
:returns: str -- Resource URL
"""
return cls._resource_url
def list_to_instance_list(_self, _list, _Class):
"""
Takes a list of resource dicts and returns a list
of resource instances, defined by the _Class param.
:param _self: Original resource calling the method
:type _self: core.MarvelObject
:param _list: List of dicts describing a Resource.
:type _list: list
:param _Class: The Resource class to create a list of (Comic, Creator, etc).
:type _Class: core.MarvelObject
:returns: list -- List of Resource instances (Comic, Creator, etc).
"""
items = []
for item in _list:
items.append(_Class(_self.marvel, item))
return items
def get_related_resource(_self, _Class, _ClassDataWrapper, *args, **kwargs):
"""
Takes a related resource Class
and returns the related resource DataWrapper.
For Example: Given a Character instance, return
a ComicsDataWrapper related to that character.
/character/{characterId}/comics
:param _Class: The Resource class retrieve
:type _Class: core.MarvelObject
:param _ClassDataWrapper: The Resource response object
:type _Class: core.MarvelObject
:param kwargs: dict of query params for the API
:type kwargs: dict
:returns: DataWrapper -- DataWrapper for requested Resource
"""
url = "%s/%s/%s" % (_self.resource_url(), _self.id, _Class.resource_url())
response = json.loads(_self.marvel._call(url, _self.marvel._params(kwargs)).text)
return _ClassDataWrapper(_self.marvel, response)
|
gpennington/PyMarvel | marvel/character.py | CharacterDataWrapper.next | python | def next(self):
self.params['offset'] = str(int(self.params['offset']) + int(self.params['limit']))
return self.marvel.get_characters(self.marvel, (), **self.params) | Returns new CharacterDataWrapper
TODO: Don't raise offset past count - limit | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/character.py#L15-L21 | null | class CharacterDataWrapper(DataWrapper):
@property
def data(self):
return CharacterDataContainer(self.marvel, self.dict['data'])
def previous(self):
"""
Returns new CharacterDataWrapper
TODO: Don't lower offset below 0
"""
self.params['offset'] = str(int(self.params['offset']) - int(self.params['limit']))
return self.marvel.get_characters(self.marvel, (), **self.params)
|
gpennington/PyMarvel | marvel/character.py | CharacterDataWrapper.previous | python | def previous(self):
self.params['offset'] = str(int(self.params['offset']) - int(self.params['limit']))
return self.marvel.get_characters(self.marvel, (), **self.params) | Returns new CharacterDataWrapper
TODO: Don't lower offset below 0 | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/character.py#L23-L29 | null | class CharacterDataWrapper(DataWrapper):
@property
def data(self):
return CharacterDataContainer(self.marvel, self.dict['data'])
def next(self):
"""
Returns new CharacterDataWrapper
TODO: Don't raise offset past count - limit
"""
self.params['offset'] = str(int(self.params['offset']) + int(self.params['limit']))
return self.marvel.get_characters(self.marvel, (), **self.params)
|
gpennington/PyMarvel | marvel/marvel.py | Marvel._call | python | def _call(self, resource_url, params=None):
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url) | Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L38-L55 | [
"def _endpoint(self):\n return \"http://gateway.marvel.com/%s/public/\" % (DEFAULT_API_VERSION)\n",
"def _auth(self):\n \"\"\"\n Creates hash from api keys and returns all required parametsrs\n\n :returns: str -- URL encoded query parameters containing \"ts\", \"apikey\", and \"hash\"\n \"\"\"\n ... | class Marvel(object):
"""Marvel API class
This class provides methods to interface with the Marvel API
>>> m = Marvel("acb123....", "efg456...")
"""
def __init__(self, public_key, private_key):
self.public_key = public_key
self.private_key = private_key
def _endpoint(self):
return "http://gateway.marvel.com/%s/public/" % (DEFAULT_API_VERSION)
def _call(self, resource_url, params=None):
"""
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
"""
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
def _params(self, params):
"""
Takes dictionary of parameters and returns
urlencoded string
:param params: Dict of query params to encode
:type params: dict
:returns: str -- URL encoded query parameters
"""
return urllib.urlencode(params)
def _auth(self):
"""
Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
"""
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
#public methods
def get_character(self, id):
"""Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
"""
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
def get_characters(self, *args, **kwargs):
"""Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
"""
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
def get_comic(self, id):
"""Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
"""
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
def get_comics(self, *args, **kwargs):
"""
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
"""
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
def get_creator(self, id):
"""Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
"""
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
def get_event(self, id):
"""Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
"""
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
def get_events(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
"""
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response)
def get_single_series(self, id):
"""Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
"""
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
def get_story(self, id):
"""Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
"""
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
def get_stories(self, *args, **kwargs):
"""Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
"""
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) |
gpennington/PyMarvel | marvel/marvel.py | Marvel._auth | python | def _auth(self):
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string) | Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash" | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L69-L77 | null | class Marvel(object):
"""Marvel API class
This class provides methods to interface with the Marvel API
>>> m = Marvel("acb123....", "efg456...")
"""
def __init__(self, public_key, private_key):
self.public_key = public_key
self.private_key = private_key
def _endpoint(self):
return "http://gateway.marvel.com/%s/public/" % (DEFAULT_API_VERSION)
def _call(self, resource_url, params=None):
"""
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
"""
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
def _params(self, params):
"""
Takes dictionary of parameters and returns
urlencoded string
:param params: Dict of query params to encode
:type params: dict
:returns: str -- URL encoded query parameters
"""
return urllib.urlencode(params)
def _auth(self):
"""
Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
"""
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
#public methods
def get_character(self, id):
"""Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
"""
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
def get_characters(self, *args, **kwargs):
"""Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
"""
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
def get_comic(self, id):
"""Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
"""
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
def get_comics(self, *args, **kwargs):
"""
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
"""
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
def get_creator(self, id):
"""Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
"""
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
def get_event(self, id):
"""Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
"""
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
def get_events(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
"""
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response)
def get_single_series(self, id):
"""Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
"""
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
def get_story(self, id):
"""Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
"""
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
def get_stories(self, *args, **kwargs):
"""Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
"""
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) |
gpennington/PyMarvel | marvel/marvel.py | Marvel.get_character | python | def get_character(self, id):
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response) | Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L84-L104 | [
"def resource_url(cls):\n \"\"\"\n :returns: str -- Resource URL\n \"\"\"\n return cls._resource_url\n",
"def _call(self, resource_url, params=None):\n \"\"\"\n Calls the Marvel API endpoint\n\n :param resource_url: url slug of the resource\n :type resource_url: str\n :param params: qu... | class Marvel(object):
"""Marvel API class
This class provides methods to interface with the Marvel API
>>> m = Marvel("acb123....", "efg456...")
"""
def __init__(self, public_key, private_key):
self.public_key = public_key
self.private_key = private_key
def _endpoint(self):
return "http://gateway.marvel.com/%s/public/" % (DEFAULT_API_VERSION)
def _call(self, resource_url, params=None):
"""
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
"""
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
def _params(self, params):
"""
Takes dictionary of parameters and returns
urlencoded string
:param params: Dict of query params to encode
:type params: dict
:returns: str -- URL encoded query parameters
"""
return urllib.urlencode(params)
def _auth(self):
"""
Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
"""
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
#public methods
def get_character(self, id):
"""Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
"""
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
def get_characters(self, *args, **kwargs):
"""Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
"""
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
def get_comic(self, id):
"""Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
"""
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
def get_comics(self, *args, **kwargs):
"""
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
"""
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
def get_creator(self, id):
"""Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
"""
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
def get_event(self, id):
"""Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
"""
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
def get_events(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
"""
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response)
def get_single_series(self, id):
"""Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
"""
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
def get_story(self, id):
"""Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
"""
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
def get_stories(self, *args, **kwargs):
"""Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
"""
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) |
gpennington/PyMarvel | marvel/marvel.py | Marvel.get_characters | python | def get_characters(self, *args, **kwargs):
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs) | Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L106-L128 | [
"def resource_url(cls):\n \"\"\"\n :returns: str -- Resource URL\n \"\"\"\n return cls._resource_url\n",
"def _call(self, resource_url, params=None):\n \"\"\"\n Calls the Marvel API endpoint\n\n :param resource_url: url slug of the resource\n :type resource_url: str\n :param params: qu... | class Marvel(object):
"""Marvel API class
This class provides methods to interface with the Marvel API
>>> m = Marvel("acb123....", "efg456...")
"""
def __init__(self, public_key, private_key):
self.public_key = public_key
self.private_key = private_key
def _endpoint(self):
return "http://gateway.marvel.com/%s/public/" % (DEFAULT_API_VERSION)
def _call(self, resource_url, params=None):
"""
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
"""
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
def _params(self, params):
"""
Takes dictionary of parameters and returns
urlencoded string
:param params: Dict of query params to encode
:type params: dict
:returns: str -- URL encoded query parameters
"""
return urllib.urlencode(params)
def _auth(self):
"""
Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
"""
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
#public methods
def get_character(self, id):
"""Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
"""
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
def get_characters(self, *args, **kwargs):
"""Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
"""
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
def get_comic(self, id):
"""Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
"""
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
def get_comics(self, *args, **kwargs):
"""
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
"""
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
def get_creator(self, id):
"""Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
"""
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
def get_event(self, id):
"""Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
"""
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
def get_events(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
"""
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response)
def get_single_series(self, id):
"""Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
"""
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
def get_story(self, id):
"""Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
"""
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
def get_stories(self, *args, **kwargs):
"""Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
"""
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) |
gpennington/PyMarvel | marvel/marvel.py | Marvel.get_comic | python | def get_comic(self, id):
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response) | Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L130-L150 | [
"def resource_url(cls):\n \"\"\"\n :returns: str -- Resource URL\n \"\"\"\n return cls._resource_url\n",
"def _call(self, resource_url, params=None):\n \"\"\"\n Calls the Marvel API endpoint\n\n :param resource_url: url slug of the resource\n :type resource_url: str\n :param params: qu... | class Marvel(object):
"""Marvel API class
This class provides methods to interface with the Marvel API
>>> m = Marvel("acb123....", "efg456...")
"""
def __init__(self, public_key, private_key):
self.public_key = public_key
self.private_key = private_key
def _endpoint(self):
return "http://gateway.marvel.com/%s/public/" % (DEFAULT_API_VERSION)
def _call(self, resource_url, params=None):
"""
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
"""
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
def _params(self, params):
"""
Takes dictionary of parameters and returns
urlencoded string
:param params: Dict of query params to encode
:type params: dict
:returns: str -- URL encoded query parameters
"""
return urllib.urlencode(params)
def _auth(self):
"""
Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
"""
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
#public methods
def get_character(self, id):
"""Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
"""
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
def get_characters(self, *args, **kwargs):
"""Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
"""
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
def get_comic(self, id):
"""Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
"""
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
def get_comics(self, *args, **kwargs):
"""
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
"""
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
def get_creator(self, id):
"""Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
"""
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
def get_event(self, id):
"""Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
"""
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
def get_events(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
"""
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response)
def get_single_series(self, id):
"""Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
"""
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
def get_story(self, id):
"""Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
"""
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
def get_stories(self, *args, **kwargs):
"""Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
"""
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) |
gpennington/PyMarvel | marvel/marvel.py | Marvel.get_comics | python | def get_comics(self, *args, **kwargs):
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response) | Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L152-L170 | [
"def resource_url(cls):\n \"\"\"\n :returns: str -- Resource URL\n \"\"\"\n return cls._resource_url\n",
"def _call(self, resource_url, params=None):\n \"\"\"\n Calls the Marvel API endpoint\n\n :param resource_url: url slug of the resource\n :type resource_url: str\n :param params: qu... | class Marvel(object):
"""Marvel API class
This class provides methods to interface with the Marvel API
>>> m = Marvel("acb123....", "efg456...")
"""
def __init__(self, public_key, private_key):
self.public_key = public_key
self.private_key = private_key
def _endpoint(self):
return "http://gateway.marvel.com/%s/public/" % (DEFAULT_API_VERSION)
def _call(self, resource_url, params=None):
"""
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
"""
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
def _params(self, params):
"""
Takes dictionary of parameters and returns
urlencoded string
:param params: Dict of query params to encode
:type params: dict
:returns: str -- URL encoded query parameters
"""
return urllib.urlencode(params)
def _auth(self):
"""
Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
"""
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
#public methods
def get_character(self, id):
"""Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
"""
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
def get_characters(self, *args, **kwargs):
"""Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
"""
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
def get_comic(self, id):
"""Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
"""
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
def get_comics(self, *args, **kwargs):
"""
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
"""
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
def get_creator(self, id):
"""Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
"""
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
def get_event(self, id):
"""Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
"""
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
def get_events(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
"""
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response)
def get_single_series(self, id):
"""Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
"""
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
def get_story(self, id):
"""Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
"""
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
def get_stories(self, *args, **kwargs):
"""Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
"""
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) |
gpennington/PyMarvel | marvel/marvel.py | Marvel.get_creator | python | def get_creator(self, id):
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response) | Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L173-L193 | [
"def resource_url(cls):\n \"\"\"\n :returns: str -- Resource URL\n \"\"\"\n return cls._resource_url\n",
"def _call(self, resource_url, params=None):\n \"\"\"\n Calls the Marvel API endpoint\n\n :param resource_url: url slug of the resource\n :type resource_url: str\n :param params: qu... | class Marvel(object):
"""Marvel API class
This class provides methods to interface with the Marvel API
>>> m = Marvel("acb123....", "efg456...")
"""
def __init__(self, public_key, private_key):
self.public_key = public_key
self.private_key = private_key
def _endpoint(self):
return "http://gateway.marvel.com/%s/public/" % (DEFAULT_API_VERSION)
def _call(self, resource_url, params=None):
"""
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
"""
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
def _params(self, params):
"""
Takes dictionary of parameters and returns
urlencoded string
:param params: Dict of query params to encode
:type params: dict
:returns: str -- URL encoded query parameters
"""
return urllib.urlencode(params)
def _auth(self):
"""
Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
"""
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
#public methods
def get_character(self, id):
"""Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
"""
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
def get_characters(self, *args, **kwargs):
"""Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
"""
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
def get_comic(self, id):
"""Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
"""
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
def get_comics(self, *args, **kwargs):
"""
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
"""
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
def get_creator(self, id):
"""Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
"""
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
def get_event(self, id):
"""Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
"""
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
def get_events(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
"""
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response)
def get_single_series(self, id):
"""Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
"""
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
def get_story(self, id):
"""Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
"""
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
def get_stories(self, *args, **kwargs):
"""Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
"""
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) |
gpennington/PyMarvel | marvel/marvel.py | Marvel.get_creators | python | def get_creators(self, *args, **kwargs):
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response) | Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L196-L212 | [
"def resource_url(cls):\n \"\"\"\n :returns: str -- Resource URL\n \"\"\"\n return cls._resource_url\n",
"def _call(self, resource_url, params=None):\n \"\"\"\n Calls the Marvel API endpoint\n\n :param resource_url: url slug of the resource\n :type resource_url: str\n :param params: qu... | class Marvel(object):
"""Marvel API class
This class provides methods to interface with the Marvel API
>>> m = Marvel("acb123....", "efg456...")
"""
def __init__(self, public_key, private_key):
self.public_key = public_key
self.private_key = private_key
def _endpoint(self):
return "http://gateway.marvel.com/%s/public/" % (DEFAULT_API_VERSION)
def _call(self, resource_url, params=None):
"""
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
"""
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
def _params(self, params):
"""
Takes dictionary of parameters and returns
urlencoded string
:param params: Dict of query params to encode
:type params: dict
:returns: str -- URL encoded query parameters
"""
return urllib.urlencode(params)
def _auth(self):
"""
Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
"""
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
#public methods
def get_character(self, id):
"""Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
"""
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
def get_characters(self, *args, **kwargs):
"""Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
"""
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
def get_comic(self, id):
"""Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
"""
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
def get_comics(self, *args, **kwargs):
"""
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
"""
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
def get_creator(self, id):
"""Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
"""
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
def get_event(self, id):
"""Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
"""
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
def get_events(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
"""
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response)
def get_single_series(self, id):
"""Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
"""
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
def get_story(self, id):
"""Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
"""
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
def get_stories(self, *args, **kwargs):
"""Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
"""
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) |
gpennington/PyMarvel | marvel/marvel.py | Marvel.get_event | python | def get_event(self, id):
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response) | Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L215-L233 | [
"def resource_url(cls):\n \"\"\"\n :returns: str -- Resource URL\n \"\"\"\n return cls._resource_url\n",
"def _call(self, resource_url, params=None):\n \"\"\"\n Calls the Marvel API endpoint\n\n :param resource_url: url slug of the resource\n :type resource_url: str\n :param params: qu... | class Marvel(object):
"""Marvel API class
This class provides methods to interface with the Marvel API
>>> m = Marvel("acb123....", "efg456...")
"""
def __init__(self, public_key, private_key):
self.public_key = public_key
self.private_key = private_key
def _endpoint(self):
return "http://gateway.marvel.com/%s/public/" % (DEFAULT_API_VERSION)
def _call(self, resource_url, params=None):
"""
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
"""
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
def _params(self, params):
"""
Takes dictionary of parameters and returns
urlencoded string
:param params: Dict of query params to encode
:type params: dict
:returns: str -- URL encoded query parameters
"""
return urllib.urlencode(params)
def _auth(self):
"""
Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
"""
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
#public methods
def get_character(self, id):
"""Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
"""
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
def get_characters(self, *args, **kwargs):
"""Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
"""
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
def get_comic(self, id):
"""Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
"""
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
def get_comics(self, *args, **kwargs):
"""
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
"""
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
def get_creator(self, id):
"""Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
"""
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
def get_event(self, id):
"""Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
"""
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
def get_events(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
"""
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response)
def get_single_series(self, id):
"""Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
"""
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
def get_story(self, id):
"""Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
"""
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
def get_stories(self, *args, **kwargs):
"""Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
"""
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) |
gpennington/PyMarvel | marvel/marvel.py | Marvel.get_events | python | def get_events(self, *args, **kwargs):
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response) | Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L236-L256 | [
"def resource_url(cls):\n \"\"\"\n :returns: str -- Resource URL\n \"\"\"\n return cls._resource_url\n",
"def _call(self, resource_url, params=None):\n \"\"\"\n Calls the Marvel API endpoint\n\n :param resource_url: url slug of the resource\n :type resource_url: str\n :param params: qu... | class Marvel(object):
"""Marvel API class
This class provides methods to interface with the Marvel API
>>> m = Marvel("acb123....", "efg456...")
"""
def __init__(self, public_key, private_key):
self.public_key = public_key
self.private_key = private_key
def _endpoint(self):
return "http://gateway.marvel.com/%s/public/" % (DEFAULT_API_VERSION)
def _call(self, resource_url, params=None):
"""
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
"""
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
def _params(self, params):
"""
Takes dictionary of parameters and returns
urlencoded string
:param params: Dict of query params to encode
:type params: dict
:returns: str -- URL encoded query parameters
"""
return urllib.urlencode(params)
def _auth(self):
"""
Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
"""
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
#public methods
def get_character(self, id):
"""Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
"""
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
def get_characters(self, *args, **kwargs):
"""Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
"""
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
def get_comic(self, id):
"""Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
"""
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
def get_comics(self, *args, **kwargs):
"""
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
"""
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
def get_creator(self, id):
"""Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
"""
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
def get_event(self, id):
"""Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
"""
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
def get_single_series(self, id):
"""Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
"""
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
def get_story(self, id):
"""Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
"""
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
def get_stories(self, *args, **kwargs):
"""Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
"""
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) |
gpennington/PyMarvel | marvel/marvel.py | Marvel.get_single_series | python | def get_single_series(self, id):
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response) | Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010) | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L259-L277 | [
"def resource_url(cls):\n \"\"\"\n :returns: str -- Resource URL\n \"\"\"\n return cls._resource_url\n",
"def _call(self, resource_url, params=None):\n \"\"\"\n Calls the Marvel API endpoint\n\n :param resource_url: url slug of the resource\n :type resource_url: str\n :param params: qu... | class Marvel(object):
"""Marvel API class
This class provides methods to interface with the Marvel API
>>> m = Marvel("acb123....", "efg456...")
"""
def __init__(self, public_key, private_key):
self.public_key = public_key
self.private_key = private_key
def _endpoint(self):
return "http://gateway.marvel.com/%s/public/" % (DEFAULT_API_VERSION)
def _call(self, resource_url, params=None):
"""
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
"""
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
def _params(self, params):
"""
Takes dictionary of parameters and returns
urlencoded string
:param params: Dict of query params to encode
:type params: dict
:returns: str -- URL encoded query parameters
"""
return urllib.urlencode(params)
def _auth(self):
"""
Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
"""
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
#public methods
def get_character(self, id):
"""Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
"""
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
def get_characters(self, *args, **kwargs):
"""Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
"""
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
def get_comic(self, id):
"""Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
"""
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
def get_comics(self, *args, **kwargs):
"""
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
"""
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
def get_creator(self, id):
"""Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
"""
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
def get_event(self, id):
"""Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
"""
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
def get_events(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
"""
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response)
def get_single_series(self, id):
"""Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
"""
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
def get_story(self, id):
"""Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
"""
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
def get_stories(self, *args, **kwargs):
"""Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
"""
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) |
gpennington/PyMarvel | marvel/marvel.py | Marvel.get_series | python | def get_series(self, *args, **kwargs):
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response) | Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010) | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L280-L299 | [
"def resource_url(cls):\n \"\"\"\n :returns: str -- Resource URL\n \"\"\"\n return cls._resource_url\n",
"def _call(self, resource_url, params=None):\n \"\"\"\n Calls the Marvel API endpoint\n\n :param resource_url: url slug of the resource\n :type resource_url: str\n :param params: qu... | class Marvel(object):
"""Marvel API class
This class provides methods to interface with the Marvel API
>>> m = Marvel("acb123....", "efg456...")
"""
def __init__(self, public_key, private_key):
self.public_key = public_key
self.private_key = private_key
def _endpoint(self):
return "http://gateway.marvel.com/%s/public/" % (DEFAULT_API_VERSION)
def _call(self, resource_url, params=None):
"""
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
"""
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
def _params(self, params):
"""
Takes dictionary of parameters and returns
urlencoded string
:param params: Dict of query params to encode
:type params: dict
:returns: str -- URL encoded query parameters
"""
return urllib.urlencode(params)
def _auth(self):
"""
Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
"""
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
#public methods
def get_character(self, id):
"""Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
"""
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
def get_characters(self, *args, **kwargs):
"""Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
"""
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
def get_comic(self, id):
"""Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
"""
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
def get_comics(self, *args, **kwargs):
"""
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
"""
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
def get_creator(self, id):
"""Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
"""
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
def get_event(self, id):
"""Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
"""
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
def get_events(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
"""
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response)
def get_single_series(self, id):
"""Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
"""
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
def get_story(self, id):
"""Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
"""
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
def get_stories(self, *args, **kwargs):
"""Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
"""
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) |
gpennington/PyMarvel | marvel/marvel.py | Marvel.get_story | python | def get_story(self, id):
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response) | Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself... | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L301-L319 | [
"def resource_url(cls):\n \"\"\"\n :returns: str -- Resource URL\n \"\"\"\n return cls._resource_url\n",
"def _call(self, resource_url, params=None):\n \"\"\"\n Calls the Marvel API endpoint\n\n :param resource_url: url slug of the resource\n :type resource_url: str\n :param params: qu... | class Marvel(object):
"""Marvel API class
This class provides methods to interface with the Marvel API
>>> m = Marvel("acb123....", "efg456...")
"""
def __init__(self, public_key, private_key):
self.public_key = public_key
self.private_key = private_key
def _endpoint(self):
return "http://gateway.marvel.com/%s/public/" % (DEFAULT_API_VERSION)
def _call(self, resource_url, params=None):
"""
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
"""
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
def _params(self, params):
"""
Takes dictionary of parameters and returns
urlencoded string
:param params: Dict of query params to encode
:type params: dict
:returns: str -- URL encoded query parameters
"""
return urllib.urlencode(params)
def _auth(self):
"""
Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
"""
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
#public methods
def get_character(self, id):
"""Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
"""
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
def get_characters(self, *args, **kwargs):
"""Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
"""
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
def get_comic(self, id):
"""Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
"""
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
def get_comics(self, *args, **kwargs):
"""
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
"""
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
def get_creator(self, id):
"""Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
"""
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
def get_event(self, id):
"""Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
"""
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
def get_events(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
"""
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response)
def get_single_series(self, id):
"""Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
"""
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
def get_story(self, id):
"""Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
"""
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
def get_stories(self, *args, **kwargs):
"""Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
"""
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) |
gpennington/PyMarvel | marvel/marvel.py | Marvel.get_stories | python | def get_stories(self, *args, **kwargs):
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) | Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477 | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/marvel.py#L322-L342 | [
"def resource_url(cls):\n \"\"\"\n :returns: str -- Resource URL\n \"\"\"\n return cls._resource_url\n",
"def _call(self, resource_url, params=None):\n \"\"\"\n Calls the Marvel API endpoint\n\n :param resource_url: url slug of the resource\n :type resource_url: str\n :param params: qu... | class Marvel(object):
"""Marvel API class
This class provides methods to interface with the Marvel API
>>> m = Marvel("acb123....", "efg456...")
"""
def __init__(self, public_key, private_key):
self.public_key = public_key
self.private_key = private_key
def _endpoint(self):
return "http://gateway.marvel.com/%s/public/" % (DEFAULT_API_VERSION)
def _call(self, resource_url, params=None):
"""
Calls the Marvel API endpoint
:param resource_url: url slug of the resource
:type resource_url: str
:param params: query params to add to endpoint
:type params: str
:returns: response -- Requests response
"""
url = "%s%s" % (self._endpoint(), resource_url)
if params:
url += "?%s&%s" % (params, self._auth())
else:
url += "?%s" % self._auth()
return requests.get(url)
def _params(self, params):
"""
Takes dictionary of parameters and returns
urlencoded string
:param params: Dict of query params to encode
:type params: dict
:returns: str -- URL encoded query parameters
"""
return urllib.urlencode(params)
def _auth(self):
"""
Creates hash from api keys and returns all required parametsrs
:returns: str -- URL encoded query parameters containing "ts", "apikey", and "hash"
"""
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M:%S")
hash_string = hashlib.md5("%s%s%s" % (ts, self.private_key, self.public_key)).hexdigest()
return "ts=%s&apikey=%s&hash=%s" % (ts, self.public_key, hash_string)
#public methods
def get_character(self, id):
"""Fetches a single character by id.
get /v1/public/characters
:param id: ID of Character
:type params: int
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_character(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.results[0].name
Wolverine
"""
url = "%s/%s" % (Character.resource_url(), id)
response = json.loads(self._call(url).text)
return CharacterDataWrapper(self, response)
def get_characters(self, *args, **kwargs):
"""Fetches lists of comic characters with optional filters.
get /v1/public/characters/{characterId}
:returns: CharacterDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_characters(orderBy="name,-modified", limit="5", offset="15")
>>> print cdw.data.count
1401
>>> for result in cdw.data.results:
... print result.name
Aginar
Air-Walker (Gabriel Lan)
Ajak
Ajaxis
Akemi
"""
#pass url string and params string to _call
response = json.loads(self._call(Character.resource_url(), self._params(kwargs)).text)
return CharacterDataWrapper(self, response, kwargs)
def get_comic(self, id):
"""Fetches a single comic by id.
get /v1/public/comics/{comicId}
:param id: ID of Comic
:type params: int
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comic(1009718)
>>> print cdw.data.count
1
>>> print cdw.data.result.name
Some Comic
"""
url = "%s/%s" % (Comic.resource_url(), id)
response = json.loads(self._call(url).text)
return ComicDataWrapper(self, response)
def get_comics(self, *args, **kwargs):
"""
Fetches list of comics.
get /v1/public/comics
:returns: ComicDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_comics(orderBy="issueNumber,-modified", limit="10", offset="15")
>>> print cdw.data.count
10
>>> print cdw.data.results[0].name
Some Comic
"""
response = json.loads(self._call(Comic.resource_url(), self._params(kwargs)).text)
return ComicDataWrapper(self, response)
def get_creator(self, id):
"""Fetches a single creator by id.
get /v1/public/creators/{creatorId}
:param id: ID of Creator
:type params: int
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creator(30)
>>> print cdw.data.count
1
>>> print cdw.data.result.fullName
Stan Lee
"""
url = "%s/%s" % (Creator.resource_url(), id)
response = json.loads(self._call(url).text)
return CreatorDataWrapper(self, response)
def get_creators(self, *args, **kwargs):
"""Fetches lists of creators.
get /v1/public/creators
:returns: CreatorDataWrapper
>>> m = Marvel(public_key, private_key)
>>> cdw = m.get_creators(lastName="Lee", orderBy="firstName,-modified", limit="5", offset="15")
>>> print cdw.data.total
25
>>> print cdw.data.results[0].fullName
Alvin Lee
"""
response = json.loads(self._call(Creator.resource_url(), self._params(kwargs)).text)
return CreatorDataWrapper(self, response)
def get_event(self, id):
"""Fetches a single event by id.
get /v1/public/event/{eventId}
:param id: ID of Event
:type params: int
:returns: EventDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_event(253)
>>> print response.data.result.title
Infinity Gauntlet
"""
url = "%s/%s" % (Event.resource_url(), id)
response = json.loads(self._call(url).text)
return EventDataWrapper(self, response)
def get_events(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: EventDataWrapper
>>> #Find all the events that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_events(characters="1009351,1009718")
>>> print response.data.total
38
>>> events = response.data.results
>>> print events[1].title
Age of Apocalypse
"""
response = json.loads(self._call(Event.resource_url(), self._params(kwargs)).text)
return EventDataWrapper(self, response)
def get_single_series(self, id):
"""Fetches a single comic series by id.
get /v1/public/series/{seriesId}
:param id: ID of Series
:type params: int
:returns: SeriesDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_single_series(12429)
>>> print response.data.result.title
5 Ronin (2010)
"""
url = "%s/%s" % (Series.resource_url(), id)
response = json.loads(self._call(url).text)
return SeriesDataWrapper(self, response)
def get_series(self, *args, **kwargs):
"""Fetches lists of events.
get /v1/public/events
:returns: SeriesDataWrapper
>>> #Find all the series that involved Wolverine
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_series(characters="1009718")
>>> print response.data.total
435
>>> series = response.data.results
>>> print series[0].title
5 Ronin (2010)
"""
response = json.loads(self._call(Series.resource_url(), self._params(kwargs)).text)
return SeriesDataWrapper(self, response)
def get_story(self, id):
"""Fetches a single story by id.
get /v1/public/stories/{storyId}
:param id: ID of Story
:type params: int
:returns: StoryDataWrapper
>>> m = Marvel(public_key, private_key)
>>> response = m.get_story(29)
>>> print response.data.result.title
Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself...
"""
url = "%s/%s" % (Story.resource_url(), id)
response = json.loads(self._call(url).text)
return StoryDataWrapper(self, response)
def get_stories(self, *args, **kwargs):
"""Fetches lists of stories.
get /v1/public/stories
:returns: StoryDataWrapper
>>> #Find all the stories that involved both Hulk and Wolverine
>>> #hulk's id: 1009351
>>> #wolverine's id: 1009718
>>> m = Marvel(public_key, private_key)
>>> response = m.get_stories(characters="1009351,1009718")
>>> print response.data.total
4066
>>> stories = response.data.results
>>> print stories[1].title
Cover #477
"""
response = json.loads(self._call(Story.resource_url(), self._params(kwargs)).text)
return StoryDataWrapper(self, response) |
gpennington/PyMarvel | marvel/story.py | Story.get_creators | python | def get_creators(self, *args, **kwargs):
from .creator import Creator, CreatorDataWrapper
return self.get_related_resource(Creator, CreatorDataWrapper, args, kwargs) | Returns a full CreatorDataWrapper object for this story.
/stories/{storyId}/creators
:returns: CreatorDataWrapper -- A new request to API. Contains full results set. | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/story.py#L91-L100 | [
"def get_related_resource(_self, _Class, _ClassDataWrapper, *args, **kwargs):\n \"\"\"\n Takes a related resource Class \n and returns the related resource DataWrapper.\n For Example: Given a Character instance, return\n a ComicsDataWrapper related to that character.\n /character/{characterId}/com... | class Story(MarvelObject):
"""
Story object
Takes a dict of character attrs
"""
_resource_url = 'stories'
@property
def id(self):
return self.dict['id']
@property
def title(self):
return self.dict['title']
@property
def description(self):
"""
:returns: str -- A description of the series.
"""
return self.dict['description']
@property
def resourceURI(self):
return self.dict['resourceURI']
@property
def type(self):
return self.dict['type']
@property
def modified(self):
return str_to_datetime(self.dict['modified'])
@property
def modified_raw(self):
return self.dict['modified']
@property
def thumbnail(self):
return Image(self.marvel, self.dict['thumbnail'])
@property
def comics(self):
from .comic import ComicList
return ComicList(self.marvel, self.dict['comics'])
@property
def series(self):
from .series import SeriesList
return SeriesList(self.marvel, self.dict['series'])
@property
def events(self):
from .event import EventList
return EventList(self.marvel, self.dict['events'])
@property
def characters(self):
from .character import CharacterList
return CharacterList(self.marvel, self.dict['characters'])
@property
def creators(self):
from .creator import CreatorList
return CreatorList(self.marvel, self.dict['creators'])
@property
def originalIssue(self):
from .comic import ComicSummary
return ComicSummary(self.marvel, self.dict['originalIssue'])
def get_characters(self, *args, **kwargs):
"""
Returns a full CharacterDataWrapper object for this story.
/stories/{storyId}/characters
:returns: CharacterDataWrapper -- A new request to API. Contains full results set.
"""
from .character import Character, CharacterDataWrapper
return self.get_related_resource(Character, CharacterDataWrapper, args, kwargs)
def get_comics(self, *args, **kwargs):
"""
Returns a full ComicDataWrapper object this story.
/stories/{seriestoryIdsId}/comics
:returns: ComicDataWrapper -- A new request to API. Contains full results set.
"""
from .comic import Comic, ComicDataWrapper
return self.get_related_resource(Comic, ComicDataWrapper, args, kwargs)
def get_events(self, *args, **kwargs):
"""
Returns a full EventDataWrapper object this story.
/stories/{storyId}/events
:returns: EventDataWrapper -- A new request to API. Contains full results set.
"""
from .event import Event, EventDataWrapper
return self.get_related_resource(Event, EventDataWrapper, args, kwargs)
|
gpennington/PyMarvel | marvel/story.py | Story.get_characters | python | def get_characters(self, *args, **kwargs):
from .character import Character, CharacterDataWrapper
return self.get_related_resource(Character, CharacterDataWrapper, args, kwargs) | Returns a full CharacterDataWrapper object for this story.
/stories/{storyId}/characters
:returns: CharacterDataWrapper -- A new request to API. Contains full results set. | train | https://github.com/gpennington/PyMarvel/blob/2617162836f2b7c525ed6c4ff6f1e86a07284fd1/marvel/story.py#L102-L111 | [
"def get_related_resource(_self, _Class, _ClassDataWrapper, *args, **kwargs):\n \"\"\"\n Takes a related resource Class \n and returns the related resource DataWrapper.\n For Example: Given a Character instance, return\n a ComicsDataWrapper related to that character.\n /character/{characterId}/com... | class Story(MarvelObject):
"""
Story object
Takes a dict of character attrs
"""
_resource_url = 'stories'
@property
def id(self):
return self.dict['id']
@property
def title(self):
return self.dict['title']
@property
def description(self):
"""
:returns: str -- A description of the series.
"""
return self.dict['description']
@property
def resourceURI(self):
return self.dict['resourceURI']
@property
def type(self):
return self.dict['type']
@property
def modified(self):
return str_to_datetime(self.dict['modified'])
@property
def modified_raw(self):
return self.dict['modified']
@property
def thumbnail(self):
return Image(self.marvel, self.dict['thumbnail'])
@property
def comics(self):
from .comic import ComicList
return ComicList(self.marvel, self.dict['comics'])
@property
def series(self):
from .series import SeriesList
return SeriesList(self.marvel, self.dict['series'])
@property
def events(self):
from .event import EventList
return EventList(self.marvel, self.dict['events'])
@property
def characters(self):
from .character import CharacterList
return CharacterList(self.marvel, self.dict['characters'])
@property
def creators(self):
from .creator import CreatorList
return CreatorList(self.marvel, self.dict['creators'])
@property
def originalIssue(self):
from .comic import ComicSummary
return ComicSummary(self.marvel, self.dict['originalIssue'])
def get_creators(self, *args, **kwargs):
"""
Returns a full CreatorDataWrapper object for this story.
/stories/{storyId}/creators
:returns: CreatorDataWrapper -- A new request to API. Contains full results set.
"""
from .creator import Creator, CreatorDataWrapper
return self.get_related_resource(Creator, CreatorDataWrapper, args, kwargs)
def get_comics(self, *args, **kwargs):
"""
Returns a full ComicDataWrapper object this story.
/stories/{seriestoryIdsId}/comics
:returns: ComicDataWrapper -- A new request to API. Contains full results set.
"""
from .comic import Comic, ComicDataWrapper
return self.get_related_resource(Comic, ComicDataWrapper, args, kwargs)
def get_events(self, *args, **kwargs):
"""
Returns a full EventDataWrapper object this story.
/stories/{storyId}/events
:returns: EventDataWrapper -- A new request to API. Contains full results set.
"""
from .event import Event, EventDataWrapper
return self.get_related_resource(Event, EventDataWrapper, args, kwargs)
|
mongolab/dex | dex/analyzer.py | QueryAnalyzer.generate_query_report | python | def generate_query_report(self, db_uri, parsed_query, db_name, collection_name):
index_analysis = None
recommendation = None
namespace = parsed_query['ns']
indexStatus = "unknown"
index_cache_entry = self._ensure_index_cache(db_uri,
db_name,
collection_name)
query_analysis = self._generate_query_analysis(parsed_query,
db_name,
collection_name)
if ((query_analysis['analyzedFields'] != []) and
query_analysis['supported']):
index_analysis = self._generate_index_analysis(query_analysis,
index_cache_entry['indexes'])
indexStatus = index_analysis['indexStatus']
if index_analysis['indexStatus'] != 'full':
recommendation = self._generate_recommendation(query_analysis,
db_name,
collection_name)
# a temporary fix to suppress faulty parsing of $regexes.
# if the recommendation cannot be re-parsed into yaml, we assume
# it is invalid.
if not validate_yaml(recommendation['index']):
recommendation = None
query_analysis['supported'] = False
# QUERY REPORT
return OrderedDict({
'queryMask': parsed_query['queryMask'],
'indexStatus': indexStatus,
'parsed': parsed_query,
'namespace': namespace,
'queryAnalysis': query_analysis,
'indexAnalysis': index_analysis,
'recommendation': recommendation
}) | Generates a comprehensive report on the raw query | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/analyzer.py#L47-L88 | [
"def _ensure_index_cache(self, db_uri, db_name, collection_name):\n \"\"\"Adds a collections index entries to the cache if not present\"\"\"\n if not self._check_indexes or db_uri is None:\n return {'indexes': None}\n if db_name not in self.get_cache():\n self._internal_map[db_name] = {}\n ... | class QueryAnalyzer:
def __init__(self, check_indexes):
self._internal_map = {}
self._check_indexes = check_indexes
self._index_cache_connection = None
############################################################################
############################################################################
def _ensure_index_cache(self, db_uri, db_name, collection_name):
"""Adds a collections index entries to the cache if not present"""
if not self._check_indexes or db_uri is None:
return {'indexes': None}
if db_name not in self.get_cache():
self._internal_map[db_name] = {}
if collection_name not in self._internal_map[db_name]:
indexes = []
try:
if self._index_cache_connection is None:
self._index_cache_connection = pymongo.MongoClient(db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
db = self._index_cache_connection[db_name]
indexes = db[collection_name].index_information()
except:
warning = 'Warning: unable to connect to ' + db_uri + "\n"
else:
internal_map_entry = {'indexes': indexes}
self.get_cache()[db_name][collection_name] = internal_map_entry
return self.get_cache()[db_name][collection_name]
############################################################################
def _generate_query_analysis(self, parsed_query, db_name, collection_name):
"""Translates a raw query object into a Dex query analysis"""
analyzed_fields = []
field_count = 0
supported = True
sort_fields = []
query_mask = None
if 'command' in parsed_query and parsed_query['command'] not in SUPPORTED_COMMANDS:
supported = False
else:
#if 'orderby' in parsed_query:
sort_component = parsed_query['orderby'] if 'orderby' in parsed_query else []
sort_seq = 0
for key in sort_component:
sort_field = {'fieldName': key,
'fieldType': SORT_TYPE,
'seq': sort_seq}
sort_fields.append(key)
analyzed_fields.append(sort_field)
field_count += 1
sort_seq += 1
query_component = parsed_query['query'] if 'query' in parsed_query else {}
for key in query_component:
if key not in sort_fields:
field_type = UNSUPPORTED_TYPE
if ((key not in UNSUPPORTED_QUERY_OPERATORS) and
(key not in COMPOSITE_QUERY_OPERATORS)):
try:
if query_component[key] == {}:
raise
nested_field_list = query_component[key].keys()
except:
field_type = EQUIV_TYPE
else:
for nested_field in nested_field_list:
if ((nested_field in RANGE_QUERY_OPERATORS) and
(nested_field not in UNSUPPORTED_QUERY_OPERATORS)):
field_type = RANGE_TYPE
else:
supported = False
field_type = UNSUPPORTED_TYPE
break
if field_type is UNSUPPORTED_TYPE:
supported = False
analyzed_field = {'fieldName': key,
'fieldType': field_type}
analyzed_fields.append(analyzed_field)
field_count += 1
query_mask = parsed_query['queryMask']
# QUERY ANALYSIS
return OrderedDict({
'analyzedFields': analyzed_fields,
'fieldCount': field_count,
'supported': supported,
'queryMask': query_mask
})
############################################################################
def _generate_index_analysis(self, query_analysis, indexes):
"""Compares a query signature to the index cache to identify complete
and partial indexes available to the query"""
needs_recommendation = True
full_indexes = []
partial_indexes = []
coverage = "unknown"
if indexes is not None:
for index_key in indexes.keys():
index = indexes[index_key]
index_report = self._generate_index_report(index,
query_analysis)
if index_report['supported'] is True:
if index_report['coverage'] == 'full':
full_indexes.append(index_report)
if index_report['idealOrder']:
needs_recommendation = False
elif index_report['coverage'] == 'partial':
partial_indexes.append(index_report)
if len(full_indexes) > 0:
coverage = "full"
elif (len(partial_indexes)) > 0:
coverage = "partial"
elif query_analysis['supported']:
coverage = "none"
# INDEX ANALYSIS
return OrderedDict([('indexStatus', coverage),
('fullIndexes', full_indexes),
('partialIndexes', partial_indexes)])
############################################################################
def _generate_index_report(self, index, query_analysis):
"""Analyzes an existing index against the results of query analysis"""
all_fields = []
equiv_fields = []
sort_fields = []
range_fields = []
for query_field in query_analysis['analyzedFields']:
all_fields.append(query_field['fieldName'])
if query_field['fieldType'] is EQUIV_TYPE:
equiv_fields.append(query_field['fieldName'])
elif query_field['fieldType'] is SORT_TYPE:
sort_fields.append(query_field['fieldName'])
elif query_field['fieldType'] is RANGE_TYPE:
range_fields.append(query_field['fieldName'])
max_equiv_seq = len(equiv_fields)
max_sort_seq = max_equiv_seq + len(sort_fields)
max_range_seq = max_sort_seq + len(range_fields)
coverage = 'none'
query_fields_covered = 0
query_field_count = query_analysis['fieldCount']
supported = True
ideal_order = True
for index_field in index['key']:
field_name = index_field[0]
if index_field[1] == '2d':
supported = False
break
if field_name not in all_fields:
break
if query_fields_covered == 0:
coverage = 'partial'
if query_fields_covered < max_equiv_seq:
if field_name not in equiv_fields:
ideal_order = False
elif query_fields_covered < max_sort_seq:
if field_name not in sort_fields:
ideal_order = False
elif query_fields_covered < max_range_seq:
if field_name not in range_fields:
ideal_order = False
query_fields_covered += 1
if query_fields_covered == query_field_count:
coverage = 'full'
# INDEX REPORT
return OrderedDict({
'coverage': coverage,
'idealOrder': ideal_order,
'queryFieldsCovered': query_fields_covered,
'index': index,
'supported': supported
})
############################################################################
def _generate_recommendation(self,
query_analysis,
db_name,
collection_name):
"""Generates an ideal query recommendation"""
index_rec = '{'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is EQUIV_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is SORT_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is RANGE_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
index_rec += '}'
# RECOMMENDATION
return OrderedDict([('index',index_rec),
('shellCommand', self.generate_shell_command(collection_name, index_rec))])
############################################################################
def generate_shell_command(self, collection_name, index_rec):
command_string = 'db["' + collection_name + '"].ensureIndex('
command_string += index_rec + ', '
command_string += '{"background": ' + BACKGROUND_FLAG + '})'
return command_string
############################################################################
def get_cache(self):
return self._internal_map
############################################################################
def clear_cache(self):
self._internal_map = {}
|
mongolab/dex | dex/analyzer.py | QueryAnalyzer._ensure_index_cache | python | def _ensure_index_cache(self, db_uri, db_name, collection_name):
if not self._check_indexes or db_uri is None:
return {'indexes': None}
if db_name not in self.get_cache():
self._internal_map[db_name] = {}
if collection_name not in self._internal_map[db_name]:
indexes = []
try:
if self._index_cache_connection is None:
self._index_cache_connection = pymongo.MongoClient(db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
db = self._index_cache_connection[db_name]
indexes = db[collection_name].index_information()
except:
warning = 'Warning: unable to connect to ' + db_uri + "\n"
else:
internal_map_entry = {'indexes': indexes}
self.get_cache()[db_name][collection_name] = internal_map_entry
return self.get_cache()[db_name][collection_name] | Adds a collections index entries to the cache if not present | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/analyzer.py#L91-L112 | [
"def get_cache(self):\n return self._internal_map\n"
] | class QueryAnalyzer:
def __init__(self, check_indexes):
self._internal_map = {}
self._check_indexes = check_indexes
self._index_cache_connection = None
############################################################################
def generate_query_report(self, db_uri, parsed_query, db_name, collection_name):
"""Generates a comprehensive report on the raw query"""
index_analysis = None
recommendation = None
namespace = parsed_query['ns']
indexStatus = "unknown"
index_cache_entry = self._ensure_index_cache(db_uri,
db_name,
collection_name)
query_analysis = self._generate_query_analysis(parsed_query,
db_name,
collection_name)
if ((query_analysis['analyzedFields'] != []) and
query_analysis['supported']):
index_analysis = self._generate_index_analysis(query_analysis,
index_cache_entry['indexes'])
indexStatus = index_analysis['indexStatus']
if index_analysis['indexStatus'] != 'full':
recommendation = self._generate_recommendation(query_analysis,
db_name,
collection_name)
# a temporary fix to suppress faulty parsing of $regexes.
# if the recommendation cannot be re-parsed into yaml, we assume
# it is invalid.
if not validate_yaml(recommendation['index']):
recommendation = None
query_analysis['supported'] = False
# QUERY REPORT
return OrderedDict({
'queryMask': parsed_query['queryMask'],
'indexStatus': indexStatus,
'parsed': parsed_query,
'namespace': namespace,
'queryAnalysis': query_analysis,
'indexAnalysis': index_analysis,
'recommendation': recommendation
})
############################################################################
############################################################################
def _generate_query_analysis(self, parsed_query, db_name, collection_name):
"""Translates a raw query object into a Dex query analysis"""
analyzed_fields = []
field_count = 0
supported = True
sort_fields = []
query_mask = None
if 'command' in parsed_query and parsed_query['command'] not in SUPPORTED_COMMANDS:
supported = False
else:
#if 'orderby' in parsed_query:
sort_component = parsed_query['orderby'] if 'orderby' in parsed_query else []
sort_seq = 0
for key in sort_component:
sort_field = {'fieldName': key,
'fieldType': SORT_TYPE,
'seq': sort_seq}
sort_fields.append(key)
analyzed_fields.append(sort_field)
field_count += 1
sort_seq += 1
query_component = parsed_query['query'] if 'query' in parsed_query else {}
for key in query_component:
if key not in sort_fields:
field_type = UNSUPPORTED_TYPE
if ((key not in UNSUPPORTED_QUERY_OPERATORS) and
(key not in COMPOSITE_QUERY_OPERATORS)):
try:
if query_component[key] == {}:
raise
nested_field_list = query_component[key].keys()
except:
field_type = EQUIV_TYPE
else:
for nested_field in nested_field_list:
if ((nested_field in RANGE_QUERY_OPERATORS) and
(nested_field not in UNSUPPORTED_QUERY_OPERATORS)):
field_type = RANGE_TYPE
else:
supported = False
field_type = UNSUPPORTED_TYPE
break
if field_type is UNSUPPORTED_TYPE:
supported = False
analyzed_field = {'fieldName': key,
'fieldType': field_type}
analyzed_fields.append(analyzed_field)
field_count += 1
query_mask = parsed_query['queryMask']
# QUERY ANALYSIS
return OrderedDict({
'analyzedFields': analyzed_fields,
'fieldCount': field_count,
'supported': supported,
'queryMask': query_mask
})
############################################################################
def _generate_index_analysis(self, query_analysis, indexes):
"""Compares a query signature to the index cache to identify complete
and partial indexes available to the query"""
needs_recommendation = True
full_indexes = []
partial_indexes = []
coverage = "unknown"
if indexes is not None:
for index_key in indexes.keys():
index = indexes[index_key]
index_report = self._generate_index_report(index,
query_analysis)
if index_report['supported'] is True:
if index_report['coverage'] == 'full':
full_indexes.append(index_report)
if index_report['idealOrder']:
needs_recommendation = False
elif index_report['coverage'] == 'partial':
partial_indexes.append(index_report)
if len(full_indexes) > 0:
coverage = "full"
elif (len(partial_indexes)) > 0:
coverage = "partial"
elif query_analysis['supported']:
coverage = "none"
# INDEX ANALYSIS
return OrderedDict([('indexStatus', coverage),
('fullIndexes', full_indexes),
('partialIndexes', partial_indexes)])
############################################################################
def _generate_index_report(self, index, query_analysis):
"""Analyzes an existing index against the results of query analysis"""
all_fields = []
equiv_fields = []
sort_fields = []
range_fields = []
for query_field in query_analysis['analyzedFields']:
all_fields.append(query_field['fieldName'])
if query_field['fieldType'] is EQUIV_TYPE:
equiv_fields.append(query_field['fieldName'])
elif query_field['fieldType'] is SORT_TYPE:
sort_fields.append(query_field['fieldName'])
elif query_field['fieldType'] is RANGE_TYPE:
range_fields.append(query_field['fieldName'])
max_equiv_seq = len(equiv_fields)
max_sort_seq = max_equiv_seq + len(sort_fields)
max_range_seq = max_sort_seq + len(range_fields)
coverage = 'none'
query_fields_covered = 0
query_field_count = query_analysis['fieldCount']
supported = True
ideal_order = True
for index_field in index['key']:
field_name = index_field[0]
if index_field[1] == '2d':
supported = False
break
if field_name not in all_fields:
break
if query_fields_covered == 0:
coverage = 'partial'
if query_fields_covered < max_equiv_seq:
if field_name not in equiv_fields:
ideal_order = False
elif query_fields_covered < max_sort_seq:
if field_name not in sort_fields:
ideal_order = False
elif query_fields_covered < max_range_seq:
if field_name not in range_fields:
ideal_order = False
query_fields_covered += 1
if query_fields_covered == query_field_count:
coverage = 'full'
# INDEX REPORT
return OrderedDict({
'coverage': coverage,
'idealOrder': ideal_order,
'queryFieldsCovered': query_fields_covered,
'index': index,
'supported': supported
})
############################################################################
def _generate_recommendation(self,
query_analysis,
db_name,
collection_name):
"""Generates an ideal query recommendation"""
index_rec = '{'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is EQUIV_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is SORT_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is RANGE_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
index_rec += '}'
# RECOMMENDATION
return OrderedDict([('index',index_rec),
('shellCommand', self.generate_shell_command(collection_name, index_rec))])
############################################################################
def generate_shell_command(self, collection_name, index_rec):
command_string = 'db["' + collection_name + '"].ensureIndex('
command_string += index_rec + ', '
command_string += '{"background": ' + BACKGROUND_FLAG + '})'
return command_string
############################################################################
def get_cache(self):
return self._internal_map
############################################################################
def clear_cache(self):
self._internal_map = {}
|
mongolab/dex | dex/analyzer.py | QueryAnalyzer._generate_query_analysis | python | def _generate_query_analysis(self, parsed_query, db_name, collection_name):
analyzed_fields = []
field_count = 0
supported = True
sort_fields = []
query_mask = None
if 'command' in parsed_query and parsed_query['command'] not in SUPPORTED_COMMANDS:
supported = False
else:
#if 'orderby' in parsed_query:
sort_component = parsed_query['orderby'] if 'orderby' in parsed_query else []
sort_seq = 0
for key in sort_component:
sort_field = {'fieldName': key,
'fieldType': SORT_TYPE,
'seq': sort_seq}
sort_fields.append(key)
analyzed_fields.append(sort_field)
field_count += 1
sort_seq += 1
query_component = parsed_query['query'] if 'query' in parsed_query else {}
for key in query_component:
if key not in sort_fields:
field_type = UNSUPPORTED_TYPE
if ((key not in UNSUPPORTED_QUERY_OPERATORS) and
(key not in COMPOSITE_QUERY_OPERATORS)):
try:
if query_component[key] == {}:
raise
nested_field_list = query_component[key].keys()
except:
field_type = EQUIV_TYPE
else:
for nested_field in nested_field_list:
if ((nested_field in RANGE_QUERY_OPERATORS) and
(nested_field not in UNSUPPORTED_QUERY_OPERATORS)):
field_type = RANGE_TYPE
else:
supported = False
field_type = UNSUPPORTED_TYPE
break
if field_type is UNSUPPORTED_TYPE:
supported = False
analyzed_field = {'fieldName': key,
'fieldType': field_type}
analyzed_fields.append(analyzed_field)
field_count += 1
query_mask = parsed_query['queryMask']
# QUERY ANALYSIS
return OrderedDict({
'analyzedFields': analyzed_fields,
'fieldCount': field_count,
'supported': supported,
'queryMask': query_mask
}) | Translates a raw query object into a Dex query analysis | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/analyzer.py#L115-L177 | null | class QueryAnalyzer:
def __init__(self, check_indexes):
self._internal_map = {}
self._check_indexes = check_indexes
self._index_cache_connection = None
############################################################################
def generate_query_report(self, db_uri, parsed_query, db_name, collection_name):
"""Generates a comprehensive report on the raw query"""
index_analysis = None
recommendation = None
namespace = parsed_query['ns']
indexStatus = "unknown"
index_cache_entry = self._ensure_index_cache(db_uri,
db_name,
collection_name)
query_analysis = self._generate_query_analysis(parsed_query,
db_name,
collection_name)
if ((query_analysis['analyzedFields'] != []) and
query_analysis['supported']):
index_analysis = self._generate_index_analysis(query_analysis,
index_cache_entry['indexes'])
indexStatus = index_analysis['indexStatus']
if index_analysis['indexStatus'] != 'full':
recommendation = self._generate_recommendation(query_analysis,
db_name,
collection_name)
# a temporary fix to suppress faulty parsing of $regexes.
# if the recommendation cannot be re-parsed into yaml, we assume
# it is invalid.
if not validate_yaml(recommendation['index']):
recommendation = None
query_analysis['supported'] = False
# QUERY REPORT
return OrderedDict({
'queryMask': parsed_query['queryMask'],
'indexStatus': indexStatus,
'parsed': parsed_query,
'namespace': namespace,
'queryAnalysis': query_analysis,
'indexAnalysis': index_analysis,
'recommendation': recommendation
})
############################################################################
def _ensure_index_cache(self, db_uri, db_name, collection_name):
"""Adds a collections index entries to the cache if not present"""
if not self._check_indexes or db_uri is None:
return {'indexes': None}
if db_name not in self.get_cache():
self._internal_map[db_name] = {}
if collection_name not in self._internal_map[db_name]:
indexes = []
try:
if self._index_cache_connection is None:
self._index_cache_connection = pymongo.MongoClient(db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
db = self._index_cache_connection[db_name]
indexes = db[collection_name].index_information()
except:
warning = 'Warning: unable to connect to ' + db_uri + "\n"
else:
internal_map_entry = {'indexes': indexes}
self.get_cache()[db_name][collection_name] = internal_map_entry
return self.get_cache()[db_name][collection_name]
############################################################################
############################################################################
def _generate_index_analysis(self, query_analysis, indexes):
"""Compares a query signature to the index cache to identify complete
and partial indexes available to the query"""
needs_recommendation = True
full_indexes = []
partial_indexes = []
coverage = "unknown"
if indexes is not None:
for index_key in indexes.keys():
index = indexes[index_key]
index_report = self._generate_index_report(index,
query_analysis)
if index_report['supported'] is True:
if index_report['coverage'] == 'full':
full_indexes.append(index_report)
if index_report['idealOrder']:
needs_recommendation = False
elif index_report['coverage'] == 'partial':
partial_indexes.append(index_report)
if len(full_indexes) > 0:
coverage = "full"
elif (len(partial_indexes)) > 0:
coverage = "partial"
elif query_analysis['supported']:
coverage = "none"
# INDEX ANALYSIS
return OrderedDict([('indexStatus', coverage),
('fullIndexes', full_indexes),
('partialIndexes', partial_indexes)])
############################################################################
def _generate_index_report(self, index, query_analysis):
"""Analyzes an existing index against the results of query analysis"""
all_fields = []
equiv_fields = []
sort_fields = []
range_fields = []
for query_field in query_analysis['analyzedFields']:
all_fields.append(query_field['fieldName'])
if query_field['fieldType'] is EQUIV_TYPE:
equiv_fields.append(query_field['fieldName'])
elif query_field['fieldType'] is SORT_TYPE:
sort_fields.append(query_field['fieldName'])
elif query_field['fieldType'] is RANGE_TYPE:
range_fields.append(query_field['fieldName'])
max_equiv_seq = len(equiv_fields)
max_sort_seq = max_equiv_seq + len(sort_fields)
max_range_seq = max_sort_seq + len(range_fields)
coverage = 'none'
query_fields_covered = 0
query_field_count = query_analysis['fieldCount']
supported = True
ideal_order = True
for index_field in index['key']:
field_name = index_field[0]
if index_field[1] == '2d':
supported = False
break
if field_name not in all_fields:
break
if query_fields_covered == 0:
coverage = 'partial'
if query_fields_covered < max_equiv_seq:
if field_name not in equiv_fields:
ideal_order = False
elif query_fields_covered < max_sort_seq:
if field_name not in sort_fields:
ideal_order = False
elif query_fields_covered < max_range_seq:
if field_name not in range_fields:
ideal_order = False
query_fields_covered += 1
if query_fields_covered == query_field_count:
coverage = 'full'
# INDEX REPORT
return OrderedDict({
'coverage': coverage,
'idealOrder': ideal_order,
'queryFieldsCovered': query_fields_covered,
'index': index,
'supported': supported
})
############################################################################
def _generate_recommendation(self,
query_analysis,
db_name,
collection_name):
"""Generates an ideal query recommendation"""
index_rec = '{'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is EQUIV_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is SORT_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is RANGE_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
index_rec += '}'
# RECOMMENDATION
return OrderedDict([('index',index_rec),
('shellCommand', self.generate_shell_command(collection_name, index_rec))])
############################################################################
def generate_shell_command(self, collection_name, index_rec):
command_string = 'db["' + collection_name + '"].ensureIndex('
command_string += index_rec + ', '
command_string += '{"background": ' + BACKGROUND_FLAG + '})'
return command_string
############################################################################
def get_cache(self):
return self._internal_map
############################################################################
def clear_cache(self):
self._internal_map = {}
|
mongolab/dex | dex/analyzer.py | QueryAnalyzer._generate_index_analysis | python | def _generate_index_analysis(self, query_analysis, indexes):
needs_recommendation = True
full_indexes = []
partial_indexes = []
coverage = "unknown"
if indexes is not None:
for index_key in indexes.keys():
index = indexes[index_key]
index_report = self._generate_index_report(index,
query_analysis)
if index_report['supported'] is True:
if index_report['coverage'] == 'full':
full_indexes.append(index_report)
if index_report['idealOrder']:
needs_recommendation = False
elif index_report['coverage'] == 'partial':
partial_indexes.append(index_report)
if len(full_indexes) > 0:
coverage = "full"
elif (len(partial_indexes)) > 0:
coverage = "partial"
elif query_analysis['supported']:
coverage = "none"
# INDEX ANALYSIS
return OrderedDict([('indexStatus', coverage),
('fullIndexes', full_indexes),
('partialIndexes', partial_indexes)]) | Compares a query signature to the index cache to identify complete
and partial indexes available to the query | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/analyzer.py#L180-L211 | [
"def _generate_index_report(self, index, query_analysis):\n \"\"\"Analyzes an existing index against the results of query analysis\"\"\"\n\n all_fields = []\n equiv_fields = []\n sort_fields = []\n range_fields = []\n\n for query_field in query_analysis['analyzedFields']:\n all_fields.appen... | class QueryAnalyzer:
def __init__(self, check_indexes):
self._internal_map = {}
self._check_indexes = check_indexes
self._index_cache_connection = None
############################################################################
def generate_query_report(self, db_uri, parsed_query, db_name, collection_name):
"""Generates a comprehensive report on the raw query"""
index_analysis = None
recommendation = None
namespace = parsed_query['ns']
indexStatus = "unknown"
index_cache_entry = self._ensure_index_cache(db_uri,
db_name,
collection_name)
query_analysis = self._generate_query_analysis(parsed_query,
db_name,
collection_name)
if ((query_analysis['analyzedFields'] != []) and
query_analysis['supported']):
index_analysis = self._generate_index_analysis(query_analysis,
index_cache_entry['indexes'])
indexStatus = index_analysis['indexStatus']
if index_analysis['indexStatus'] != 'full':
recommendation = self._generate_recommendation(query_analysis,
db_name,
collection_name)
# a temporary fix to suppress faulty parsing of $regexes.
# if the recommendation cannot be re-parsed into yaml, we assume
# it is invalid.
if not validate_yaml(recommendation['index']):
recommendation = None
query_analysis['supported'] = False
# QUERY REPORT
return OrderedDict({
'queryMask': parsed_query['queryMask'],
'indexStatus': indexStatus,
'parsed': parsed_query,
'namespace': namespace,
'queryAnalysis': query_analysis,
'indexAnalysis': index_analysis,
'recommendation': recommendation
})
############################################################################
def _ensure_index_cache(self, db_uri, db_name, collection_name):
"""Adds a collections index entries to the cache if not present"""
if not self._check_indexes or db_uri is None:
return {'indexes': None}
if db_name not in self.get_cache():
self._internal_map[db_name] = {}
if collection_name not in self._internal_map[db_name]:
indexes = []
try:
if self._index_cache_connection is None:
self._index_cache_connection = pymongo.MongoClient(db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
db = self._index_cache_connection[db_name]
indexes = db[collection_name].index_information()
except:
warning = 'Warning: unable to connect to ' + db_uri + "\n"
else:
internal_map_entry = {'indexes': indexes}
self.get_cache()[db_name][collection_name] = internal_map_entry
return self.get_cache()[db_name][collection_name]
############################################################################
def _generate_query_analysis(self, parsed_query, db_name, collection_name):
"""Translates a raw query object into a Dex query analysis"""
analyzed_fields = []
field_count = 0
supported = True
sort_fields = []
query_mask = None
if 'command' in parsed_query and parsed_query['command'] not in SUPPORTED_COMMANDS:
supported = False
else:
#if 'orderby' in parsed_query:
sort_component = parsed_query['orderby'] if 'orderby' in parsed_query else []
sort_seq = 0
for key in sort_component:
sort_field = {'fieldName': key,
'fieldType': SORT_TYPE,
'seq': sort_seq}
sort_fields.append(key)
analyzed_fields.append(sort_field)
field_count += 1
sort_seq += 1
query_component = parsed_query['query'] if 'query' in parsed_query else {}
for key in query_component:
if key not in sort_fields:
field_type = UNSUPPORTED_TYPE
if ((key not in UNSUPPORTED_QUERY_OPERATORS) and
(key not in COMPOSITE_QUERY_OPERATORS)):
try:
if query_component[key] == {}:
raise
nested_field_list = query_component[key].keys()
except:
field_type = EQUIV_TYPE
else:
for nested_field in nested_field_list:
if ((nested_field in RANGE_QUERY_OPERATORS) and
(nested_field not in UNSUPPORTED_QUERY_OPERATORS)):
field_type = RANGE_TYPE
else:
supported = False
field_type = UNSUPPORTED_TYPE
break
if field_type is UNSUPPORTED_TYPE:
supported = False
analyzed_field = {'fieldName': key,
'fieldType': field_type}
analyzed_fields.append(analyzed_field)
field_count += 1
query_mask = parsed_query['queryMask']
# QUERY ANALYSIS
return OrderedDict({
'analyzedFields': analyzed_fields,
'fieldCount': field_count,
'supported': supported,
'queryMask': query_mask
})
############################################################################
############################################################################
def _generate_index_report(self, index, query_analysis):
"""Analyzes an existing index against the results of query analysis"""
all_fields = []
equiv_fields = []
sort_fields = []
range_fields = []
for query_field in query_analysis['analyzedFields']:
all_fields.append(query_field['fieldName'])
if query_field['fieldType'] is EQUIV_TYPE:
equiv_fields.append(query_field['fieldName'])
elif query_field['fieldType'] is SORT_TYPE:
sort_fields.append(query_field['fieldName'])
elif query_field['fieldType'] is RANGE_TYPE:
range_fields.append(query_field['fieldName'])
max_equiv_seq = len(equiv_fields)
max_sort_seq = max_equiv_seq + len(sort_fields)
max_range_seq = max_sort_seq + len(range_fields)
coverage = 'none'
query_fields_covered = 0
query_field_count = query_analysis['fieldCount']
supported = True
ideal_order = True
for index_field in index['key']:
field_name = index_field[0]
if index_field[1] == '2d':
supported = False
break
if field_name not in all_fields:
break
if query_fields_covered == 0:
coverage = 'partial'
if query_fields_covered < max_equiv_seq:
if field_name not in equiv_fields:
ideal_order = False
elif query_fields_covered < max_sort_seq:
if field_name not in sort_fields:
ideal_order = False
elif query_fields_covered < max_range_seq:
if field_name not in range_fields:
ideal_order = False
query_fields_covered += 1
if query_fields_covered == query_field_count:
coverage = 'full'
# INDEX REPORT
return OrderedDict({
'coverage': coverage,
'idealOrder': ideal_order,
'queryFieldsCovered': query_fields_covered,
'index': index,
'supported': supported
})
############################################################################
def _generate_recommendation(self,
query_analysis,
db_name,
collection_name):
"""Generates an ideal query recommendation"""
index_rec = '{'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is EQUIV_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is SORT_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is RANGE_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
index_rec += '}'
# RECOMMENDATION
return OrderedDict([('index',index_rec),
('shellCommand', self.generate_shell_command(collection_name, index_rec))])
############################################################################
def generate_shell_command(self, collection_name, index_rec):
command_string = 'db["' + collection_name + '"].ensureIndex('
command_string += index_rec + ', '
command_string += '{"background": ' + BACKGROUND_FLAG + '})'
return command_string
############################################################################
def get_cache(self):
return self._internal_map
############################################################################
def clear_cache(self):
self._internal_map = {}
|
mongolab/dex | dex/analyzer.py | QueryAnalyzer._generate_index_report | python | def _generate_index_report(self, index, query_analysis):
all_fields = []
equiv_fields = []
sort_fields = []
range_fields = []
for query_field in query_analysis['analyzedFields']:
all_fields.append(query_field['fieldName'])
if query_field['fieldType'] is EQUIV_TYPE:
equiv_fields.append(query_field['fieldName'])
elif query_field['fieldType'] is SORT_TYPE:
sort_fields.append(query_field['fieldName'])
elif query_field['fieldType'] is RANGE_TYPE:
range_fields.append(query_field['fieldName'])
max_equiv_seq = len(equiv_fields)
max_sort_seq = max_equiv_seq + len(sort_fields)
max_range_seq = max_sort_seq + len(range_fields)
coverage = 'none'
query_fields_covered = 0
query_field_count = query_analysis['fieldCount']
supported = True
ideal_order = True
for index_field in index['key']:
field_name = index_field[0]
if index_field[1] == '2d':
supported = False
break
if field_name not in all_fields:
break
if query_fields_covered == 0:
coverage = 'partial'
if query_fields_covered < max_equiv_seq:
if field_name not in equiv_fields:
ideal_order = False
elif query_fields_covered < max_sort_seq:
if field_name not in sort_fields:
ideal_order = False
elif query_fields_covered < max_range_seq:
if field_name not in range_fields:
ideal_order = False
query_fields_covered += 1
if query_fields_covered == query_field_count:
coverage = 'full'
# INDEX REPORT
return OrderedDict({
'coverage': coverage,
'idealOrder': ideal_order,
'queryFieldsCovered': query_fields_covered,
'index': index,
'supported': supported
}) | Analyzes an existing index against the results of query analysis | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/analyzer.py#L214-L273 | null | class QueryAnalyzer:
def __init__(self, check_indexes):
self._internal_map = {}
self._check_indexes = check_indexes
self._index_cache_connection = None
############################################################################
def generate_query_report(self, db_uri, parsed_query, db_name, collection_name):
"""Generates a comprehensive report on the raw query"""
index_analysis = None
recommendation = None
namespace = parsed_query['ns']
indexStatus = "unknown"
index_cache_entry = self._ensure_index_cache(db_uri,
db_name,
collection_name)
query_analysis = self._generate_query_analysis(parsed_query,
db_name,
collection_name)
if ((query_analysis['analyzedFields'] != []) and
query_analysis['supported']):
index_analysis = self._generate_index_analysis(query_analysis,
index_cache_entry['indexes'])
indexStatus = index_analysis['indexStatus']
if index_analysis['indexStatus'] != 'full':
recommendation = self._generate_recommendation(query_analysis,
db_name,
collection_name)
# a temporary fix to suppress faulty parsing of $regexes.
# if the recommendation cannot be re-parsed into yaml, we assume
# it is invalid.
if not validate_yaml(recommendation['index']):
recommendation = None
query_analysis['supported'] = False
# QUERY REPORT
return OrderedDict({
'queryMask': parsed_query['queryMask'],
'indexStatus': indexStatus,
'parsed': parsed_query,
'namespace': namespace,
'queryAnalysis': query_analysis,
'indexAnalysis': index_analysis,
'recommendation': recommendation
})
############################################################################
def _ensure_index_cache(self, db_uri, db_name, collection_name):
"""Adds a collections index entries to the cache if not present"""
if not self._check_indexes or db_uri is None:
return {'indexes': None}
if db_name not in self.get_cache():
self._internal_map[db_name] = {}
if collection_name not in self._internal_map[db_name]:
indexes = []
try:
if self._index_cache_connection is None:
self._index_cache_connection = pymongo.MongoClient(db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
db = self._index_cache_connection[db_name]
indexes = db[collection_name].index_information()
except:
warning = 'Warning: unable to connect to ' + db_uri + "\n"
else:
internal_map_entry = {'indexes': indexes}
self.get_cache()[db_name][collection_name] = internal_map_entry
return self.get_cache()[db_name][collection_name]
############################################################################
def _generate_query_analysis(self, parsed_query, db_name, collection_name):
"""Translates a raw query object into a Dex query analysis"""
analyzed_fields = []
field_count = 0
supported = True
sort_fields = []
query_mask = None
if 'command' in parsed_query and parsed_query['command'] not in SUPPORTED_COMMANDS:
supported = False
else:
#if 'orderby' in parsed_query:
sort_component = parsed_query['orderby'] if 'orderby' in parsed_query else []
sort_seq = 0
for key in sort_component:
sort_field = {'fieldName': key,
'fieldType': SORT_TYPE,
'seq': sort_seq}
sort_fields.append(key)
analyzed_fields.append(sort_field)
field_count += 1
sort_seq += 1
query_component = parsed_query['query'] if 'query' in parsed_query else {}
for key in query_component:
if key not in sort_fields:
field_type = UNSUPPORTED_TYPE
if ((key not in UNSUPPORTED_QUERY_OPERATORS) and
(key not in COMPOSITE_QUERY_OPERATORS)):
try:
if query_component[key] == {}:
raise
nested_field_list = query_component[key].keys()
except:
field_type = EQUIV_TYPE
else:
for nested_field in nested_field_list:
if ((nested_field in RANGE_QUERY_OPERATORS) and
(nested_field not in UNSUPPORTED_QUERY_OPERATORS)):
field_type = RANGE_TYPE
else:
supported = False
field_type = UNSUPPORTED_TYPE
break
if field_type is UNSUPPORTED_TYPE:
supported = False
analyzed_field = {'fieldName': key,
'fieldType': field_type}
analyzed_fields.append(analyzed_field)
field_count += 1
query_mask = parsed_query['queryMask']
# QUERY ANALYSIS
return OrderedDict({
'analyzedFields': analyzed_fields,
'fieldCount': field_count,
'supported': supported,
'queryMask': query_mask
})
############################################################################
def _generate_index_analysis(self, query_analysis, indexes):
"""Compares a query signature to the index cache to identify complete
and partial indexes available to the query"""
needs_recommendation = True
full_indexes = []
partial_indexes = []
coverage = "unknown"
if indexes is not None:
for index_key in indexes.keys():
index = indexes[index_key]
index_report = self._generate_index_report(index,
query_analysis)
if index_report['supported'] is True:
if index_report['coverage'] == 'full':
full_indexes.append(index_report)
if index_report['idealOrder']:
needs_recommendation = False
elif index_report['coverage'] == 'partial':
partial_indexes.append(index_report)
if len(full_indexes) > 0:
coverage = "full"
elif (len(partial_indexes)) > 0:
coverage = "partial"
elif query_analysis['supported']:
coverage = "none"
# INDEX ANALYSIS
return OrderedDict([('indexStatus', coverage),
('fullIndexes', full_indexes),
('partialIndexes', partial_indexes)])
############################################################################
############################################################################
def _generate_recommendation(self,
query_analysis,
db_name,
collection_name):
"""Generates an ideal query recommendation"""
index_rec = '{'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is EQUIV_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is SORT_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is RANGE_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
index_rec += '}'
# RECOMMENDATION
return OrderedDict([('index',index_rec),
('shellCommand', self.generate_shell_command(collection_name, index_rec))])
############################################################################
def generate_shell_command(self, collection_name, index_rec):
command_string = 'db["' + collection_name + '"].ensureIndex('
command_string += index_rec + ', '
command_string += '{"background": ' + BACKGROUND_FLAG + '})'
return command_string
############################################################################
def get_cache(self):
return self._internal_map
############################################################################
def clear_cache(self):
self._internal_map = {}
|
mongolab/dex | dex/analyzer.py | QueryAnalyzer._generate_recommendation | python | def _generate_recommendation(self,
query_analysis,
db_name,
collection_name):
index_rec = '{'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is EQUIV_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is SORT_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
for query_field in query_analysis['analyzedFields']:
if query_field['fieldType'] is RANGE_TYPE:
if len(index_rec) is not 1:
index_rec += ', '
index_rec += '"' + query_field['fieldName'] + '": 1'
index_rec += '}'
# RECOMMENDATION
return OrderedDict([('index',index_rec),
('shellCommand', self.generate_shell_command(collection_name, index_rec))]) | Generates an ideal query recommendation | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/analyzer.py#L276-L301 | [
"def generate_shell_command(self, collection_name, index_rec):\n command_string = 'db[\"' + collection_name + '\"].ensureIndex('\n command_string += index_rec + ', '\n command_string += '{\"background\": ' + BACKGROUND_FLAG + '})'\n return command_string\n"
] | class QueryAnalyzer:
def __init__(self, check_indexes):
self._internal_map = {}
self._check_indexes = check_indexes
self._index_cache_connection = None
############################################################################
def generate_query_report(self, db_uri, parsed_query, db_name, collection_name):
"""Generates a comprehensive report on the raw query"""
index_analysis = None
recommendation = None
namespace = parsed_query['ns']
indexStatus = "unknown"
index_cache_entry = self._ensure_index_cache(db_uri,
db_name,
collection_name)
query_analysis = self._generate_query_analysis(parsed_query,
db_name,
collection_name)
if ((query_analysis['analyzedFields'] != []) and
query_analysis['supported']):
index_analysis = self._generate_index_analysis(query_analysis,
index_cache_entry['indexes'])
indexStatus = index_analysis['indexStatus']
if index_analysis['indexStatus'] != 'full':
recommendation = self._generate_recommendation(query_analysis,
db_name,
collection_name)
# a temporary fix to suppress faulty parsing of $regexes.
# if the recommendation cannot be re-parsed into yaml, we assume
# it is invalid.
if not validate_yaml(recommendation['index']):
recommendation = None
query_analysis['supported'] = False
# QUERY REPORT
return OrderedDict({
'queryMask': parsed_query['queryMask'],
'indexStatus': indexStatus,
'parsed': parsed_query,
'namespace': namespace,
'queryAnalysis': query_analysis,
'indexAnalysis': index_analysis,
'recommendation': recommendation
})
############################################################################
def _ensure_index_cache(self, db_uri, db_name, collection_name):
"""Adds a collections index entries to the cache if not present"""
if not self._check_indexes or db_uri is None:
return {'indexes': None}
if db_name not in self.get_cache():
self._internal_map[db_name] = {}
if collection_name not in self._internal_map[db_name]:
indexes = []
try:
if self._index_cache_connection is None:
self._index_cache_connection = pymongo.MongoClient(db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
db = self._index_cache_connection[db_name]
indexes = db[collection_name].index_information()
except:
warning = 'Warning: unable to connect to ' + db_uri + "\n"
else:
internal_map_entry = {'indexes': indexes}
self.get_cache()[db_name][collection_name] = internal_map_entry
return self.get_cache()[db_name][collection_name]
############################################################################
def _generate_query_analysis(self, parsed_query, db_name, collection_name):
"""Translates a raw query object into a Dex query analysis"""
analyzed_fields = []
field_count = 0
supported = True
sort_fields = []
query_mask = None
if 'command' in parsed_query and parsed_query['command'] not in SUPPORTED_COMMANDS:
supported = False
else:
#if 'orderby' in parsed_query:
sort_component = parsed_query['orderby'] if 'orderby' in parsed_query else []
sort_seq = 0
for key in sort_component:
sort_field = {'fieldName': key,
'fieldType': SORT_TYPE,
'seq': sort_seq}
sort_fields.append(key)
analyzed_fields.append(sort_field)
field_count += 1
sort_seq += 1
query_component = parsed_query['query'] if 'query' in parsed_query else {}
for key in query_component:
if key not in sort_fields:
field_type = UNSUPPORTED_TYPE
if ((key not in UNSUPPORTED_QUERY_OPERATORS) and
(key not in COMPOSITE_QUERY_OPERATORS)):
try:
if query_component[key] == {}:
raise
nested_field_list = query_component[key].keys()
except:
field_type = EQUIV_TYPE
else:
for nested_field in nested_field_list:
if ((nested_field in RANGE_QUERY_OPERATORS) and
(nested_field not in UNSUPPORTED_QUERY_OPERATORS)):
field_type = RANGE_TYPE
else:
supported = False
field_type = UNSUPPORTED_TYPE
break
if field_type is UNSUPPORTED_TYPE:
supported = False
analyzed_field = {'fieldName': key,
'fieldType': field_type}
analyzed_fields.append(analyzed_field)
field_count += 1
query_mask = parsed_query['queryMask']
# QUERY ANALYSIS
return OrderedDict({
'analyzedFields': analyzed_fields,
'fieldCount': field_count,
'supported': supported,
'queryMask': query_mask
})
############################################################################
def _generate_index_analysis(self, query_analysis, indexes):
"""Compares a query signature to the index cache to identify complete
and partial indexes available to the query"""
needs_recommendation = True
full_indexes = []
partial_indexes = []
coverage = "unknown"
if indexes is not None:
for index_key in indexes.keys():
index = indexes[index_key]
index_report = self._generate_index_report(index,
query_analysis)
if index_report['supported'] is True:
if index_report['coverage'] == 'full':
full_indexes.append(index_report)
if index_report['idealOrder']:
needs_recommendation = False
elif index_report['coverage'] == 'partial':
partial_indexes.append(index_report)
if len(full_indexes) > 0:
coverage = "full"
elif (len(partial_indexes)) > 0:
coverage = "partial"
elif query_analysis['supported']:
coverage = "none"
# INDEX ANALYSIS
return OrderedDict([('indexStatus', coverage),
('fullIndexes', full_indexes),
('partialIndexes', partial_indexes)])
############################################################################
def _generate_index_report(self, index, query_analysis):
"""Analyzes an existing index against the results of query analysis"""
all_fields = []
equiv_fields = []
sort_fields = []
range_fields = []
for query_field in query_analysis['analyzedFields']:
all_fields.append(query_field['fieldName'])
if query_field['fieldType'] is EQUIV_TYPE:
equiv_fields.append(query_field['fieldName'])
elif query_field['fieldType'] is SORT_TYPE:
sort_fields.append(query_field['fieldName'])
elif query_field['fieldType'] is RANGE_TYPE:
range_fields.append(query_field['fieldName'])
max_equiv_seq = len(equiv_fields)
max_sort_seq = max_equiv_seq + len(sort_fields)
max_range_seq = max_sort_seq + len(range_fields)
coverage = 'none'
query_fields_covered = 0
query_field_count = query_analysis['fieldCount']
supported = True
ideal_order = True
for index_field in index['key']:
field_name = index_field[0]
if index_field[1] == '2d':
supported = False
break
if field_name not in all_fields:
break
if query_fields_covered == 0:
coverage = 'partial'
if query_fields_covered < max_equiv_seq:
if field_name not in equiv_fields:
ideal_order = False
elif query_fields_covered < max_sort_seq:
if field_name not in sort_fields:
ideal_order = False
elif query_fields_covered < max_range_seq:
if field_name not in range_fields:
ideal_order = False
query_fields_covered += 1
if query_fields_covered == query_field_count:
coverage = 'full'
# INDEX REPORT
return OrderedDict({
'coverage': coverage,
'idealOrder': ideal_order,
'queryFieldsCovered': query_fields_covered,
'index': index,
'supported': supported
})
############################################################################
############################################################################
def generate_shell_command(self, collection_name, index_rec):
command_string = 'db["' + collection_name + '"].ensureIndex('
command_string += index_rec + ', '
command_string += '{"background": ' + BACKGROUND_FLAG + '})'
return command_string
############################################################################
def get_cache(self):
return self._internal_map
############################################################################
def clear_cache(self):
self._internal_map = {}
|
mongolab/dex | dex/analyzer.py | ReportAggregation.add_query_occurrence | python | def add_query_occurrence(self, report):
initial_millis = int(report['parsed']['stats']['millis'])
mask = report['queryMask']
existing_report = self._get_existing_report(mask, report)
if existing_report is not None:
self._merge_report(existing_report, report)
else:
time = None
if 'ts' in report['parsed']:
time = report['parsed']['ts']
self._reports.append(OrderedDict([
('namespace', report['namespace']),
('lastSeenDate', time),
('queryMask', mask),
('supported', report['queryAnalysis']['supported']),
('indexStatus', report['indexStatus']),
('recommendation', report['recommendation']),
('stats', OrderedDict([('count', 1),
('totalTimeMillis', initial_millis),
('avgTimeMillis', initial_millis)]))])) | Adds a report to the report aggregation | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/analyzer.py#L327-L350 | [
"def _get_existing_report(self, mask, report):\n \"\"\"Returns the aggregated report that matches report\"\"\"\n for existing_report in self._reports:\n if existing_report['namespace'] == report['namespace']:\n if mask == existing_report['queryMask']:\n return existing_report\... | class ReportAggregation:
def __init__(self):
self._reports = []
############################################################################
############################################################################
def get_reports(self):
"""Returns a minimized version of the aggregation"""
return sorted(self._reports,
key=lambda x: x['stats']['totalTimeMillis'],
reverse=True)
############################################################################
def _get_existing_report(self, mask, report):
"""Returns the aggregated report that matches report"""
for existing_report in self._reports:
if existing_report['namespace'] == report['namespace']:
if mask == existing_report['queryMask']:
return existing_report
return None
############################################################################
def _merge_report(self, target, new):
"""Merges a new report into the target report"""
time = None
if 'ts' in new['parsed']:
time = new['parsed']['ts']
if (target.get('lastSeenDate', None) and
time and
target['lastSeenDate'] < time):
target['lastSeenDate'] = time
query_millis = int(new['parsed']['stats']['millis'])
target['stats']['totalTimeMillis'] += query_millis
target['stats']['count'] += 1
target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']
|
mongolab/dex | dex/analyzer.py | ReportAggregation.get_reports | python | def get_reports(self):
return sorted(self._reports,
key=lambda x: x['stats']['totalTimeMillis'],
reverse=True) | Returns a minimized version of the aggregation | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/analyzer.py#L353-L357 | null | class ReportAggregation:
def __init__(self):
self._reports = []
############################################################################
def add_query_occurrence(self, report):
"""Adds a report to the report aggregation"""
initial_millis = int(report['parsed']['stats']['millis'])
mask = report['queryMask']
existing_report = self._get_existing_report(mask, report)
if existing_report is not None:
self._merge_report(existing_report, report)
else:
time = None
if 'ts' in report['parsed']:
time = report['parsed']['ts']
self._reports.append(OrderedDict([
('namespace', report['namespace']),
('lastSeenDate', time),
('queryMask', mask),
('supported', report['queryAnalysis']['supported']),
('indexStatus', report['indexStatus']),
('recommendation', report['recommendation']),
('stats', OrderedDict([('count', 1),
('totalTimeMillis', initial_millis),
('avgTimeMillis', initial_millis)]))]))
############################################################################
############################################################################
def _get_existing_report(self, mask, report):
"""Returns the aggregated report that matches report"""
for existing_report in self._reports:
if existing_report['namespace'] == report['namespace']:
if mask == existing_report['queryMask']:
return existing_report
return None
############################################################################
def _merge_report(self, target, new):
"""Merges a new report into the target report"""
time = None
if 'ts' in new['parsed']:
time = new['parsed']['ts']
if (target.get('lastSeenDate', None) and
time and
target['lastSeenDate'] < time):
target['lastSeenDate'] = time
query_millis = int(new['parsed']['stats']['millis'])
target['stats']['totalTimeMillis'] += query_millis
target['stats']['count'] += 1
target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']
|
mongolab/dex | dex/analyzer.py | ReportAggregation._get_existing_report | python | def _get_existing_report(self, mask, report):
for existing_report in self._reports:
if existing_report['namespace'] == report['namespace']:
if mask == existing_report['queryMask']:
return existing_report
return None | Returns the aggregated report that matches report | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/analyzer.py#L360-L366 | null | class ReportAggregation:
def __init__(self):
self._reports = []
############################################################################
def add_query_occurrence(self, report):
"""Adds a report to the report aggregation"""
initial_millis = int(report['parsed']['stats']['millis'])
mask = report['queryMask']
existing_report = self._get_existing_report(mask, report)
if existing_report is not None:
self._merge_report(existing_report, report)
else:
time = None
if 'ts' in report['parsed']:
time = report['parsed']['ts']
self._reports.append(OrderedDict([
('namespace', report['namespace']),
('lastSeenDate', time),
('queryMask', mask),
('supported', report['queryAnalysis']['supported']),
('indexStatus', report['indexStatus']),
('recommendation', report['recommendation']),
('stats', OrderedDict([('count', 1),
('totalTimeMillis', initial_millis),
('avgTimeMillis', initial_millis)]))]))
############################################################################
def get_reports(self):
"""Returns a minimized version of the aggregation"""
return sorted(self._reports,
key=lambda x: x['stats']['totalTimeMillis'],
reverse=True)
############################################################################
############################################################################
def _merge_report(self, target, new):
"""Merges a new report into the target report"""
time = None
if 'ts' in new['parsed']:
time = new['parsed']['ts']
if (target.get('lastSeenDate', None) and
time and
target['lastSeenDate'] < time):
target['lastSeenDate'] = time
query_millis = int(new['parsed']['stats']['millis'])
target['stats']['totalTimeMillis'] += query_millis
target['stats']['count'] += 1
target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']
|
mongolab/dex | dex/analyzer.py | ReportAggregation._merge_report | python | def _merge_report(self, target, new):
time = None
if 'ts' in new['parsed']:
time = new['parsed']['ts']
if (target.get('lastSeenDate', None) and
time and
target['lastSeenDate'] < time):
target['lastSeenDate'] = time
query_millis = int(new['parsed']['stats']['millis'])
target['stats']['totalTimeMillis'] += query_millis
target['stats']['count'] += 1
target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count'] | Merges a new report into the target report | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/analyzer.py#L369-L383 | null | class ReportAggregation:
def __init__(self):
self._reports = []
############################################################################
def add_query_occurrence(self, report):
"""Adds a report to the report aggregation"""
initial_millis = int(report['parsed']['stats']['millis'])
mask = report['queryMask']
existing_report = self._get_existing_report(mask, report)
if existing_report is not None:
self._merge_report(existing_report, report)
else:
time = None
if 'ts' in report['parsed']:
time = report['parsed']['ts']
self._reports.append(OrderedDict([
('namespace', report['namespace']),
('lastSeenDate', time),
('queryMask', mask),
('supported', report['queryAnalysis']['supported']),
('indexStatus', report['indexStatus']),
('recommendation', report['recommendation']),
('stats', OrderedDict([('count', 1),
('totalTimeMillis', initial_millis),
('avgTimeMillis', initial_millis)]))]))
############################################################################
def get_reports(self):
"""Returns a minimized version of the aggregation"""
return sorted(self._reports,
key=lambda x: x['stats']['totalTimeMillis'],
reverse=True)
############################################################################
def _get_existing_report(self, mask, report):
"""Returns the aggregated report that matches report"""
for existing_report in self._reports:
if existing_report['namespace'] == report['namespace']:
if mask == existing_report['queryMask']:
return existing_report
return None
############################################################################
|
mongolab/dex | dex/parsers.py | Parser.parse | python | def parse(self, input):
query = None
for handler in self._line_handlers:
try:
query = handler.handle(input)
except Exception as e:
query = None
finally:
if query is not None:
return query
return None | Passes input to each QueryLineHandler in use | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/parsers.py#L67-L78 | null | class Parser(object):
def __init__(self, handlers):
self._line_handlers = handlers
|
mongolab/dex | dex/dex.py | Dex.generate_query_report | python | def generate_query_report(self, db_uri, query, db_name, collection_name):
return self._query_analyzer.generate_query_report(db_uri,
query,
db_name,
collection_name) | Analyzes a single query | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L77-L82 | null | class Dex:
############################################################################
def __init__(self, db_uri, verbose, namespaces_list, slowms, check_indexes, timeout):
self._check_indexes = check_indexes
self._query_analyzer = QueryAnalyzer(check_indexes)
self._db_uri = db_uri
self._slowms = slowms
self._verbose = verbose
self._requested_namespaces = self._validate_namespaces(namespaces_list)
self._recommendation_cache = []
self._report = ReportAggregation()
self._start_time = None
self._timeout_time = None
self._timeout = timeout
self._run_stats = self._get_initial_run_stats()
self._first_line = True
############################################################################
############################################################################
def _process_query(self, input, parser):
self._run_stats['linesRead'] += 1
line_time = get_line_time(input)
if line_time is not None:
if ((self._run_stats['timeRange']['start'] is None) or
(self._run_stats['timeRange']['start'] > line_time)):
self._run_stats['timeRange']['start'] = line_time
if ((self._run_stats['timeRange']['end'] is None) or
(self._run_stats['timeRange']['end'] < line_time)):
self._run_stats['timeRange']['end'] = line_time
parsed = parser.parse(input)
if parsed is not None:
if parsed['supported']:
self._run_stats['linesAnalyzed'] += 1
namespace_tuple = self._tuplefy_namespace(parsed['ns'])
# If the query is for a requested namespace ....
if self._namespace_requested(parsed['ns']):
db_name = namespace_tuple[0]
collection_name = namespace_tuple[1]
query_report = None
if parsed['stats']['millis'] >= self._slowms:
try:
query_report = self.generate_query_report(self._db_uri,
parsed,
db_name,
collection_name)
except Exception as e:
#print traceback.print_exc()
return 1
if query_report is not None:
if query_report['recommendation'] is not None:
self._run_stats['linesWithRecommendations'] += 1
self._report.add_query_occurrence(query_report)
else:
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithTime'] += 1
self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] += int(parsed['stats']['millis'])
self._run_stats['unparsableLineInfo']['unparsedAvgTimeMillis'] = self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] / self._run_stats['unparsableLineInfo']['unparsableLinesWithTime']
else:
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithoutTime'] += 1
############################################################################
def analyze_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI or provide " \
+ "a namespace filter with -n.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
for database in databases:
db = connection[database]
profile_entries = db['system.profile'].find()
for profile_entry in profile_entries:
self._process_query(profile_entry,
profile_parser)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def watch_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
enabled_profile = False
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
if len(databases) != 1:
message = "Error: Please use namespaces (-n) to specify a single " \
+ "database for profile watching.\n"
sys.stderr.write(message)
return 1
database = databases[0]
db = connection[database]
initial_profile_level = db.profiling_level()
if initial_profile_level is pymongo.OFF:
message = "Profile level currently 0. Dex is setting profile " \
+ "level 1. To run --watch at profile level 2, " \
+ "enable profile level 2 before running Dex.\n"
sys.stderr.write(message)
db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
self._process_query(profile_entry,
profile_parser)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
if initial_profile_level is pymongo.OFF:
message = "Dex is resetting profile level to initial value " \
+ "of 0. You may wish to drop the system.profile " \
+ "collection.\n"
sys.stderr.write(message)
db.set_profiling_level(initial_profile_level)
return 0
############################################################################
def analyze_logfile(self, logfile_path):
self._run_stats['logSource'] = logfile_path
"""Analyzes queries from a given log file"""
with open(logfile_path) as obj:
self.analyze_logfile_object(obj)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def analyze_logfile_object(self, file_object):
"""Analyzes queries from a given log file"""
log_parser = LogParser()
if self._start_time is None:
self._start_time = datetime.now()
if self._timeout != 0:
self._end_time = self._start_time + timedelta(minutes=self._timeout)
else:
self._end_time = None
# For each line in the logfile ...
for line in file_object:
if self._end_time is not None and datetime.now() > self._end_time:
self._run_stats['timedOut'] = True
self._run_stats['timeoutInMinutes'] = self._timeout
break
self._process_query(line, log_parser)
return 0
############################################################################
def watch_logfile(self, logfile_path):
"""Analyzes queries from the tail of a given log file"""
self._run_stats['logSource'] = logfile_path
log_parser = LogParser()
# For each new line in the logfile ...
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
firstLine = True
for line in self._tail_file(open(logfile_path),
WATCH_INTERVAL_SECONDS):
if firstLine:
self._run_stats['timeRange']['start'] = get_line_time(line)
self._process_query(line, log_parser)
self._run_stats['timeRange']['end'] = get_line_time(line)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def _get_initial_run_stats(self):
"""Singlesource for initializing an output dict"""
return OrderedDict([('linesWithRecommendations', 0),
('linesAnalyzed', 0),
('linesRead', 0),
('dexTime', datetime.utcnow()),
('logSource', None),
('timeRange', OrderedDict([('start', None),
('end', None)])),
('unparsableLineInfo', OrderedDict([('unparsableLines', 0),
('unparsableLinesWithoutTime', 0),
('unparsableLinesWithTime', 0),
('unparsedTimeMillis', 0),
('unparsedAvgTimeMillis', 0)]))])
############################################################################
def _make_aggregated_report(self):
output = OrderedDict([('runStats', self._run_stats),
('results', self._report.get_reports())])
return output
############################################################################
def _output_aggregated_report(self, out):
out.write(pretty_json(self._make_aggregated_report()).replace('"', "'").replace("\\'", '"') + "\n")
############################################################################
def _tail_file(self, file, interval):
"""Tails a file"""
file.seek(0,2)
while True:
where = file.tell()
line = file.readline()
if not line:
time.sleep(interval)
file.seek(where)
else:
yield line
############################################################################
def _tail_profile(self, db, interval):
"""Tails the system.profile collection"""
latest_doc = None
while latest_doc is None:
time.sleep(interval)
latest_doc = db['system.profile'].find_one()
current_time = latest_doc['ts']
while True:
time.sleep(interval)
cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)
for doc in cursor:
current_time = doc['ts']
yield doc
############################################################################
def _tuplefy_namespace(self, namespace):
"""Converts a mongodb namespace to a db, collection tuple"""
namespace_split = namespace.split('.', 1)
if len(namespace_split) is 1:
# we treat a single element as a collection name.
# this also properly tuplefies '*'
namespace_tuple = ('*', namespace_split[0])
elif len(namespace_split) is 2:
namespace_tuple = (namespace_split[0],namespace_split[1])
else:
return None
return namespace_tuple
############################################################################
# Need to add rejection of true regex attempts.
def _validate_namespaces(self, input_namespaces):
"""Converts a list of db namespaces to a list of namespace tuples,
supporting basic commandline wildcards"""
output_namespaces = []
if input_namespaces == []:
return output_namespaces
elif '*' in input_namespaces:
if len(input_namespaces) > 1:
warning = 'Warning: Multiple namespaces are '
warning += 'ignored when one namespace is "*"\n'
sys.stderr.write(warning)
return output_namespaces
else:
for namespace in input_namespaces:
if not isinstance(namespace, unicode):
namespace = unicode(namespace)
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple is None:
warning = 'Warning: Invalid namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
else:
if namespace_tuple not in output_namespaces:
output_namespaces.append(namespace_tuple)
else:
warning = 'Warning: Duplicate namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
return output_namespaces
############################################################################
def _namespace_requested(self, namespace):
"""Checks whether the requested_namespaces contain the provided
namespace"""
if namespace is None:
return False
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple[0] in IGNORE_DBS:
return False
elif namespace_tuple[1] in IGNORE_COLLECTIONS:
return False
else:
return self._tuple_requested(namespace_tuple)
############################################################################
def _tuple_requested(self, namespace_tuple):
"""Helper for _namespace_requested. Supports limited wildcards"""
if not isinstance(namespace_tuple[0], unicode):
encoded_db = unicode(namespace_tuple[0])
else:
encoded_db = namespace_tuple[0]
if not isinstance(namespace_tuple[1], unicode):
encoded_coll = unicode(namespace_tuple[1])
else:
encoded_coll = namespace_tuple[1]
if namespace_tuple is None:
return False
elif len(self._requested_namespaces) is 0:
return True
for requested_namespace in self._requested_namespaces:
if ((((requested_namespace[0]) == u'*') or
(encoded_db == requested_namespace[0])) and
(((requested_namespace[1]) == u'*') or
(encoded_coll == requested_namespace[1]))):
return True
return False
############################################################################
def _get_requested_databases(self):
"""Returns a list of databases requested, not including ignored dbs"""
requested_databases = []
if ((self._requested_namespaces is not None) and
(self._requested_namespaces != [])):
for requested_namespace in self._requested_namespaces:
if requested_namespace[0] is '*':
return []
elif requested_namespace[0] not in IGNORE_DBS:
requested_databases.append(requested_namespace[0])
return requested_databases
|
mongolab/dex | dex/dex.py | Dex.analyze_profile | python | def analyze_profile(self):
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI or provide " \
+ "a namespace filter with -n.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
for database in databases:
db = connection[database]
profile_entries = db['system.profile'].find()
for profile_entry in profile_entries:
self._process_query(profile_entry,
profile_parser)
self._output_aggregated_report(sys.stdout)
return 0 | Analyzes queries from a given log file | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L132-L166 | [
"def _process_query(self, input, parser):\n self._run_stats['linesRead'] += 1\n\n line_time = get_line_time(input)\n\n if line_time is not None:\n if ((self._run_stats['timeRange']['start'] is None) or\n (self._run_stats['timeRange']['start'] > line_time)):\n self._run_stats['t... | class Dex:
############################################################################
def __init__(self, db_uri, verbose, namespaces_list, slowms, check_indexes, timeout):
self._check_indexes = check_indexes
self._query_analyzer = QueryAnalyzer(check_indexes)
self._db_uri = db_uri
self._slowms = slowms
self._verbose = verbose
self._requested_namespaces = self._validate_namespaces(namespaces_list)
self._recommendation_cache = []
self._report = ReportAggregation()
self._start_time = None
self._timeout_time = None
self._timeout = timeout
self._run_stats = self._get_initial_run_stats()
self._first_line = True
############################################################################
def generate_query_report(self, db_uri, query, db_name, collection_name):
"""Analyzes a single query"""
return self._query_analyzer.generate_query_report(db_uri,
query,
db_name,
collection_name)
############################################################################
def _process_query(self, input, parser):
self._run_stats['linesRead'] += 1
line_time = get_line_time(input)
if line_time is not None:
if ((self._run_stats['timeRange']['start'] is None) or
(self._run_stats['timeRange']['start'] > line_time)):
self._run_stats['timeRange']['start'] = line_time
if ((self._run_stats['timeRange']['end'] is None) or
(self._run_stats['timeRange']['end'] < line_time)):
self._run_stats['timeRange']['end'] = line_time
parsed = parser.parse(input)
if parsed is not None:
if parsed['supported']:
self._run_stats['linesAnalyzed'] += 1
namespace_tuple = self._tuplefy_namespace(parsed['ns'])
# If the query is for a requested namespace ....
if self._namespace_requested(parsed['ns']):
db_name = namespace_tuple[0]
collection_name = namespace_tuple[1]
query_report = None
if parsed['stats']['millis'] >= self._slowms:
try:
query_report = self.generate_query_report(self._db_uri,
parsed,
db_name,
collection_name)
except Exception as e:
#print traceback.print_exc()
return 1
if query_report is not None:
if query_report['recommendation'] is not None:
self._run_stats['linesWithRecommendations'] += 1
self._report.add_query_occurrence(query_report)
else:
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithTime'] += 1
self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] += int(parsed['stats']['millis'])
self._run_stats['unparsableLineInfo']['unparsedAvgTimeMillis'] = self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] / self._run_stats['unparsableLineInfo']['unparsableLinesWithTime']
else:
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithoutTime'] += 1
############################################################################
############################################################################
def watch_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
enabled_profile = False
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
if len(databases) != 1:
message = "Error: Please use namespaces (-n) to specify a single " \
+ "database for profile watching.\n"
sys.stderr.write(message)
return 1
database = databases[0]
db = connection[database]
initial_profile_level = db.profiling_level()
if initial_profile_level is pymongo.OFF:
message = "Profile level currently 0. Dex is setting profile " \
+ "level 1. To run --watch at profile level 2, " \
+ "enable profile level 2 before running Dex.\n"
sys.stderr.write(message)
db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
self._process_query(profile_entry,
profile_parser)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
if initial_profile_level is pymongo.OFF:
message = "Dex is resetting profile level to initial value " \
+ "of 0. You may wish to drop the system.profile " \
+ "collection.\n"
sys.stderr.write(message)
db.set_profiling_level(initial_profile_level)
return 0
############################################################################
def analyze_logfile(self, logfile_path):
self._run_stats['logSource'] = logfile_path
"""Analyzes queries from a given log file"""
with open(logfile_path) as obj:
self.analyze_logfile_object(obj)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def analyze_logfile_object(self, file_object):
"""Analyzes queries from a given log file"""
log_parser = LogParser()
if self._start_time is None:
self._start_time = datetime.now()
if self._timeout != 0:
self._end_time = self._start_time + timedelta(minutes=self._timeout)
else:
self._end_time = None
# For each line in the logfile ...
for line in file_object:
if self._end_time is not None and datetime.now() > self._end_time:
self._run_stats['timedOut'] = True
self._run_stats['timeoutInMinutes'] = self._timeout
break
self._process_query(line, log_parser)
return 0
############################################################################
def watch_logfile(self, logfile_path):
"""Analyzes queries from the tail of a given log file"""
self._run_stats['logSource'] = logfile_path
log_parser = LogParser()
# For each new line in the logfile ...
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
firstLine = True
for line in self._tail_file(open(logfile_path),
WATCH_INTERVAL_SECONDS):
if firstLine:
self._run_stats['timeRange']['start'] = get_line_time(line)
self._process_query(line, log_parser)
self._run_stats['timeRange']['end'] = get_line_time(line)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def _get_initial_run_stats(self):
"""Singlesource for initializing an output dict"""
return OrderedDict([('linesWithRecommendations', 0),
('linesAnalyzed', 0),
('linesRead', 0),
('dexTime', datetime.utcnow()),
('logSource', None),
('timeRange', OrderedDict([('start', None),
('end', None)])),
('unparsableLineInfo', OrderedDict([('unparsableLines', 0),
('unparsableLinesWithoutTime', 0),
('unparsableLinesWithTime', 0),
('unparsedTimeMillis', 0),
('unparsedAvgTimeMillis', 0)]))])
############################################################################
def _make_aggregated_report(self):
output = OrderedDict([('runStats', self._run_stats),
('results', self._report.get_reports())])
return output
############################################################################
def _output_aggregated_report(self, out):
out.write(pretty_json(self._make_aggregated_report()).replace('"', "'").replace("\\'", '"') + "\n")
############################################################################
def _tail_file(self, file, interval):
"""Tails a file"""
file.seek(0,2)
while True:
where = file.tell()
line = file.readline()
if not line:
time.sleep(interval)
file.seek(where)
else:
yield line
############################################################################
def _tail_profile(self, db, interval):
"""Tails the system.profile collection"""
latest_doc = None
while latest_doc is None:
time.sleep(interval)
latest_doc = db['system.profile'].find_one()
current_time = latest_doc['ts']
while True:
time.sleep(interval)
cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)
for doc in cursor:
current_time = doc['ts']
yield doc
############################################################################
def _tuplefy_namespace(self, namespace):
"""Converts a mongodb namespace to a db, collection tuple"""
namespace_split = namespace.split('.', 1)
if len(namespace_split) is 1:
# we treat a single element as a collection name.
# this also properly tuplefies '*'
namespace_tuple = ('*', namespace_split[0])
elif len(namespace_split) is 2:
namespace_tuple = (namespace_split[0],namespace_split[1])
else:
return None
return namespace_tuple
############################################################################
# Need to add rejection of true regex attempts.
def _validate_namespaces(self, input_namespaces):
"""Converts a list of db namespaces to a list of namespace tuples,
supporting basic commandline wildcards"""
output_namespaces = []
if input_namespaces == []:
return output_namespaces
elif '*' in input_namespaces:
if len(input_namespaces) > 1:
warning = 'Warning: Multiple namespaces are '
warning += 'ignored when one namespace is "*"\n'
sys.stderr.write(warning)
return output_namespaces
else:
for namespace in input_namespaces:
if not isinstance(namespace, unicode):
namespace = unicode(namespace)
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple is None:
warning = 'Warning: Invalid namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
else:
if namespace_tuple not in output_namespaces:
output_namespaces.append(namespace_tuple)
else:
warning = 'Warning: Duplicate namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
return output_namespaces
############################################################################
def _namespace_requested(self, namespace):
"""Checks whether the requested_namespaces contain the provided
namespace"""
if namespace is None:
return False
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple[0] in IGNORE_DBS:
return False
elif namespace_tuple[1] in IGNORE_COLLECTIONS:
return False
else:
return self._tuple_requested(namespace_tuple)
############################################################################
def _tuple_requested(self, namespace_tuple):
"""Helper for _namespace_requested. Supports limited wildcards"""
if not isinstance(namespace_tuple[0], unicode):
encoded_db = unicode(namespace_tuple[0])
else:
encoded_db = namespace_tuple[0]
if not isinstance(namespace_tuple[1], unicode):
encoded_coll = unicode(namespace_tuple[1])
else:
encoded_coll = namespace_tuple[1]
if namespace_tuple is None:
return False
elif len(self._requested_namespaces) is 0:
return True
for requested_namespace in self._requested_namespaces:
if ((((requested_namespace[0]) == u'*') or
(encoded_db == requested_namespace[0])) and
(((requested_namespace[1]) == u'*') or
(encoded_coll == requested_namespace[1]))):
return True
return False
############################################################################
def _get_requested_databases(self):
"""Returns a list of databases requested, not including ignored dbs"""
requested_databases = []
if ((self._requested_namespaces is not None) and
(self._requested_namespaces != [])):
for requested_namespace in self._requested_namespaces:
if requested_namespace[0] is '*':
return []
elif requested_namespace[0] not in IGNORE_DBS:
requested_databases.append(requested_namespace[0])
return requested_databases
|
mongolab/dex | dex/dex.py | Dex.watch_profile | python | def watch_profile(self):
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
enabled_profile = False
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
if len(databases) != 1:
message = "Error: Please use namespaces (-n) to specify a single " \
+ "database for profile watching.\n"
sys.stderr.write(message)
return 1
database = databases[0]
db = connection[database]
initial_profile_level = db.profiling_level()
if initial_profile_level is pymongo.OFF:
message = "Profile level currently 0. Dex is setting profile " \
+ "level 1. To run --watch at profile level 2, " \
+ "enable profile level 2 before running Dex.\n"
sys.stderr.write(message)
db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
self._process_query(profile_entry,
profile_parser)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
if initial_profile_level is pymongo.OFF:
message = "Dex is resetting profile level to initial value " \
+ "of 0. You may wish to drop the system.profile " \
+ "collection.\n"
sys.stderr.write(message)
db.set_profiling_level(initial_profile_level)
return 0 | Analyzes queries from a given log file | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L169-L228 | [
"def _process_query(self, input, parser):\n self._run_stats['linesRead'] += 1\n\n line_time = get_line_time(input)\n\n if line_time is not None:\n if ((self._run_stats['timeRange']['start'] is None) or\n (self._run_stats['timeRange']['start'] > line_time)):\n self._run_stats['t... | class Dex:
############################################################################
def __init__(self, db_uri, verbose, namespaces_list, slowms, check_indexes, timeout):
self._check_indexes = check_indexes
self._query_analyzer = QueryAnalyzer(check_indexes)
self._db_uri = db_uri
self._slowms = slowms
self._verbose = verbose
self._requested_namespaces = self._validate_namespaces(namespaces_list)
self._recommendation_cache = []
self._report = ReportAggregation()
self._start_time = None
self._timeout_time = None
self._timeout = timeout
self._run_stats = self._get_initial_run_stats()
self._first_line = True
############################################################################
def generate_query_report(self, db_uri, query, db_name, collection_name):
"""Analyzes a single query"""
return self._query_analyzer.generate_query_report(db_uri,
query,
db_name,
collection_name)
############################################################################
def _process_query(self, input, parser):
self._run_stats['linesRead'] += 1
line_time = get_line_time(input)
if line_time is not None:
if ((self._run_stats['timeRange']['start'] is None) or
(self._run_stats['timeRange']['start'] > line_time)):
self._run_stats['timeRange']['start'] = line_time
if ((self._run_stats['timeRange']['end'] is None) or
(self._run_stats['timeRange']['end'] < line_time)):
self._run_stats['timeRange']['end'] = line_time
parsed = parser.parse(input)
if parsed is not None:
if parsed['supported']:
self._run_stats['linesAnalyzed'] += 1
namespace_tuple = self._tuplefy_namespace(parsed['ns'])
# If the query is for a requested namespace ....
if self._namespace_requested(parsed['ns']):
db_name = namespace_tuple[0]
collection_name = namespace_tuple[1]
query_report = None
if parsed['stats']['millis'] >= self._slowms:
try:
query_report = self.generate_query_report(self._db_uri,
parsed,
db_name,
collection_name)
except Exception as e:
#print traceback.print_exc()
return 1
if query_report is not None:
if query_report['recommendation'] is not None:
self._run_stats['linesWithRecommendations'] += 1
self._report.add_query_occurrence(query_report)
else:
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithTime'] += 1
self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] += int(parsed['stats']['millis'])
self._run_stats['unparsableLineInfo']['unparsedAvgTimeMillis'] = self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] / self._run_stats['unparsableLineInfo']['unparsableLinesWithTime']
else:
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithoutTime'] += 1
############################################################################
def analyze_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI or provide " \
+ "a namespace filter with -n.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
for database in databases:
db = connection[database]
profile_entries = db['system.profile'].find()
for profile_entry in profile_entries:
self._process_query(profile_entry,
profile_parser)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
############################################################################
def analyze_logfile(self, logfile_path):
self._run_stats['logSource'] = logfile_path
"""Analyzes queries from a given log file"""
with open(logfile_path) as obj:
self.analyze_logfile_object(obj)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def analyze_logfile_object(self, file_object):
"""Analyzes queries from a given log file"""
log_parser = LogParser()
if self._start_time is None:
self._start_time = datetime.now()
if self._timeout != 0:
self._end_time = self._start_time + timedelta(minutes=self._timeout)
else:
self._end_time = None
# For each line in the logfile ...
for line in file_object:
if self._end_time is not None and datetime.now() > self._end_time:
self._run_stats['timedOut'] = True
self._run_stats['timeoutInMinutes'] = self._timeout
break
self._process_query(line, log_parser)
return 0
############################################################################
def watch_logfile(self, logfile_path):
"""Analyzes queries from the tail of a given log file"""
self._run_stats['logSource'] = logfile_path
log_parser = LogParser()
# For each new line in the logfile ...
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
firstLine = True
for line in self._tail_file(open(logfile_path),
WATCH_INTERVAL_SECONDS):
if firstLine:
self._run_stats['timeRange']['start'] = get_line_time(line)
self._process_query(line, log_parser)
self._run_stats['timeRange']['end'] = get_line_time(line)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def _get_initial_run_stats(self):
"""Singlesource for initializing an output dict"""
return OrderedDict([('linesWithRecommendations', 0),
('linesAnalyzed', 0),
('linesRead', 0),
('dexTime', datetime.utcnow()),
('logSource', None),
('timeRange', OrderedDict([('start', None),
('end', None)])),
('unparsableLineInfo', OrderedDict([('unparsableLines', 0),
('unparsableLinesWithoutTime', 0),
('unparsableLinesWithTime', 0),
('unparsedTimeMillis', 0),
('unparsedAvgTimeMillis', 0)]))])
############################################################################
def _make_aggregated_report(self):
output = OrderedDict([('runStats', self._run_stats),
('results', self._report.get_reports())])
return output
############################################################################
def _output_aggregated_report(self, out):
out.write(pretty_json(self._make_aggregated_report()).replace('"', "'").replace("\\'", '"') + "\n")
############################################################################
def _tail_file(self, file, interval):
"""Tails a file"""
file.seek(0,2)
while True:
where = file.tell()
line = file.readline()
if not line:
time.sleep(interval)
file.seek(where)
else:
yield line
############################################################################
def _tail_profile(self, db, interval):
"""Tails the system.profile collection"""
latest_doc = None
while latest_doc is None:
time.sleep(interval)
latest_doc = db['system.profile'].find_one()
current_time = latest_doc['ts']
while True:
time.sleep(interval)
cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)
for doc in cursor:
current_time = doc['ts']
yield doc
############################################################################
def _tuplefy_namespace(self, namespace):
"""Converts a mongodb namespace to a db, collection tuple"""
namespace_split = namespace.split('.', 1)
if len(namespace_split) is 1:
# we treat a single element as a collection name.
# this also properly tuplefies '*'
namespace_tuple = ('*', namespace_split[0])
elif len(namespace_split) is 2:
namespace_tuple = (namespace_split[0],namespace_split[1])
else:
return None
return namespace_tuple
############################################################################
# Need to add rejection of true regex attempts.
def _validate_namespaces(self, input_namespaces):
"""Converts a list of db namespaces to a list of namespace tuples,
supporting basic commandline wildcards"""
output_namespaces = []
if input_namespaces == []:
return output_namespaces
elif '*' in input_namespaces:
if len(input_namespaces) > 1:
warning = 'Warning: Multiple namespaces are '
warning += 'ignored when one namespace is "*"\n'
sys.stderr.write(warning)
return output_namespaces
else:
for namespace in input_namespaces:
if not isinstance(namespace, unicode):
namespace = unicode(namespace)
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple is None:
warning = 'Warning: Invalid namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
else:
if namespace_tuple not in output_namespaces:
output_namespaces.append(namespace_tuple)
else:
warning = 'Warning: Duplicate namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
return output_namespaces
############################################################################
def _namespace_requested(self, namespace):
"""Checks whether the requested_namespaces contain the provided
namespace"""
if namespace is None:
return False
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple[0] in IGNORE_DBS:
return False
elif namespace_tuple[1] in IGNORE_COLLECTIONS:
return False
else:
return self._tuple_requested(namespace_tuple)
############################################################################
def _tuple_requested(self, namespace_tuple):
"""Helper for _namespace_requested. Supports limited wildcards"""
if not isinstance(namespace_tuple[0], unicode):
encoded_db = unicode(namespace_tuple[0])
else:
encoded_db = namespace_tuple[0]
if not isinstance(namespace_tuple[1], unicode):
encoded_coll = unicode(namespace_tuple[1])
else:
encoded_coll = namespace_tuple[1]
if namespace_tuple is None:
return False
elif len(self._requested_namespaces) is 0:
return True
for requested_namespace in self._requested_namespaces:
if ((((requested_namespace[0]) == u'*') or
(encoded_db == requested_namespace[0])) and
(((requested_namespace[1]) == u'*') or
(encoded_coll == requested_namespace[1]))):
return True
return False
############################################################################
def _get_requested_databases(self):
"""Returns a list of databases requested, not including ignored dbs"""
requested_databases = []
if ((self._requested_namespaces is not None) and
(self._requested_namespaces != [])):
for requested_namespace in self._requested_namespaces:
if requested_namespace[0] is '*':
return []
elif requested_namespace[0] not in IGNORE_DBS:
requested_databases.append(requested_namespace[0])
return requested_databases
|
mongolab/dex | dex/dex.py | Dex.analyze_logfile | python | def analyze_logfile(self, logfile_path):
self._run_stats['logSource'] = logfile_path
with open(logfile_path) as obj:
self.analyze_logfile_object(obj)
self._output_aggregated_report(sys.stdout)
return 0 | Analyzes queries from a given log file | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L231-L239 | [
"def analyze_logfile_object(self, file_object):\n \"\"\"Analyzes queries from a given log file\"\"\"\n log_parser = LogParser()\n\n if self._start_time is None:\n self._start_time = datetime.now()\n if self._timeout != 0:\n self._end_time = self._start_time + timedelta(minutes=self... | class Dex:
############################################################################
    def __init__(self, db_uri, verbose, namespaces_list, slowms, check_indexes, timeout):
        """Initialize a Dex run.

        db_uri -- MongoDB connection URI used for index checks/profiling.
        verbose -- verbosity flag (stored; not read in the visible methods).
        namespaces_list -- list of 'db.collection' strings ('*' supported).
        slowms -- only queries at or above this many millis are analyzed.
        check_indexes -- passed through to QueryAnalyzer.
        timeout -- log-analysis time budget in minutes (0 = unlimited).
        """
        self._check_indexes = check_indexes
        self._query_analyzer = QueryAnalyzer(check_indexes)
        self._db_uri = db_uri
        self._slowms = slowms
        self._verbose = verbose
        # Normalized list of (db, collection) tuples used for filtering.
        self._requested_namespaces = self._validate_namespaces(namespaces_list)
        self._recommendation_cache = []
        # Aggregates per-query reports across the whole run.
        self._report = ReportAggregation()
        self._start_time = None
        # NOTE(review): _timeout_time and _first_line appear unused in the
        # visible methods (analyze_logfile_object sets _end_time, and
        # watch_logfile uses a local firstLine) -- possibly dead state.
        self._timeout_time = None
        self._timeout = timeout
        self._run_stats = self._get_initial_run_stats()
        self._first_line = True
############################################################################
def generate_query_report(self, db_uri, query, db_name, collection_name):
"""Analyzes a single query"""
return self._query_analyzer.generate_query_report(db_uri,
query,
db_name,
collection_name)
############################################################################
    def _process_query(self, input, parser):
        """Parse one log line / profile entry and fold it into run stats.

        input -- raw log line (log mode) or profile document (profile mode).
        parser -- LogParser or ProfileParser; must expose parse(input).
        Side effects only: updates self._run_stats and self._report.
        """
        self._run_stats['linesRead'] += 1
        line_time = get_line_time(input)
        if line_time is not None:
            # Widen the observed time range to include this entry.
            if ((self._run_stats['timeRange']['start'] is None) or
                (self._run_stats['timeRange']['start'] > line_time)):
                self._run_stats['timeRange']['start'] = line_time
            if ((self._run_stats['timeRange']['end'] is None) or
                (self._run_stats['timeRange']['end'] < line_time)):
                self._run_stats['timeRange']['end'] = line_time
        parsed = parser.parse(input)
        if parsed is not None:
            if parsed['supported']:
                self._run_stats['linesAnalyzed'] += 1
                namespace_tuple = self._tuplefy_namespace(parsed['ns'])
                # If the query is for a requested namespace ....
                if self._namespace_requested(parsed['ns']):
                    db_name = namespace_tuple[0]
                    collection_name = namespace_tuple[1]
                    query_report = None
                    # Only analyze queries at or above the slow threshold.
                    if parsed['stats']['millis'] >= self._slowms:
                        try:
                            query_report = self.generate_query_report(self._db_uri,
                                                                      parsed,
                                                                      db_name,
                                                                      collection_name)
                        except Exception as e:
                            #print traceback.print_exc()
                            # NOTE(review): analysis errors abort this entry
                            # silently; callers ignore the 1 returned here.
                            return 1
                    if query_report is not None:
                        if query_report['recommendation'] is not None:
                            self._run_stats['linesWithRecommendations'] += 1
                        self._report.add_query_occurrence(query_report)
            else:
                # Parsed but unsupported: tracked as unparsable-with-time;
                # a running average of the unparsed time is maintained.
                self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
                self._run_stats['unparsableLineInfo']['unparsableLinesWithTime'] += 1
                self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] += int(parsed['stats']['millis'])
                self._run_stats['unparsableLineInfo']['unparsedAvgTimeMillis'] = self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] / self._run_stats['unparsableLineInfo']['unparsableLinesWithTime']
        else:
            # Completely unparsable: no timing information available.
            self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
            self._run_stats['unparsableLineInfo']['unparsableLinesWithoutTime'] += 1
############################################################################
    def analyze_profile(self):
        """Analyze all entries already present in system.profile.

        Iterates the system.profile collection of every requested (or every
        non-ignored) database on the server, feeds each entry through
        _process_query, then prints the aggregated report to stdout.
        Returns 0.  (Original docstring wrongly said "log file".)
        """
        profile_parser = ProfileParser()
        databases = self._get_requested_databases()
        connection = pymongo.MongoClient(self._db_uri,
                                         document_class=OrderedDict,
                                         read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
        if databases == []:
            # No explicit filter: fall back to every database on the server.
            try:
                databases = connection.database_names()
            except:
                message = "Error: Could not list databases on server. Please " \
                          + "check the auth components of your URI or provide " \
                          + "a namespace filter with -n.\n"
                sys.stderr.write(message)
                databases = []
        for ignore_db in IGNORE_DBS:
            if ignore_db in databases:
                databases.remove(ignore_db)
        for database in databases:
            db = connection[database]
            profile_entries = db['system.profile'].find()
            for profile_entry in profile_entries:
                self._process_query(profile_entry,
                                    profile_parser)
        self._output_aggregated_report(sys.stdout)
        return 0
############################################################################
def watch_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
enabled_profile = False
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
if len(databases) != 1:
message = "Error: Please use namespaces (-n) to specify a single " \
+ "database for profile watching.\n"
sys.stderr.write(message)
return 1
database = databases[0]
db = connection[database]
initial_profile_level = db.profiling_level()
if initial_profile_level is pymongo.OFF:
message = "Profile level currently 0. Dex is setting profile " \
+ "level 1. To run --watch at profile level 2, " \
+ "enable profile level 2 before running Dex.\n"
sys.stderr.write(message)
db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
self._process_query(profile_entry,
profile_parser)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
if initial_profile_level is pymongo.OFF:
message = "Dex is resetting profile level to initial value " \
+ "of 0. You may wish to drop the system.profile " \
+ "collection.\n"
sys.stderr.write(message)
db.set_profiling_level(initial_profile_level)
return 0
############################################################################
############################################################################
def analyze_logfile_object(self, file_object):
"""Analyzes queries from a given log file"""
log_parser = LogParser()
if self._start_time is None:
self._start_time = datetime.now()
if self._timeout != 0:
self._end_time = self._start_time + timedelta(minutes=self._timeout)
else:
self._end_time = None
# For each line in the logfile ...
for line in file_object:
if self._end_time is not None and datetime.now() > self._end_time:
self._run_stats['timedOut'] = True
self._run_stats['timeoutInMinutes'] = self._timeout
break
self._process_query(line, log_parser)
return 0
############################################################################
def watch_logfile(self, logfile_path):
"""Analyzes queries from the tail of a given log file"""
self._run_stats['logSource'] = logfile_path
log_parser = LogParser()
# For each new line in the logfile ...
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
firstLine = True
for line in self._tail_file(open(logfile_path),
WATCH_INTERVAL_SECONDS):
if firstLine:
self._run_stats['timeRange']['start'] = get_line_time(line)
self._process_query(line, log_parser)
self._run_stats['timeRange']['end'] = get_line_time(line)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def _get_initial_run_stats(self):
"""Singlesource for initializing an output dict"""
return OrderedDict([('linesWithRecommendations', 0),
('linesAnalyzed', 0),
('linesRead', 0),
('dexTime', datetime.utcnow()),
('logSource', None),
('timeRange', OrderedDict([('start', None),
('end', None)])),
('unparsableLineInfo', OrderedDict([('unparsableLines', 0),
('unparsableLinesWithoutTime', 0),
('unparsableLinesWithTime', 0),
('unparsedTimeMillis', 0),
('unparsedAvgTimeMillis', 0)]))])
############################################################################
def _make_aggregated_report(self):
output = OrderedDict([('runStats', self._run_stats),
('results', self._report.get_reports())])
return output
############################################################################
def _output_aggregated_report(self, out):
out.write(pretty_json(self._make_aggregated_report()).replace('"', "'").replace("\\'", '"') + "\n")
############################################################################
def _tail_file(self, file, interval):
"""Tails a file"""
file.seek(0,2)
while True:
where = file.tell()
line = file.readline()
if not line:
time.sleep(interval)
file.seek(where)
else:
yield line
############################################################################
def _tail_profile(self, db, interval):
"""Tails the system.profile collection"""
latest_doc = None
while latest_doc is None:
time.sleep(interval)
latest_doc = db['system.profile'].find_one()
current_time = latest_doc['ts']
while True:
time.sleep(interval)
cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)
for doc in cursor:
current_time = doc['ts']
yield doc
############################################################################
def _tuplefy_namespace(self, namespace):
"""Converts a mongodb namespace to a db, collection tuple"""
namespace_split = namespace.split('.', 1)
if len(namespace_split) is 1:
# we treat a single element as a collection name.
# this also properly tuplefies '*'
namespace_tuple = ('*', namespace_split[0])
elif len(namespace_split) is 2:
namespace_tuple = (namespace_split[0],namespace_split[1])
else:
return None
return namespace_tuple
############################################################################
# Need to add rejection of true regex attempts.
def _validate_namespaces(self, input_namespaces):
"""Converts a list of db namespaces to a list of namespace tuples,
supporting basic commandline wildcards"""
output_namespaces = []
if input_namespaces == []:
return output_namespaces
elif '*' in input_namespaces:
if len(input_namespaces) > 1:
warning = 'Warning: Multiple namespaces are '
warning += 'ignored when one namespace is "*"\n'
sys.stderr.write(warning)
return output_namespaces
else:
for namespace in input_namespaces:
if not isinstance(namespace, unicode):
namespace = unicode(namespace)
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple is None:
warning = 'Warning: Invalid namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
else:
if namespace_tuple not in output_namespaces:
output_namespaces.append(namespace_tuple)
else:
warning = 'Warning: Duplicate namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
return output_namespaces
############################################################################
def _namespace_requested(self, namespace):
"""Checks whether the requested_namespaces contain the provided
namespace"""
if namespace is None:
return False
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple[0] in IGNORE_DBS:
return False
elif namespace_tuple[1] in IGNORE_COLLECTIONS:
return False
else:
return self._tuple_requested(namespace_tuple)
############################################################################
def _tuple_requested(self, namespace_tuple):
"""Helper for _namespace_requested. Supports limited wildcards"""
if not isinstance(namespace_tuple[0], unicode):
encoded_db = unicode(namespace_tuple[0])
else:
encoded_db = namespace_tuple[0]
if not isinstance(namespace_tuple[1], unicode):
encoded_coll = unicode(namespace_tuple[1])
else:
encoded_coll = namespace_tuple[1]
if namespace_tuple is None:
return False
elif len(self._requested_namespaces) is 0:
return True
for requested_namespace in self._requested_namespaces:
if ((((requested_namespace[0]) == u'*') or
(encoded_db == requested_namespace[0])) and
(((requested_namespace[1]) == u'*') or
(encoded_coll == requested_namespace[1]))):
return True
return False
############################################################################
def _get_requested_databases(self):
"""Returns a list of databases requested, not including ignored dbs"""
requested_databases = []
if ((self._requested_namespaces is not None) and
(self._requested_namespaces != [])):
for requested_namespace in self._requested_namespaces:
if requested_namespace[0] is '*':
return []
elif requested_namespace[0] not in IGNORE_DBS:
requested_databases.append(requested_namespace[0])
return requested_databases
|
mongolab/dex | dex/dex.py | Dex.analyze_logfile_object | python | def analyze_logfile_object(self, file_object):
log_parser = LogParser()
if self._start_time is None:
self._start_time = datetime.now()
if self._timeout != 0:
self._end_time = self._start_time + timedelta(minutes=self._timeout)
else:
self._end_time = None
# For each line in the logfile ...
for line in file_object:
if self._end_time is not None and datetime.now() > self._end_time:
self._run_stats['timedOut'] = True
self._run_stats['timeoutInMinutes'] = self._timeout
break
self._process_query(line, log_parser)
return 0 | Analyzes queries from a given log file | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L242-L261 | [
"def _process_query(self, input, parser):\n self._run_stats['linesRead'] += 1\n\n line_time = get_line_time(input)\n\n if line_time is not None:\n if ((self._run_stats['timeRange']['start'] is None) or\n (self._run_stats['timeRange']['start'] > line_time)):\n self._run_stats['t... | class Dex:
############################################################################
    def __init__(self, db_uri, verbose, namespaces_list, slowms, check_indexes, timeout):
        """Initialize a Dex run.

        db_uri -- MongoDB connection URI used for index checks/profiling.
        verbose -- verbosity flag (stored; not read in the visible methods).
        namespaces_list -- list of 'db.collection' strings ('*' supported).
        slowms -- only queries at or above this many millis are analyzed.
        check_indexes -- passed through to QueryAnalyzer.
        timeout -- log-analysis time budget in minutes (0 = unlimited).
        """
        self._check_indexes = check_indexes
        self._query_analyzer = QueryAnalyzer(check_indexes)
        self._db_uri = db_uri
        self._slowms = slowms
        self._verbose = verbose
        # Normalized list of (db, collection) tuples used for filtering.
        self._requested_namespaces = self._validate_namespaces(namespaces_list)
        self._recommendation_cache = []
        # Aggregates per-query reports across the whole run.
        self._report = ReportAggregation()
        self._start_time = None
        # NOTE(review): _timeout_time and _first_line appear unused in the
        # visible methods (analyze_logfile_object sets _end_time, and
        # watch_logfile uses a local firstLine) -- possibly dead state.
        self._timeout_time = None
        self._timeout = timeout
        self._run_stats = self._get_initial_run_stats()
        self._first_line = True
############################################################################
def generate_query_report(self, db_uri, query, db_name, collection_name):
"""Analyzes a single query"""
return self._query_analyzer.generate_query_report(db_uri,
query,
db_name,
collection_name)
############################################################################
    def _process_query(self, input, parser):
        """Parse one log line / profile entry and fold it into run stats.

        input -- raw log line (log mode) or profile document (profile mode).
        parser -- LogParser or ProfileParser; must expose parse(input).
        Side effects only: updates self._run_stats and self._report.
        """
        self._run_stats['linesRead'] += 1
        line_time = get_line_time(input)
        if line_time is not None:
            # Widen the observed time range to include this entry.
            if ((self._run_stats['timeRange']['start'] is None) or
                (self._run_stats['timeRange']['start'] > line_time)):
                self._run_stats['timeRange']['start'] = line_time
            if ((self._run_stats['timeRange']['end'] is None) or
                (self._run_stats['timeRange']['end'] < line_time)):
                self._run_stats['timeRange']['end'] = line_time
        parsed = parser.parse(input)
        if parsed is not None:
            if parsed['supported']:
                self._run_stats['linesAnalyzed'] += 1
                namespace_tuple = self._tuplefy_namespace(parsed['ns'])
                # If the query is for a requested namespace ....
                if self._namespace_requested(parsed['ns']):
                    db_name = namespace_tuple[0]
                    collection_name = namespace_tuple[1]
                    query_report = None
                    # Only analyze queries at or above the slow threshold.
                    if parsed['stats']['millis'] >= self._slowms:
                        try:
                            query_report = self.generate_query_report(self._db_uri,
                                                                      parsed,
                                                                      db_name,
                                                                      collection_name)
                        except Exception as e:
                            #print traceback.print_exc()
                            # NOTE(review): analysis errors abort this entry
                            # silently; callers ignore the 1 returned here.
                            return 1
                    if query_report is not None:
                        if query_report['recommendation'] is not None:
                            self._run_stats['linesWithRecommendations'] += 1
                        self._report.add_query_occurrence(query_report)
            else:
                # Parsed but unsupported: tracked as unparsable-with-time;
                # a running average of the unparsed time is maintained.
                self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
                self._run_stats['unparsableLineInfo']['unparsableLinesWithTime'] += 1
                self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] += int(parsed['stats']['millis'])
                self._run_stats['unparsableLineInfo']['unparsedAvgTimeMillis'] = self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] / self._run_stats['unparsableLineInfo']['unparsableLinesWithTime']
        else:
            # Completely unparsable: no timing information available.
            self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
            self._run_stats['unparsableLineInfo']['unparsableLinesWithoutTime'] += 1
############################################################################
    def analyze_profile(self):
        """Analyze all entries already present in system.profile.

        Iterates the system.profile collection of every requested (or every
        non-ignored) database on the server, feeds each entry through
        _process_query, then prints the aggregated report to stdout.
        Returns 0.  (Original docstring wrongly said "log file".)
        """
        profile_parser = ProfileParser()
        databases = self._get_requested_databases()
        connection = pymongo.MongoClient(self._db_uri,
                                         document_class=OrderedDict,
                                         read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
        if databases == []:
            # No explicit filter: fall back to every database on the server.
            try:
                databases = connection.database_names()
            except:
                message = "Error: Could not list databases on server. Please " \
                          + "check the auth components of your URI or provide " \
                          + "a namespace filter with -n.\n"
                sys.stderr.write(message)
                databases = []
        for ignore_db in IGNORE_DBS:
            if ignore_db in databases:
                databases.remove(ignore_db)
        for database in databases:
            db = connection[database]
            profile_entries = db['system.profile'].find()
            for profile_entry in profile_entries:
                self._process_query(profile_entry,
                                    profile_parser)
        self._output_aggregated_report(sys.stdout)
        return 0
############################################################################
def watch_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
enabled_profile = False
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
if len(databases) != 1:
message = "Error: Please use namespaces (-n) to specify a single " \
+ "database for profile watching.\n"
sys.stderr.write(message)
return 1
database = databases[0]
db = connection[database]
initial_profile_level = db.profiling_level()
if initial_profile_level is pymongo.OFF:
message = "Profile level currently 0. Dex is setting profile " \
+ "level 1. To run --watch at profile level 2, " \
+ "enable profile level 2 before running Dex.\n"
sys.stderr.write(message)
db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
self._process_query(profile_entry,
profile_parser)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
if initial_profile_level is pymongo.OFF:
message = "Dex is resetting profile level to initial value " \
+ "of 0. You may wish to drop the system.profile " \
+ "collection.\n"
sys.stderr.write(message)
db.set_profiling_level(initial_profile_level)
return 0
############################################################################
def analyze_logfile(self, logfile_path):
self._run_stats['logSource'] = logfile_path
"""Analyzes queries from a given log file"""
with open(logfile_path) as obj:
self.analyze_logfile_object(obj)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
############################################################################
def watch_logfile(self, logfile_path):
"""Analyzes queries from the tail of a given log file"""
self._run_stats['logSource'] = logfile_path
log_parser = LogParser()
# For each new line in the logfile ...
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
firstLine = True
for line in self._tail_file(open(logfile_path),
WATCH_INTERVAL_SECONDS):
if firstLine:
self._run_stats['timeRange']['start'] = get_line_time(line)
self._process_query(line, log_parser)
self._run_stats['timeRange']['end'] = get_line_time(line)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def _get_initial_run_stats(self):
"""Singlesource for initializing an output dict"""
return OrderedDict([('linesWithRecommendations', 0),
('linesAnalyzed', 0),
('linesRead', 0),
('dexTime', datetime.utcnow()),
('logSource', None),
('timeRange', OrderedDict([('start', None),
('end', None)])),
('unparsableLineInfo', OrderedDict([('unparsableLines', 0),
('unparsableLinesWithoutTime', 0),
('unparsableLinesWithTime', 0),
('unparsedTimeMillis', 0),
('unparsedAvgTimeMillis', 0)]))])
############################################################################
def _make_aggregated_report(self):
output = OrderedDict([('runStats', self._run_stats),
('results', self._report.get_reports())])
return output
############################################################################
def _output_aggregated_report(self, out):
out.write(pretty_json(self._make_aggregated_report()).replace('"', "'").replace("\\'", '"') + "\n")
############################################################################
def _tail_file(self, file, interval):
"""Tails a file"""
file.seek(0,2)
while True:
where = file.tell()
line = file.readline()
if not line:
time.sleep(interval)
file.seek(where)
else:
yield line
############################################################################
def _tail_profile(self, db, interval):
"""Tails the system.profile collection"""
latest_doc = None
while latest_doc is None:
time.sleep(interval)
latest_doc = db['system.profile'].find_one()
current_time = latest_doc['ts']
while True:
time.sleep(interval)
cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)
for doc in cursor:
current_time = doc['ts']
yield doc
############################################################################
def _tuplefy_namespace(self, namespace):
"""Converts a mongodb namespace to a db, collection tuple"""
namespace_split = namespace.split('.', 1)
if len(namespace_split) is 1:
# we treat a single element as a collection name.
# this also properly tuplefies '*'
namespace_tuple = ('*', namespace_split[0])
elif len(namespace_split) is 2:
namespace_tuple = (namespace_split[0],namespace_split[1])
else:
return None
return namespace_tuple
############################################################################
# Need to add rejection of true regex attempts.
def _validate_namespaces(self, input_namespaces):
"""Converts a list of db namespaces to a list of namespace tuples,
supporting basic commandline wildcards"""
output_namespaces = []
if input_namespaces == []:
return output_namespaces
elif '*' in input_namespaces:
if len(input_namespaces) > 1:
warning = 'Warning: Multiple namespaces are '
warning += 'ignored when one namespace is "*"\n'
sys.stderr.write(warning)
return output_namespaces
else:
for namespace in input_namespaces:
if not isinstance(namespace, unicode):
namespace = unicode(namespace)
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple is None:
warning = 'Warning: Invalid namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
else:
if namespace_tuple not in output_namespaces:
output_namespaces.append(namespace_tuple)
else:
warning = 'Warning: Duplicate namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
return output_namespaces
############################################################################
def _namespace_requested(self, namespace):
"""Checks whether the requested_namespaces contain the provided
namespace"""
if namespace is None:
return False
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple[0] in IGNORE_DBS:
return False
elif namespace_tuple[1] in IGNORE_COLLECTIONS:
return False
else:
return self._tuple_requested(namespace_tuple)
############################################################################
def _tuple_requested(self, namespace_tuple):
"""Helper for _namespace_requested. Supports limited wildcards"""
if not isinstance(namespace_tuple[0], unicode):
encoded_db = unicode(namespace_tuple[0])
else:
encoded_db = namespace_tuple[0]
if not isinstance(namespace_tuple[1], unicode):
encoded_coll = unicode(namespace_tuple[1])
else:
encoded_coll = namespace_tuple[1]
if namespace_tuple is None:
return False
elif len(self._requested_namespaces) is 0:
return True
for requested_namespace in self._requested_namespaces:
if ((((requested_namespace[0]) == u'*') or
(encoded_db == requested_namespace[0])) and
(((requested_namespace[1]) == u'*') or
(encoded_coll == requested_namespace[1]))):
return True
return False
############################################################################
def _get_requested_databases(self):
"""Returns a list of databases requested, not including ignored dbs"""
requested_databases = []
if ((self._requested_namespaces is not None) and
(self._requested_namespaces != [])):
for requested_namespace in self._requested_namespaces:
if requested_namespace[0] is '*':
return []
elif requested_namespace[0] not in IGNORE_DBS:
requested_databases.append(requested_namespace[0])
return requested_databases
|
mongolab/dex | dex/dex.py | Dex.watch_logfile | python | def watch_logfile(self, logfile_path):
self._run_stats['logSource'] = logfile_path
log_parser = LogParser()
# For each new line in the logfile ...
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
firstLine = True
for line in self._tail_file(open(logfile_path),
WATCH_INTERVAL_SECONDS):
if firstLine:
self._run_stats['timeRange']['start'] = get_line_time(line)
self._process_query(line, log_parser)
self._run_stats['timeRange']['end'] = get_line_time(line)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
return 0 | Analyzes queries from the tail of a given log file | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L264-L287 | [
"def _process_query(self, input, parser):\n self._run_stats['linesRead'] += 1\n\n line_time = get_line_time(input)\n\n if line_time is not None:\n if ((self._run_stats['timeRange']['start'] is None) or\n (self._run_stats['timeRange']['start'] > line_time)):\n self._run_stats['t... | class Dex:
############################################################################
    def __init__(self, db_uri, verbose, namespaces_list, slowms, check_indexes, timeout):
        """Initialize a Dex run.

        db_uri -- MongoDB connection URI used for index checks/profiling.
        verbose -- verbosity flag (stored; not read in the visible methods).
        namespaces_list -- list of 'db.collection' strings ('*' supported).
        slowms -- only queries at or above this many millis are analyzed.
        check_indexes -- passed through to QueryAnalyzer.
        timeout -- log-analysis time budget in minutes (0 = unlimited).
        """
        self._check_indexes = check_indexes
        self._query_analyzer = QueryAnalyzer(check_indexes)
        self._db_uri = db_uri
        self._slowms = slowms
        self._verbose = verbose
        # Normalized list of (db, collection) tuples used for filtering.
        self._requested_namespaces = self._validate_namespaces(namespaces_list)
        self._recommendation_cache = []
        # Aggregates per-query reports across the whole run.
        self._report = ReportAggregation()
        self._start_time = None
        # NOTE(review): _timeout_time and _first_line appear unused in the
        # visible methods (analyze_logfile_object sets _end_time, and
        # watch_logfile uses a local firstLine) -- possibly dead state.
        self._timeout_time = None
        self._timeout = timeout
        self._run_stats = self._get_initial_run_stats()
        self._first_line = True
############################################################################
def generate_query_report(self, db_uri, query, db_name, collection_name):
"""Analyzes a single query"""
return self._query_analyzer.generate_query_report(db_uri,
query,
db_name,
collection_name)
############################################################################
    def _process_query(self, input, parser):
        """Parse one log line / profile entry and fold it into run stats.

        input -- raw log line (log mode) or profile document (profile mode).
        parser -- LogParser or ProfileParser; must expose parse(input).
        Side effects only: updates self._run_stats and self._report.
        """
        self._run_stats['linesRead'] += 1
        line_time = get_line_time(input)
        if line_time is not None:
            # Widen the observed time range to include this entry.
            if ((self._run_stats['timeRange']['start'] is None) or
                (self._run_stats['timeRange']['start'] > line_time)):
                self._run_stats['timeRange']['start'] = line_time
            if ((self._run_stats['timeRange']['end'] is None) or
                (self._run_stats['timeRange']['end'] < line_time)):
                self._run_stats['timeRange']['end'] = line_time
        parsed = parser.parse(input)
        if parsed is not None:
            if parsed['supported']:
                self._run_stats['linesAnalyzed'] += 1
                namespace_tuple = self._tuplefy_namespace(parsed['ns'])
                # If the query is for a requested namespace ....
                if self._namespace_requested(parsed['ns']):
                    db_name = namespace_tuple[0]
                    collection_name = namespace_tuple[1]
                    query_report = None
                    # Only analyze queries at or above the slow threshold.
                    if parsed['stats']['millis'] >= self._slowms:
                        try:
                            query_report = self.generate_query_report(self._db_uri,
                                                                      parsed,
                                                                      db_name,
                                                                      collection_name)
                        except Exception as e:
                            #print traceback.print_exc()
                            # NOTE(review): analysis errors abort this entry
                            # silently; callers ignore the 1 returned here.
                            return 1
                    if query_report is not None:
                        if query_report['recommendation'] is not None:
                            self._run_stats['linesWithRecommendations'] += 1
                        self._report.add_query_occurrence(query_report)
            else:
                # Parsed but unsupported: tracked as unparsable-with-time;
                # a running average of the unparsed time is maintained.
                self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
                self._run_stats['unparsableLineInfo']['unparsableLinesWithTime'] += 1
                self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] += int(parsed['stats']['millis'])
                self._run_stats['unparsableLineInfo']['unparsedAvgTimeMillis'] = self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] / self._run_stats['unparsableLineInfo']['unparsableLinesWithTime']
        else:
            # Completely unparsable: no timing information available.
            self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
            self._run_stats['unparsableLineInfo']['unparsableLinesWithoutTime'] += 1
############################################################################
    def analyze_profile(self):
        """Analyze all entries already present in system.profile.

        Iterates the system.profile collection of every requested (or every
        non-ignored) database on the server, feeds each entry through
        _process_query, then prints the aggregated report to stdout.
        Returns 0.  (Original docstring wrongly said "log file".)
        """
        profile_parser = ProfileParser()
        databases = self._get_requested_databases()
        connection = pymongo.MongoClient(self._db_uri,
                                         document_class=OrderedDict,
                                         read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
        if databases == []:
            # No explicit filter: fall back to every database on the server.
            try:
                databases = connection.database_names()
            except:
                message = "Error: Could not list databases on server. Please " \
                          + "check the auth components of your URI or provide " \
                          + "a namespace filter with -n.\n"
                sys.stderr.write(message)
                databases = []
        for ignore_db in IGNORE_DBS:
            if ignore_db in databases:
                databases.remove(ignore_db)
        for database in databases:
            db = connection[database]
            profile_entries = db['system.profile'].find()
            for profile_entry in profile_entries:
                self._process_query(profile_entry,
                                    profile_parser)
        self._output_aggregated_report(sys.stdout)
        return 0
############################################################################
    def watch_profile(self):
        """Tails the system.profile collection of one database and emits
        the aggregated report every WATCH_DISPLAY_REFRESH_SECONDS.

        Exactly one non-ignored database must be requested/available;
        otherwise returns 1. When profiling is off it is temporarily set
        to DEFAULT_PROFILE_LEVEL and restored on exit. Returns 0.
        """
        profile_parser = ProfileParser()
        databases = self._get_requested_databases()
        connection = pymongo.MongoClient(self._db_uri,
                                         document_class=OrderedDict,
                                         read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
        enabled_profile = False  # NOTE(review): assigned but never read
        if databases == []:
            try:
                databases = connection.database_names()
            except:
                # NOTE(review): bare except; consider `except Exception`.
                message = "Error: Could not list databases on server. Please " \
                          + "check the auth components of your URI.\n"
                sys.stderr.write(message)
                databases = []
        for ignore_db in IGNORE_DBS:
            if ignore_db in databases:
                databases.remove(ignore_db)
        if len(databases) != 1:
            message = "Error: Please use namespaces (-n) to specify a single " \
                      + "database for profile watching.\n"
            sys.stderr.write(message)
            return 1
        database = databases[0]
        db = connection[database]
        # Remember the current profiling level so it can be restored below.
        initial_profile_level = db.profiling_level()
        if initial_profile_level is pymongo.OFF:
            message = "Profile level currently 0. Dex is setting profile " \
                      + "level 1. To run --watch at profile level 2, " \
                      + "enable profile level 2 before running Dex.\n"
            sys.stderr.write(message)
            db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
        output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
        try:
            for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
                self._process_query(profile_entry,
                                    profile_parser)
                # Periodic progress goes to stderr; the final report in the
                # finally-block goes to stdout.
                if time.time() >= output_time:
                    self._output_aggregated_report(sys.stderr)
                    output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
        except KeyboardInterrupt:
            sys.stderr.write("Interrupt received\n")
        finally:
            self._output_aggregated_report(sys.stdout)
            if initial_profile_level is pymongo.OFF:
                message = "Dex is resetting profile level to initial value " \
                          + "of 0. You may wish to drop the system.profile " \
                          + "collection.\n"
                sys.stderr.write(message)
                db.set_profiling_level(initial_profile_level)
        return 0
############################################################################
def analyze_logfile(self, logfile_path):
self._run_stats['logSource'] = logfile_path
"""Analyzes queries from a given log file"""
with open(logfile_path) as obj:
self.analyze_logfile_object(obj)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
    def analyze_logfile_object(self, file_object):
        """Analyzes queries from an open log file object, line by line.

        Honors self._timeout (minutes): when non-zero, processing stops
        once the deadline passes and the run stats record the timeout.
        Returns 0.
        """
        log_parser = LogParser()
        if self._start_time is None:
            self._start_time = datetime.now()
        if self._timeout != 0:
            self._end_time = self._start_time + timedelta(minutes=self._timeout)
        else:
            self._end_time = None
        # For each line in the logfile ...
        for line in file_object:
            if self._end_time is not None and datetime.now() > self._end_time:
                # Deadline reached: flag the timeout and stop early.
                self._run_stats['timedOut'] = True
                self._run_stats['timeoutInMinutes'] = self._timeout
                break
            self._process_query(line, log_parser)
        return 0
############################################################################
############################################################################
def _get_initial_run_stats(self):
"""Singlesource for initializing an output dict"""
return OrderedDict([('linesWithRecommendations', 0),
('linesAnalyzed', 0),
('linesRead', 0),
('dexTime', datetime.utcnow()),
('logSource', None),
('timeRange', OrderedDict([('start', None),
('end', None)])),
('unparsableLineInfo', OrderedDict([('unparsableLines', 0),
('unparsableLinesWithoutTime', 0),
('unparsableLinesWithTime', 0),
('unparsedTimeMillis', 0),
('unparsedAvgTimeMillis', 0)]))])
############################################################################
def _make_aggregated_report(self):
output = OrderedDict([('runStats', self._run_stats),
('results', self._report.get_reports())])
return output
############################################################################
def _output_aggregated_report(self, out):
out.write(pretty_json(self._make_aggregated_report()).replace('"', "'").replace("\\'", '"') + "\n")
############################################################################
def _tail_file(self, file, interval):
"""Tails a file"""
file.seek(0,2)
while True:
where = file.tell()
line = file.readline()
if not line:
time.sleep(interval)
file.seek(where)
else:
yield line
############################################################################
    def _tail_profile(self, db, interval):
        """Generator that tails db's system.profile collection, polling
        every *interval* seconds and yielding profile documents.

        Blocks until at least one document exists, then repeatedly queries
        for documents at or after the last seen 'ts'.
        NOTE(review): the '$gte' filter re-yields boundary document(s) on
        each pass when timestamps collide — confirm this is intended.
        """
        latest_doc = None
        while latest_doc is None:
            time.sleep(interval)
            latest_doc = db['system.profile'].find_one()
        current_time = latest_doc['ts']
        while True:
            time.sleep(interval)
            cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)
            for doc in cursor:
                current_time = doc['ts']
                yield doc
############################################################################
def _tuplefy_namespace(self, namespace):
"""Converts a mongodb namespace to a db, collection tuple"""
namespace_split = namespace.split('.', 1)
if len(namespace_split) is 1:
# we treat a single element as a collection name.
# this also properly tuplefies '*'
namespace_tuple = ('*', namespace_split[0])
elif len(namespace_split) is 2:
namespace_tuple = (namespace_split[0],namespace_split[1])
else:
return None
return namespace_tuple
############################################################################
# Need to add rejection of true regex attempts.
    def _validate_namespaces(self, input_namespaces):
        """Converts a list of db namespace strings to a list of namespace
        tuples, supporting basic commandline wildcards.

        A bare '*' means "everything" and yields an empty list (i.e. no
        filter); invalid and duplicate namespaces are dropped with a
        warning on stderr.
        """
        output_namespaces = []
        if input_namespaces == []:
            return output_namespaces
        elif '*' in input_namespaces:
            if len(input_namespaces) > 1:
                warning = 'Warning: Multiple namespaces are '
                warning += 'ignored when one namespace is "*"\n'
                sys.stderr.write(warning)
            # '*' alone disables filtering entirely.
            return output_namespaces
        else:
            for namespace in input_namespaces:
                # Normalize to unicode (Python 2) before tuplefying so later
                # comparisons against unicode literals behave consistently.
                if not isinstance(namespace, unicode):
                    namespace = unicode(namespace)
                namespace_tuple = self._tuplefy_namespace(namespace)
                if namespace_tuple is None:
                    warning = 'Warning: Invalid namespace ' + namespace
                    warning += ' will be ignored\n'
                    sys.stderr.write(warning)
                else:
                    if namespace_tuple not in output_namespaces:
                        output_namespaces.append(namespace_tuple)
                    else:
                        warning = 'Warning: Duplicate namespace ' + namespace
                        warning += ' will be ignored\n'
                        sys.stderr.write(warning)
        return output_namespaces
############################################################################
def _namespace_requested(self, namespace):
"""Checks whether the requested_namespaces contain the provided
namespace"""
if namespace is None:
return False
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple[0] in IGNORE_DBS:
return False
elif namespace_tuple[1] in IGNORE_COLLECTIONS:
return False
else:
return self._tuple_requested(namespace_tuple)
############################################################################
def _tuple_requested(self, namespace_tuple):
"""Helper for _namespace_requested. Supports limited wildcards"""
if not isinstance(namespace_tuple[0], unicode):
encoded_db = unicode(namespace_tuple[0])
else:
encoded_db = namespace_tuple[0]
if not isinstance(namespace_tuple[1], unicode):
encoded_coll = unicode(namespace_tuple[1])
else:
encoded_coll = namespace_tuple[1]
if namespace_tuple is None:
return False
elif len(self._requested_namespaces) is 0:
return True
for requested_namespace in self._requested_namespaces:
if ((((requested_namespace[0]) == u'*') or
(encoded_db == requested_namespace[0])) and
(((requested_namespace[1]) == u'*') or
(encoded_coll == requested_namespace[1]))):
return True
return False
############################################################################
def _get_requested_databases(self):
"""Returns a list of databases requested, not including ignored dbs"""
requested_databases = []
if ((self._requested_namespaces is not None) and
(self._requested_namespaces != [])):
for requested_namespace in self._requested_namespaces:
if requested_namespace[0] is '*':
return []
elif requested_namespace[0] not in IGNORE_DBS:
requested_databases.append(requested_namespace[0])
return requested_databases
|
mongolab/dex | dex/dex.py | Dex._tail_file | python | def _tail_file(self, file, interval):
file.seek(0,2)
while True:
where = file.tell()
line = file.readline()
if not line:
time.sleep(interval)
file.seek(where)
else:
yield line | Tails a file | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L316-L326 | null | class Dex:
############################################################################
    def __init__(self, db_uri, verbose, namespaces_list, slowms, check_indexes, timeout):
        """Set up a Dex analysis run.

        db_uri          -- MongoDB connection URI used for index checks
        verbose         -- verbosity flag (stored; not read in this class)
        namespaces_list -- 'db.collection' strings (supports '*' wildcards)
        slowms          -- minimum query duration (ms) to analyze
        check_indexes   -- whether to verify recommendations against live indexes
        timeout         -- per-run time limit in minutes (0 = unlimited)
        """
        self._check_indexes = check_indexes
        self._query_analyzer = QueryAnalyzer(check_indexes)
        self._db_uri = db_uri
        self._slowms = slowms
        self._verbose = verbose
        self._requested_namespaces = self._validate_namespaces(namespaces_list)
        self._recommendation_cache = []
        self._report = ReportAggregation()
        self._start_time = None
        self._timeout_time = None
        self._timeout = timeout
        self._run_stats = self._get_initial_run_stats()
        self._first_line = True
############################################################################
def generate_query_report(self, db_uri, query, db_name, collection_name):
"""Analyzes a single query"""
return self._query_analyzer.generate_query_report(db_uri,
query,
db_name,
collection_name)
############################################################################
    def _process_query(self, input, parser):
        """Parse one raw entry (log line or profile document) and fold it
        into the run statistics and the aggregated report.

        input  -- raw log line string or profile document
        parser -- LogParser or ProfileParser matching the input type
        NOTE(review): returns 1 on analysis errors and None otherwise;
        callers ignore the return value.
        """
        self._run_stats['linesRead'] += 1
        line_time = get_line_time(input)
        # Track the earliest/latest timestamp seen across the whole run.
        if line_time is not None:
            if ((self._run_stats['timeRange']['start'] is None) or
                (self._run_stats['timeRange']['start'] > line_time)):
                self._run_stats['timeRange']['start'] = line_time
            if ((self._run_stats['timeRange']['end'] is None) or
                (self._run_stats['timeRange']['end'] < line_time)):
                self._run_stats['timeRange']['end'] = line_time
        parsed = parser.parse(input)
        if parsed is not None:
            if parsed['supported']:
                self._run_stats['linesAnalyzed'] += 1
                namespace_tuple = self._tuplefy_namespace(parsed['ns'])
                # If the query is for a requested namespace ....
                if self._namespace_requested(parsed['ns']):
                    db_name = namespace_tuple[0]
                    collection_name = namespace_tuple[1]
                    query_report = None
                    # Only queries at or above the slowms threshold are analyzed.
                    if parsed['stats']['millis'] >= self._slowms:
                        try:
                            query_report = self.generate_query_report(self._db_uri,
                                                                      parsed,
                                                                      db_name,
                                                                      collection_name)
                        except Exception as e:
                            # NOTE(review): analysis errors are silently
                            # swallowed (traceback print is commented out).
                            #print traceback.print_exc()
                            return 1
                    if query_report is not None:
                        if query_report['recommendation'] is not None:
                            self._run_stats['linesWithRecommendations'] += 1
                        self._report.add_query_occurrence(query_report)
            else:
                # Parsed but unsupported: record it, with timing if available.
                self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
                self._run_stats['unparsableLineInfo']['unparsableLinesWithTime'] += 1
                self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] += int(parsed['stats']['millis'])
                # NOTE(review): integer (floor) division under Python 2.
                self._run_stats['unparsableLineInfo']['unparsedAvgTimeMillis'] = self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] / self._run_stats['unparsableLineInfo']['unparsableLinesWithTime']
        else:
            self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
            self._run_stats['unparsableLineInfo']['unparsableLinesWithoutTime'] += 1
############################################################################
def analyze_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI or provide " \
+ "a namespace filter with -n.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
for database in databases:
db = connection[database]
profile_entries = db['system.profile'].find()
for profile_entry in profile_entries:
self._process_query(profile_entry,
profile_parser)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def watch_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
enabled_profile = False
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
if len(databases) != 1:
message = "Error: Please use namespaces (-n) to specify a single " \
+ "database for profile watching.\n"
sys.stderr.write(message)
return 1
database = databases[0]
db = connection[database]
initial_profile_level = db.profiling_level()
if initial_profile_level is pymongo.OFF:
message = "Profile level currently 0. Dex is setting profile " \
+ "level 1. To run --watch at profile level 2, " \
+ "enable profile level 2 before running Dex.\n"
sys.stderr.write(message)
db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
self._process_query(profile_entry,
profile_parser)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
if initial_profile_level is pymongo.OFF:
message = "Dex is resetting profile level to initial value " \
+ "of 0. You may wish to drop the system.profile " \
+ "collection.\n"
sys.stderr.write(message)
db.set_profiling_level(initial_profile_level)
return 0
############################################################################
def analyze_logfile(self, logfile_path):
self._run_stats['logSource'] = logfile_path
"""Analyzes queries from a given log file"""
with open(logfile_path) as obj:
self.analyze_logfile_object(obj)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def analyze_logfile_object(self, file_object):
"""Analyzes queries from a given log file"""
log_parser = LogParser()
if self._start_time is None:
self._start_time = datetime.now()
if self._timeout != 0:
self._end_time = self._start_time + timedelta(minutes=self._timeout)
else:
self._end_time = None
# For each line in the logfile ...
for line in file_object:
if self._end_time is not None and datetime.now() > self._end_time:
self._run_stats['timedOut'] = True
self._run_stats['timeoutInMinutes'] = self._timeout
break
self._process_query(line, log_parser)
return 0
############################################################################
def watch_logfile(self, logfile_path):
"""Analyzes queries from the tail of a given log file"""
self._run_stats['logSource'] = logfile_path
log_parser = LogParser()
# For each new line in the logfile ...
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
firstLine = True
for line in self._tail_file(open(logfile_path),
WATCH_INTERVAL_SECONDS):
if firstLine:
self._run_stats['timeRange']['start'] = get_line_time(line)
self._process_query(line, log_parser)
self._run_stats['timeRange']['end'] = get_line_time(line)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def _get_initial_run_stats(self):
"""Singlesource for initializing an output dict"""
return OrderedDict([('linesWithRecommendations', 0),
('linesAnalyzed', 0),
('linesRead', 0),
('dexTime', datetime.utcnow()),
('logSource', None),
('timeRange', OrderedDict([('start', None),
('end', None)])),
('unparsableLineInfo', OrderedDict([('unparsableLines', 0),
('unparsableLinesWithoutTime', 0),
('unparsableLinesWithTime', 0),
('unparsedTimeMillis', 0),
('unparsedAvgTimeMillis', 0)]))])
############################################################################
def _make_aggregated_report(self):
output = OrderedDict([('runStats', self._run_stats),
('results', self._report.get_reports())])
return output
############################################################################
def _output_aggregated_report(self, out):
out.write(pretty_json(self._make_aggregated_report()).replace('"', "'").replace("\\'", '"') + "\n")
############################################################################
############################################################################
def _tail_profile(self, db, interval):
"""Tails the system.profile collection"""
latest_doc = None
while latest_doc is None:
time.sleep(interval)
latest_doc = db['system.profile'].find_one()
current_time = latest_doc['ts']
while True:
time.sleep(interval)
cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)
for doc in cursor:
current_time = doc['ts']
yield doc
############################################################################
def _tuplefy_namespace(self, namespace):
"""Converts a mongodb namespace to a db, collection tuple"""
namespace_split = namespace.split('.', 1)
if len(namespace_split) is 1:
# we treat a single element as a collection name.
# this also properly tuplefies '*'
namespace_tuple = ('*', namespace_split[0])
elif len(namespace_split) is 2:
namespace_tuple = (namespace_split[0],namespace_split[1])
else:
return None
return namespace_tuple
############################################################################
# Need to add rejection of true regex attempts.
def _validate_namespaces(self, input_namespaces):
"""Converts a list of db namespaces to a list of namespace tuples,
supporting basic commandline wildcards"""
output_namespaces = []
if input_namespaces == []:
return output_namespaces
elif '*' in input_namespaces:
if len(input_namespaces) > 1:
warning = 'Warning: Multiple namespaces are '
warning += 'ignored when one namespace is "*"\n'
sys.stderr.write(warning)
return output_namespaces
else:
for namespace in input_namespaces:
if not isinstance(namespace, unicode):
namespace = unicode(namespace)
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple is None:
warning = 'Warning: Invalid namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
else:
if namespace_tuple not in output_namespaces:
output_namespaces.append(namespace_tuple)
else:
warning = 'Warning: Duplicate namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
return output_namespaces
############################################################################
def _namespace_requested(self, namespace):
"""Checks whether the requested_namespaces contain the provided
namespace"""
if namespace is None:
return False
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple[0] in IGNORE_DBS:
return False
elif namespace_tuple[1] in IGNORE_COLLECTIONS:
return False
else:
return self._tuple_requested(namespace_tuple)
############################################################################
def _tuple_requested(self, namespace_tuple):
"""Helper for _namespace_requested. Supports limited wildcards"""
if not isinstance(namespace_tuple[0], unicode):
encoded_db = unicode(namespace_tuple[0])
else:
encoded_db = namespace_tuple[0]
if not isinstance(namespace_tuple[1], unicode):
encoded_coll = unicode(namespace_tuple[1])
else:
encoded_coll = namespace_tuple[1]
if namespace_tuple is None:
return False
elif len(self._requested_namespaces) is 0:
return True
for requested_namespace in self._requested_namespaces:
if ((((requested_namespace[0]) == u'*') or
(encoded_db == requested_namespace[0])) and
(((requested_namespace[1]) == u'*') or
(encoded_coll == requested_namespace[1]))):
return True
return False
############################################################################
def _get_requested_databases(self):
"""Returns a list of databases requested, not including ignored dbs"""
requested_databases = []
if ((self._requested_namespaces is not None) and
(self._requested_namespaces != [])):
for requested_namespace in self._requested_namespaces:
if requested_namespace[0] is '*':
return []
elif requested_namespace[0] not in IGNORE_DBS:
requested_databases.append(requested_namespace[0])
return requested_databases
|
mongolab/dex | dex/dex.py | Dex._tail_profile | python | def _tail_profile(self, db, interval):
latest_doc = None
while latest_doc is None:
time.sleep(interval)
latest_doc = db['system.profile'].find_one()
current_time = latest_doc['ts']
while True:
time.sleep(interval)
cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)
for doc in cursor:
current_time = doc['ts']
yield doc | Tails the system.profile collection | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L329-L343 | null | class Dex:
############################################################################
def __init__(self, db_uri, verbose, namespaces_list, slowms, check_indexes, timeout):
self._check_indexes = check_indexes
self._query_analyzer = QueryAnalyzer(check_indexes)
self._db_uri = db_uri
self._slowms = slowms
self._verbose = verbose
self._requested_namespaces = self._validate_namespaces(namespaces_list)
self._recommendation_cache = []
self._report = ReportAggregation()
self._start_time = None
self._timeout_time = None
self._timeout = timeout
self._run_stats = self._get_initial_run_stats()
self._first_line = True
############################################################################
def generate_query_report(self, db_uri, query, db_name, collection_name):
"""Analyzes a single query"""
return self._query_analyzer.generate_query_report(db_uri,
query,
db_name,
collection_name)
############################################################################
def _process_query(self, input, parser):
self._run_stats['linesRead'] += 1
line_time = get_line_time(input)
if line_time is not None:
if ((self._run_stats['timeRange']['start'] is None) or
(self._run_stats['timeRange']['start'] > line_time)):
self._run_stats['timeRange']['start'] = line_time
if ((self._run_stats['timeRange']['end'] is None) or
(self._run_stats['timeRange']['end'] < line_time)):
self._run_stats['timeRange']['end'] = line_time
parsed = parser.parse(input)
if parsed is not None:
if parsed['supported']:
self._run_stats['linesAnalyzed'] += 1
namespace_tuple = self._tuplefy_namespace(parsed['ns'])
# If the query is for a requested namespace ....
if self._namespace_requested(parsed['ns']):
db_name = namespace_tuple[0]
collection_name = namespace_tuple[1]
query_report = None
if parsed['stats']['millis'] >= self._slowms:
try:
query_report = self.generate_query_report(self._db_uri,
parsed,
db_name,
collection_name)
except Exception as e:
#print traceback.print_exc()
return 1
if query_report is not None:
if query_report['recommendation'] is not None:
self._run_stats['linesWithRecommendations'] += 1
self._report.add_query_occurrence(query_report)
else:
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithTime'] += 1
self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] += int(parsed['stats']['millis'])
self._run_stats['unparsableLineInfo']['unparsedAvgTimeMillis'] = self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] / self._run_stats['unparsableLineInfo']['unparsableLinesWithTime']
else:
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithoutTime'] += 1
############################################################################
def analyze_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI or provide " \
+ "a namespace filter with -n.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
for database in databases:
db = connection[database]
profile_entries = db['system.profile'].find()
for profile_entry in profile_entries:
self._process_query(profile_entry,
profile_parser)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def watch_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
enabled_profile = False
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
if len(databases) != 1:
message = "Error: Please use namespaces (-n) to specify a single " \
+ "database for profile watching.\n"
sys.stderr.write(message)
return 1
database = databases[0]
db = connection[database]
initial_profile_level = db.profiling_level()
if initial_profile_level is pymongo.OFF:
message = "Profile level currently 0. Dex is setting profile " \
+ "level 1. To run --watch at profile level 2, " \
+ "enable profile level 2 before running Dex.\n"
sys.stderr.write(message)
db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
self._process_query(profile_entry,
profile_parser)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
if initial_profile_level is pymongo.OFF:
message = "Dex is resetting profile level to initial value " \
+ "of 0. You may wish to drop the system.profile " \
+ "collection.\n"
sys.stderr.write(message)
db.set_profiling_level(initial_profile_level)
return 0
############################################################################
def analyze_logfile(self, logfile_path):
self._run_stats['logSource'] = logfile_path
"""Analyzes queries from a given log file"""
with open(logfile_path) as obj:
self.analyze_logfile_object(obj)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def analyze_logfile_object(self, file_object):
"""Analyzes queries from a given log file"""
log_parser = LogParser()
if self._start_time is None:
self._start_time = datetime.now()
if self._timeout != 0:
self._end_time = self._start_time + timedelta(minutes=self._timeout)
else:
self._end_time = None
# For each line in the logfile ...
for line in file_object:
if self._end_time is not None and datetime.now() > self._end_time:
self._run_stats['timedOut'] = True
self._run_stats['timeoutInMinutes'] = self._timeout
break
self._process_query(line, log_parser)
return 0
############################################################################
def watch_logfile(self, logfile_path):
"""Analyzes queries from the tail of a given log file"""
self._run_stats['logSource'] = logfile_path
log_parser = LogParser()
# For each new line in the logfile ...
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
firstLine = True
for line in self._tail_file(open(logfile_path),
WATCH_INTERVAL_SECONDS):
if firstLine:
self._run_stats['timeRange']['start'] = get_line_time(line)
self._process_query(line, log_parser)
self._run_stats['timeRange']['end'] = get_line_time(line)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def _get_initial_run_stats(self):
"""Singlesource for initializing an output dict"""
return OrderedDict([('linesWithRecommendations', 0),
('linesAnalyzed', 0),
('linesRead', 0),
('dexTime', datetime.utcnow()),
('logSource', None),
('timeRange', OrderedDict([('start', None),
('end', None)])),
('unparsableLineInfo', OrderedDict([('unparsableLines', 0),
('unparsableLinesWithoutTime', 0),
('unparsableLinesWithTime', 0),
('unparsedTimeMillis', 0),
('unparsedAvgTimeMillis', 0)]))])
############################################################################
def _make_aggregated_report(self):
output = OrderedDict([('runStats', self._run_stats),
('results', self._report.get_reports())])
return output
############################################################################
def _output_aggregated_report(self, out):
out.write(pretty_json(self._make_aggregated_report()).replace('"', "'").replace("\\'", '"') + "\n")
############################################################################
def _tail_file(self, file, interval):
"""Tails a file"""
file.seek(0,2)
while True:
where = file.tell()
line = file.readline()
if not line:
time.sleep(interval)
file.seek(where)
else:
yield line
############################################################################
############################################################################
def _tuplefy_namespace(self, namespace):
"""Converts a mongodb namespace to a db, collection tuple"""
namespace_split = namespace.split('.', 1)
if len(namespace_split) is 1:
# we treat a single element as a collection name.
# this also properly tuplefies '*'
namespace_tuple = ('*', namespace_split[0])
elif len(namespace_split) is 2:
namespace_tuple = (namespace_split[0],namespace_split[1])
else:
return None
return namespace_tuple
############################################################################
# Need to add rejection of true regex attempts.
def _validate_namespaces(self, input_namespaces):
"""Converts a list of db namespaces to a list of namespace tuples,
supporting basic commandline wildcards"""
output_namespaces = []
if input_namespaces == []:
return output_namespaces
elif '*' in input_namespaces:
if len(input_namespaces) > 1:
warning = 'Warning: Multiple namespaces are '
warning += 'ignored when one namespace is "*"\n'
sys.stderr.write(warning)
return output_namespaces
else:
for namespace in input_namespaces:
if not isinstance(namespace, unicode):
namespace = unicode(namespace)
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple is None:
warning = 'Warning: Invalid namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
else:
if namespace_tuple not in output_namespaces:
output_namespaces.append(namespace_tuple)
else:
warning = 'Warning: Duplicate namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
return output_namespaces
############################################################################
def _namespace_requested(self, namespace):
"""Checks whether the requested_namespaces contain the provided
namespace"""
if namespace is None:
return False
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple[0] in IGNORE_DBS:
return False
elif namespace_tuple[1] in IGNORE_COLLECTIONS:
return False
else:
return self._tuple_requested(namespace_tuple)
############################################################################
def _tuple_requested(self, namespace_tuple):
"""Helper for _namespace_requested. Supports limited wildcards"""
if not isinstance(namespace_tuple[0], unicode):
encoded_db = unicode(namespace_tuple[0])
else:
encoded_db = namespace_tuple[0]
if not isinstance(namespace_tuple[1], unicode):
encoded_coll = unicode(namespace_tuple[1])
else:
encoded_coll = namespace_tuple[1]
if namespace_tuple is None:
return False
elif len(self._requested_namespaces) is 0:
return True
for requested_namespace in self._requested_namespaces:
if ((((requested_namespace[0]) == u'*') or
(encoded_db == requested_namespace[0])) and
(((requested_namespace[1]) == u'*') or
(encoded_coll == requested_namespace[1]))):
return True
return False
############################################################################
def _get_requested_databases(self):
"""Returns a list of databases requested, not including ignored dbs"""
requested_databases = []
if ((self._requested_namespaces is not None) and
(self._requested_namespaces != [])):
for requested_namespace in self._requested_namespaces:
if requested_namespace[0] is '*':
return []
elif requested_namespace[0] not in IGNORE_DBS:
requested_databases.append(requested_namespace[0])
return requested_databases
|
mongolab/dex | dex/dex.py | Dex._tuplefy_namespace | python | def _tuplefy_namespace(self, namespace):
namespace_split = namespace.split('.', 1)
if len(namespace_split) is 1:
# we treat a single element as a collection name.
# this also properly tuplefies '*'
namespace_tuple = ('*', namespace_split[0])
elif len(namespace_split) is 2:
namespace_tuple = (namespace_split[0],namespace_split[1])
else:
return None
return namespace_tuple | Converts a mongodb namespace to a db, collection tuple | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L347-L358 | null | class Dex:
############################################################################
def __init__(self, db_uri, verbose, namespaces_list, slowms, check_indexes, timeout):
# Wire together the query analyzer, namespace filter, report
# aggregator, and run bookkeeping for one Dex session.
self._check_indexes = check_indexes
self._query_analyzer = QueryAnalyzer(check_indexes)
self._db_uri = db_uri
# Queries faster than this threshold (milliseconds) are skipped.
self._slowms = slowms
self._verbose = verbose
# Namespace filters, normalized to (db, collection) tuples.
self._requested_namespaces = self._validate_namespaces(namespaces_list)
self._recommendation_cache = []
self._report = ReportAggregation()
self._start_time = None
self._timeout_time = None
# Log-analysis time budget in minutes; 0 means no timeout.
self._timeout = timeout
self._run_stats = self._get_initial_run_stats()
self._first_line = True
############################################################################
def generate_query_report(self, db_uri, query, db_name, collection_name):
"""Analyzes a single query"""
# Thin delegation to the QueryAnalyzer configured at construction.
return self._query_analyzer.generate_query_report(db_uri,
query,
db_name,
collection_name)
############################################################################
def _process_query(self, input, parser):
# Parse one log line / profile entry, update running statistics, and
# feed any supported, slow-enough query into the report aggregator.
# `parser` is a LogParser or ProfileParser; `input` is the raw
# line/document it understands.
self._run_stats['linesRead'] += 1
line_time = get_line_time(input)
# Widen the observed time range to include this entry's timestamp.
if line_time is not None:
if ((self._run_stats['timeRange']['start'] is None) or
(self._run_stats['timeRange']['start'] > line_time)):
self._run_stats['timeRange']['start'] = line_time
if ((self._run_stats['timeRange']['end'] is None) or
(self._run_stats['timeRange']['end'] < line_time)):
self._run_stats['timeRange']['end'] = line_time
parsed = parser.parse(input)
if parsed is not None:
if parsed['supported']:
self._run_stats['linesAnalyzed'] += 1
namespace_tuple = self._tuplefy_namespace(parsed['ns'])
# If the query is for a requested namespace ....
if self._namespace_requested(parsed['ns']):
db_name = namespace_tuple[0]
collection_name = namespace_tuple[1]
query_report = None
# Only analyze queries at or above the slow-query threshold.
if parsed['stats']['millis'] >= self._slowms:
try:
query_report = self.generate_query_report(self._db_uri,
parsed,
db_name,
collection_name)
except Exception as e:
#print traceback.print_exc()
# NOTE(review): analysis failures are swallowed and reported
# as a bare 1; callers do not appear to check this value.
return 1
if query_report is not None:
if query_report['recommendation'] is not None:
self._run_stats['linesWithRecommendations'] += 1
self._report.add_query_occurrence(query_report)
else:
# Parsed but unsupported: count as unparsable, keeping a
# running average of time spent in such operations.
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithTime'] += 1
self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] += int(parsed['stats']['millis'])
self._run_stats['unparsableLineInfo']['unparsedAvgTimeMillis'] = self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] / self._run_stats['unparsableLineInfo']['unparsableLinesWithTime']
else:
# Line could not be parsed at all (no timing info available).
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithoutTime'] += 1
############################################################################
def analyze_profile(self):
"""Analyzes queries recorded in the system.profile collection of each
requested (or discovered) database, then prints the aggregated report
to stdout. Returns 0."""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
# No explicit namespace filter: fall back to every database on the
# server, minus the internal ones stripped below.
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI or provide " \
+ "a namespace filter with -n.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
for database in databases:
db = connection[database]
profile_entries = db['system.profile'].find()
for profile_entry in profile_entries:
self._process_query(profile_entry,
profile_parser)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def watch_profile(self):
"""Watches the system.profile collection of a single database,
analyzing queries as they are recorded. Requires a namespace filter
naming exactly one database. Refreshes the report on stderr while
running and prints the final report to stdout. Returns 0 on success,
1 on a bad database selection."""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
enabled_profile = False
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
# Profile watching only makes sense against exactly one database.
if len(databases) != 1:
message = "Error: Please use namespaces (-n) to specify a single " \
+ "database for profile watching.\n"
sys.stderr.write(message)
return 1
database = databases[0]
db = connection[database]
initial_profile_level = db.profiling_level()
# NOTE(review): `is pymongo.OFF` is an identity comparison against the
# integer 0 and only works via CPython small-int caching -- should be ==.
if initial_profile_level is pymongo.OFF:
message = "Profile level currently 0. Dex is setting profile " \
+ "level 1. To run --watch at profile level 2, " \
+ "enable profile level 2 before running Dex.\n"
sys.stderr.write(message)
db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
self._process_query(profile_entry,
profile_parser)
# Periodically refresh the in-progress report on stderr.
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
# Restore the profiling level we changed on entry.
if initial_profile_level is pymongo.OFF:
message = "Dex is resetting profile level to initial value " \
+ "of 0. You may wish to drop the system.profile " \
+ "collection.\n"
sys.stderr.write(message)
db.set_profiling_level(initial_profile_level)
return 0
############################################################################
def analyze_logfile(self, logfile_path):
    """Analyzes queries from a given log file and prints the report.

    Records the log path in the run statistics, streams the file
    through analyze_logfile_object, then writes the aggregated report
    to stdout. Returns 0.
    """
    # Fix: the original placed this docstring AFTER the first statement,
    # where it was a no-op string literal rather than documentation.
    self._run_stats['logSource'] = logfile_path
    with open(logfile_path) as obj:
        self.analyze_logfile_object(obj)
    self._output_aggregated_report(sys.stdout)
    return 0
############################################################################
def analyze_logfile_object(self, file_object):
"""Analyzes queries from a given log file"""
# Streams every line of `file_object` through _process_query,
# honoring the optional analysis timeout (self._timeout, in minutes).
# Returns 0.
log_parser = LogParser()
if self._start_time is None:
self._start_time = datetime.now()
if self._timeout != 0:
self._end_time = self._start_time + timedelta(minutes=self._timeout)
else:
# A timeout of 0 disables the time budget entirely.
self._end_time = None
# For each line in the logfile ...
for line in file_object:
if self._end_time is not None and datetime.now() > self._end_time:
# Budget exhausted: record the fact and stop mid-file.
self._run_stats['timedOut'] = True
self._run_stats['timeoutInMinutes'] = self._timeout
break
self._process_query(line, log_parser)
return 0
############################################################################
def watch_logfile(self, logfile_path):
    """Analyzes queries from the tail of a given log file.

    Streams new lines as they are appended, refreshing the aggregated
    report on stderr every WATCH_DISPLAY_REFRESH_SECONDS and emitting a
    final report on stdout when interrupted. Returns 0.
    """
    self._run_stats['logSource'] = logfile_path
    log_parser = LogParser()
    # For each new line in the logfile ...
    output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    try:
        firstLine = True
        for line in self._tail_file(open(logfile_path),
                                    WATCH_INTERVAL_SECONDS):
            if firstLine:
                self._run_stats['timeRange']['start'] = get_line_time(line)
                # Bug fix: clear the flag so the start timestamp comes
                # from the first line only. The original never set it to
                # False, so `start` was overwritten on every line and
                # always equaled `end`.
                firstLine = False
            self._process_query(line, log_parser)
            self._run_stats['timeRange']['end'] = get_line_time(line)
            if time.time() >= output_time:
                self._output_aggregated_report(sys.stderr)
                output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    except KeyboardInterrupt:
        sys.stderr.write("Interrupt received\n")
    finally:
        self._output_aggregated_report(sys.stdout)
    return 0
############################################################################
def _get_initial_run_stats(self):
"""Singlesource for initializing an output dict"""
return OrderedDict([('linesWithRecommendations', 0),
('linesAnalyzed', 0),
('linesRead', 0),
('dexTime', datetime.utcnow()),
('logSource', None),
('timeRange', OrderedDict([('start', None),
('end', None)])),
('unparsableLineInfo', OrderedDict([('unparsableLines', 0),
('unparsableLinesWithoutTime', 0),
('unparsableLinesWithTime', 0),
('unparsedTimeMillis', 0),
('unparsedAvgTimeMillis', 0)]))])
############################################################################
def _make_aggregated_report(self):
output = OrderedDict([('runStats', self._run_stats),
('results', self._report.get_reports())])
return output
############################################################################
def _output_aggregated_report(self, out):
# Serialize the aggregated report, then swap quote characters so the
# emitted document uses single quotes overall while escaped double
# quotes become plain double quotes.
# NOTE(review): this textual transformation is lossy if report content
# itself contains quote characters -- confirm downstream consumers
# expect this format.
out.write(pretty_json(self._make_aggregated_report()).replace('"', "'").replace("\\'", '"') + "\n")
############################################################################
def _tail_file(self, file, interval):
    """Generator: yield lines appended to `file`, polling every `interval` seconds."""
    # Start at the current end of the file; only new content is reported.
    file.seek(0, 2)
    while True:
        position = file.tell()
        line = file.readline()
        if line:
            yield line
        else:
            # Nothing new yet -- wait, then rewind past any partial read.
            time.sleep(interval)
            file.seek(position)
############################################################################
def _tail_profile(self, db, interval):
"""Tails the system.profile collection"""
# Wait until the profile collection has at least one document so we
# have a starting timestamp to tail from.
latest_doc = None
while latest_doc is None:
time.sleep(interval)
latest_doc = db['system.profile'].find_one()
current_time = latest_doc['ts']
# Poll for documents at or after the last seen timestamp.
# NOTE(review): the $gte query re-yields document(s) sharing the
# boundary timestamp on every poll -- confirm duplicate entries are
# acceptable to _process_query's counters.
while True:
time.sleep(interval)
cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)
for doc in cursor:
current_time = doc['ts']
yield doc
############################################################################
############################################################################
# Need to add rejection of true regex attempts.
def _validate_namespaces(self, input_namespaces):
"""Converts a list of db namespaces to a list of namespace tuples,
supporting basic commandline wildcards"""
output_namespaces = []
if input_namespaces == []:
return output_namespaces
elif '*' in input_namespaces:
if len(input_namespaces) > 1:
warning = 'Warning: Multiple namespaces are '
warning += 'ignored when one namespace is "*"\n'
sys.stderr.write(warning)
return output_namespaces
else:
for namespace in input_namespaces:
if not isinstance(namespace, unicode):
namespace = unicode(namespace)
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple is None:
warning = 'Warning: Invalid namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
else:
if namespace_tuple not in output_namespaces:
output_namespaces.append(namespace_tuple)
else:
warning = 'Warning: Duplicate namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
return output_namespaces
############################################################################
def _namespace_requested(self, namespace):
"""Checks whether the requested_namespaces contain the provided
namespace"""
if namespace is None:
return False
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple[0] in IGNORE_DBS:
return False
elif namespace_tuple[1] in IGNORE_COLLECTIONS:
return False
else:
return self._tuple_requested(namespace_tuple)
############################################################################
def _tuple_requested(self, namespace_tuple):
"""Helper for _namespace_requested. Supports limited wildcards"""
if not isinstance(namespace_tuple[0], unicode):
encoded_db = unicode(namespace_tuple[0])
else:
encoded_db = namespace_tuple[0]
if not isinstance(namespace_tuple[1], unicode):
encoded_coll = unicode(namespace_tuple[1])
else:
encoded_coll = namespace_tuple[1]
if namespace_tuple is None:
return False
elif len(self._requested_namespaces) is 0:
return True
for requested_namespace in self._requested_namespaces:
if ((((requested_namespace[0]) == u'*') or
(encoded_db == requested_namespace[0])) and
(((requested_namespace[1]) == u'*') or
(encoded_coll == requested_namespace[1]))):
return True
return False
############################################################################
def _get_requested_databases(self):
"""Returns a list of databases requested, not including ignored dbs"""
requested_databases = []
if ((self._requested_namespaces is not None) and
(self._requested_namespaces != [])):
for requested_namespace in self._requested_namespaces:
if requested_namespace[0] is '*':
return []
elif requested_namespace[0] not in IGNORE_DBS:
requested_databases.append(requested_namespace[0])
return requested_databases
|
mongolab/dex | dex/dex.py | Dex._validate_namespaces | python | def _validate_namespaces(self, input_namespaces):
output_namespaces = []
if input_namespaces == []:
return output_namespaces
elif '*' in input_namespaces:
if len(input_namespaces) > 1:
warning = 'Warning: Multiple namespaces are '
warning += 'ignored when one namespace is "*"\n'
sys.stderr.write(warning)
return output_namespaces
else:
for namespace in input_namespaces:
if not isinstance(namespace, unicode):
namespace = unicode(namespace)
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple is None:
warning = 'Warning: Invalid namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
else:
if namespace_tuple not in output_namespaces:
output_namespaces.append(namespace_tuple)
else:
warning = 'Warning: Duplicate namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
return output_namespaces | Converts a list of db namespaces to a list of namespace tuples,
supporting basic commandline wildcards | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L362-L390 | [
"def _tuplefy_namespace(self, namespace):\n \"\"\"Converts a mongodb namespace to a db, collection tuple\"\"\"\n namespace_split = namespace.split('.', 1)\n if len(namespace_split) is 1:\n # we treat a single element as a collection name.\n # this also properly tuplefies '*'\n namespac... | class Dex:
############################################################################
def __init__(self, db_uri, verbose, namespaces_list, slowms, check_indexes, timeout):
self._check_indexes = check_indexes
self._query_analyzer = QueryAnalyzer(check_indexes)
self._db_uri = db_uri
self._slowms = slowms
self._verbose = verbose
self._requested_namespaces = self._validate_namespaces(namespaces_list)
self._recommendation_cache = []
self._report = ReportAggregation()
self._start_time = None
self._timeout_time = None
self._timeout = timeout
self._run_stats = self._get_initial_run_stats()
self._first_line = True
############################################################################
def generate_query_report(self, db_uri, query, db_name, collection_name):
"""Analyzes a single query"""
return self._query_analyzer.generate_query_report(db_uri,
query,
db_name,
collection_name)
############################################################################
def _process_query(self, input, parser):
self._run_stats['linesRead'] += 1
line_time = get_line_time(input)
if line_time is not None:
if ((self._run_stats['timeRange']['start'] is None) or
(self._run_stats['timeRange']['start'] > line_time)):
self._run_stats['timeRange']['start'] = line_time
if ((self._run_stats['timeRange']['end'] is None) or
(self._run_stats['timeRange']['end'] < line_time)):
self._run_stats['timeRange']['end'] = line_time
parsed = parser.parse(input)
if parsed is not None:
if parsed['supported']:
self._run_stats['linesAnalyzed'] += 1
namespace_tuple = self._tuplefy_namespace(parsed['ns'])
# If the query is for a requested namespace ....
if self._namespace_requested(parsed['ns']):
db_name = namespace_tuple[0]
collection_name = namespace_tuple[1]
query_report = None
if parsed['stats']['millis'] >= self._slowms:
try:
query_report = self.generate_query_report(self._db_uri,
parsed,
db_name,
collection_name)
except Exception as e:
#print traceback.print_exc()
return 1
if query_report is not None:
if query_report['recommendation'] is not None:
self._run_stats['linesWithRecommendations'] += 1
self._report.add_query_occurrence(query_report)
else:
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithTime'] += 1
self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] += int(parsed['stats']['millis'])
self._run_stats['unparsableLineInfo']['unparsedAvgTimeMillis'] = self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] / self._run_stats['unparsableLineInfo']['unparsableLinesWithTime']
else:
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithoutTime'] += 1
############################################################################
def analyze_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI or provide " \
+ "a namespace filter with -n.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
for database in databases:
db = connection[database]
profile_entries = db['system.profile'].find()
for profile_entry in profile_entries:
self._process_query(profile_entry,
profile_parser)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def watch_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
enabled_profile = False
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
if len(databases) != 1:
message = "Error: Please use namespaces (-n) to specify a single " \
+ "database for profile watching.\n"
sys.stderr.write(message)
return 1
database = databases[0]
db = connection[database]
initial_profile_level = db.profiling_level()
if initial_profile_level is pymongo.OFF:
message = "Profile level currently 0. Dex is setting profile " \
+ "level 1. To run --watch at profile level 2, " \
+ "enable profile level 2 before running Dex.\n"
sys.stderr.write(message)
db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
self._process_query(profile_entry,
profile_parser)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
if initial_profile_level is pymongo.OFF:
message = "Dex is resetting profile level to initial value " \
+ "of 0. You may wish to drop the system.profile " \
+ "collection.\n"
sys.stderr.write(message)
db.set_profiling_level(initial_profile_level)
return 0
############################################################################
def analyze_logfile(self, logfile_path):
self._run_stats['logSource'] = logfile_path
"""Analyzes queries from a given log file"""
with open(logfile_path) as obj:
self.analyze_logfile_object(obj)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def analyze_logfile_object(self, file_object):
"""Analyzes queries from a given log file"""
log_parser = LogParser()
if self._start_time is None:
self._start_time = datetime.now()
if self._timeout != 0:
self._end_time = self._start_time + timedelta(minutes=self._timeout)
else:
self._end_time = None
# For each line in the logfile ...
for line in file_object:
if self._end_time is not None and datetime.now() > self._end_time:
self._run_stats['timedOut'] = True
self._run_stats['timeoutInMinutes'] = self._timeout
break
self._process_query(line, log_parser)
return 0
############################################################################
def watch_logfile(self, logfile_path):
"""Analyzes queries from the tail of a given log file"""
self._run_stats['logSource'] = logfile_path
log_parser = LogParser()
# For each new line in the logfile ...
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
firstLine = True
for line in self._tail_file(open(logfile_path),
WATCH_INTERVAL_SECONDS):
if firstLine:
self._run_stats['timeRange']['start'] = get_line_time(line)
self._process_query(line, log_parser)
self._run_stats['timeRange']['end'] = get_line_time(line)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def _get_initial_run_stats(self):
"""Singlesource for initializing an output dict"""
return OrderedDict([('linesWithRecommendations', 0),
('linesAnalyzed', 0),
('linesRead', 0),
('dexTime', datetime.utcnow()),
('logSource', None),
('timeRange', OrderedDict([('start', None),
('end', None)])),
('unparsableLineInfo', OrderedDict([('unparsableLines', 0),
('unparsableLinesWithoutTime', 0),
('unparsableLinesWithTime', 0),
('unparsedTimeMillis', 0),
('unparsedAvgTimeMillis', 0)]))])
############################################################################
def _make_aggregated_report(self):
output = OrderedDict([('runStats', self._run_stats),
('results', self._report.get_reports())])
return output
############################################################################
def _output_aggregated_report(self, out):
out.write(pretty_json(self._make_aggregated_report()).replace('"', "'").replace("\\'", '"') + "\n")
############################################################################
def _tail_file(self, file, interval):
"""Tails a file"""
file.seek(0,2)
while True:
where = file.tell()
line = file.readline()
if not line:
time.sleep(interval)
file.seek(where)
else:
yield line
############################################################################
def _tail_profile(self, db, interval):
"""Tails the system.profile collection"""
latest_doc = None
while latest_doc is None:
time.sleep(interval)
latest_doc = db['system.profile'].find_one()
current_time = latest_doc['ts']
while True:
time.sleep(interval)
cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)
for doc in cursor:
current_time = doc['ts']
yield doc
############################################################################
def _tuplefy_namespace(self, namespace):
"""Converts a mongodb namespace to a db, collection tuple"""
namespace_split = namespace.split('.', 1)
if len(namespace_split) is 1:
# we treat a single element as a collection name.
# this also properly tuplefies '*'
namespace_tuple = ('*', namespace_split[0])
elif len(namespace_split) is 2:
namespace_tuple = (namespace_split[0],namespace_split[1])
else:
return None
return namespace_tuple
############################################################################
# Need to add rejection of true regex attempts.
############################################################################
def _namespace_requested(self, namespace):
"""Checks whether the requested_namespaces contain the provided
namespace"""
if namespace is None:
return False
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple[0] in IGNORE_DBS:
return False
elif namespace_tuple[1] in IGNORE_COLLECTIONS:
return False
else:
return self._tuple_requested(namespace_tuple)
############################################################################
def _tuple_requested(self, namespace_tuple):
"""Helper for _namespace_requested. Supports limited wildcards"""
if not isinstance(namespace_tuple[0], unicode):
encoded_db = unicode(namespace_tuple[0])
else:
encoded_db = namespace_tuple[0]
if not isinstance(namespace_tuple[1], unicode):
encoded_coll = unicode(namespace_tuple[1])
else:
encoded_coll = namespace_tuple[1]
if namespace_tuple is None:
return False
elif len(self._requested_namespaces) is 0:
return True
for requested_namespace in self._requested_namespaces:
if ((((requested_namespace[0]) == u'*') or
(encoded_db == requested_namespace[0])) and
(((requested_namespace[1]) == u'*') or
(encoded_coll == requested_namespace[1]))):
return True
return False
############################################################################
def _get_requested_databases(self):
"""Returns a list of databases requested, not including ignored dbs"""
requested_databases = []
if ((self._requested_namespaces is not None) and
(self._requested_namespaces != [])):
for requested_namespace in self._requested_namespaces:
if requested_namespace[0] is '*':
return []
elif requested_namespace[0] not in IGNORE_DBS:
requested_databases.append(requested_namespace[0])
return requested_databases
|
mongolab/dex | dex/dex.py | Dex._namespace_requested | python | def _namespace_requested(self, namespace):
if namespace is None:
return False
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple[0] in IGNORE_DBS:
return False
elif namespace_tuple[1] in IGNORE_COLLECTIONS:
return False
else:
return self._tuple_requested(namespace_tuple) | Checks whether the requested_namespaces contain the provided
namespace | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L393-L404 | null | class Dex:
############################################################################
def __init__(self, db_uri, verbose, namespaces_list, slowms, check_indexes, timeout):
self._check_indexes = check_indexes
self._query_analyzer = QueryAnalyzer(check_indexes)
self._db_uri = db_uri
self._slowms = slowms
self._verbose = verbose
self._requested_namespaces = self._validate_namespaces(namespaces_list)
self._recommendation_cache = []
self._report = ReportAggregation()
self._start_time = None
self._timeout_time = None
self._timeout = timeout
self._run_stats = self._get_initial_run_stats()
self._first_line = True
############################################################################
def generate_query_report(self, db_uri, query, db_name, collection_name):
"""Analyzes a single query"""
return self._query_analyzer.generate_query_report(db_uri,
query,
db_name,
collection_name)
############################################################################
def _process_query(self, input, parser):
self._run_stats['linesRead'] += 1
line_time = get_line_time(input)
if line_time is not None:
if ((self._run_stats['timeRange']['start'] is None) or
(self._run_stats['timeRange']['start'] > line_time)):
self._run_stats['timeRange']['start'] = line_time
if ((self._run_stats['timeRange']['end'] is None) or
(self._run_stats['timeRange']['end'] < line_time)):
self._run_stats['timeRange']['end'] = line_time
parsed = parser.parse(input)
if parsed is not None:
if parsed['supported']:
self._run_stats['linesAnalyzed'] += 1
namespace_tuple = self._tuplefy_namespace(parsed['ns'])
# If the query is for a requested namespace ....
if self._namespace_requested(parsed['ns']):
db_name = namespace_tuple[0]
collection_name = namespace_tuple[1]
query_report = None
if parsed['stats']['millis'] >= self._slowms:
try:
query_report = self.generate_query_report(self._db_uri,
parsed,
db_name,
collection_name)
except Exception as e:
#print traceback.print_exc()
return 1
if query_report is not None:
if query_report['recommendation'] is not None:
self._run_stats['linesWithRecommendations'] += 1
self._report.add_query_occurrence(query_report)
else:
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithTime'] += 1
self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] += int(parsed['stats']['millis'])
self._run_stats['unparsableLineInfo']['unparsedAvgTimeMillis'] = self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] / self._run_stats['unparsableLineInfo']['unparsableLinesWithTime']
else:
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithoutTime'] += 1
############################################################################
def analyze_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI or provide " \
+ "a namespace filter with -n.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
for database in databases:
db = connection[database]
profile_entries = db['system.profile'].find()
for profile_entry in profile_entries:
self._process_query(profile_entry,
profile_parser)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def watch_profile(self):
    """Tail the system.profile collection of a single database, analyzing
    entries as they arrive and periodically printing the aggregated report.

    Exactly one database must be selected via the -n namespace filter.
    If profiling is off, level-1 profiling is enabled for the duration of
    the watch and restored on exit. Returns 0 on success, 1 on bad input.
    """
    profile_parser = ProfileParser()
    databases = self._get_requested_databases()
    connection = pymongo.MongoClient(self._db_uri,
                                     document_class=OrderedDict,
                                     read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
    enabled_profile = False  # NOTE(review): never read below -- looks vestigial
    if databases == []:
        # No namespace filter; enumerate all databases on the server.
        try:
            databases = connection.database_names()
        except:
            # Listing databases can fail without sufficient credentials.
            message = "Error: Could not list databases on server. Please " \
                      + "check the auth components of your URI.\n"
            sys.stderr.write(message)
            databases = []
    for ignore_db in IGNORE_DBS:
        if ignore_db in databases:
            databases.remove(ignore_db)
    if len(databases) != 1:
        # Watching requires a single, unambiguous database.
        message = "Error: Please use namespaces (-n) to specify a single " \
                  + "database for profile watching.\n"
        sys.stderr.write(message)
        return 1
    database = databases[0]
    db = connection[database]
    # Remember the current profiling level so it can be restored in `finally`.
    initial_profile_level = db.profiling_level()
    if initial_profile_level is pymongo.OFF:
        message = "Profile level currently 0. Dex is setting profile " \
                  + "level 1. To run --watch at profile level 2, " \
                  + "enable profile level 2 before running Dex.\n"
        sys.stderr.write(message)
        db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
    output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    try:
        for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
            self._process_query(profile_entry,
                                profile_parser)
            if time.time() >= output_time:
                # Interim reports go to stderr; the final report goes to stdout.
                self._output_aggregated_report(sys.stderr)
                output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    except KeyboardInterrupt:
        sys.stderr.write("Interrupt received\n")
    finally:
        self._output_aggregated_report(sys.stdout)
        if initial_profile_level is pymongo.OFF:
            # Restore the profiling level we changed on entry.
            message = "Dex is resetting profile level to initial value " \
                      + "of 0. You may wish to drop the system.profile " \
                      + "collection.\n"
            sys.stderr.write(message)
            db.set_profiling_level(initial_profile_level)
    return 0
############################################################################
def analyze_logfile(self, logfile_path):
    """Analyze queries from the log file at *logfile_path* and print the
    aggregated report to stdout. Returns 0.

    Fix: the docstring previously sat AFTER the first statement, where it
    was a no-op string expression rather than a real docstring.
    """
    self._run_stats['logSource'] = logfile_path
    with open(logfile_path) as obj:
        self.analyze_logfile_object(obj)
    self._output_aggregated_report(sys.stdout)
    return 0
############################################################################
def analyze_logfile_object(self, file_object):
    """Analyze queries from an open log file object, honoring the
    configured timeout (in minutes; 0 means run to completion). Returns 0."""
    log_parser = LogParser()
    if self._start_time is None:
        self._start_time = datetime.now()
    # A zero timeout disables the deadline entirely.
    if self._timeout != 0:
        self._end_time = self._start_time + timedelta(minutes=self._timeout)
    else:
        self._end_time = None
    for line in file_object:
        deadline_passed = (self._end_time is not None and
                           datetime.now() > self._end_time)
        if deadline_passed:
            self._run_stats['timedOut'] = True
            self._run_stats['timeoutInMinutes'] = self._timeout
            break
        self._process_query(line, log_parser)
    return 0
############################################################################
def watch_logfile(self, logfile_path):
    """Tail *logfile_path*, analyzing each new line and periodically
    printing the aggregated report to stderr; the final report goes to
    stdout. Returns 0."""
    self._run_stats['logSource'] = logfile_path
    log_parser = LogParser()
    output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    try:
        first_line = True
        for line in self._tail_file(open(logfile_path),
                                    WATCH_INTERVAL_SECONDS):
            if first_line:
                # Record the start of the observed time range exactly once.
                self._run_stats['timeRange']['start'] = get_line_time(line)
                # BUG FIX: the flag was never cleared, so 'start' was
                # overwritten on every line instead of only the first.
                first_line = False
            self._process_query(line, log_parser)
            self._run_stats['timeRange']['end'] = get_line_time(line)
            if time.time() >= output_time:
                self._output_aggregated_report(sys.stderr)
                output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    except KeyboardInterrupt:
        sys.stderr.write("Interrupt received\n")
    finally:
        self._output_aggregated_report(sys.stdout)
    return 0
############################################################################
def _get_initial_run_stats(self):
"""Singlesource for initializing an output dict"""
return OrderedDict([('linesWithRecommendations', 0),
('linesAnalyzed', 0),
('linesRead', 0),
('dexTime', datetime.utcnow()),
('logSource', None),
('timeRange', OrderedDict([('start', None),
('end', None)])),
('unparsableLineInfo', OrderedDict([('unparsableLines', 0),
('unparsableLinesWithoutTime', 0),
('unparsableLinesWithTime', 0),
('unparsedTimeMillis', 0),
('unparsedAvgTimeMillis', 0)]))])
############################################################################
def _make_aggregated_report(self):
output = OrderedDict([('runStats', self._run_stats),
('results', self._report.get_reports())])
return output
############################################################################
def _output_aggregated_report(self, out):
    """Serialize the aggregated report to *out*, swapping double quotes for
    single quotes (escaped single quotes become double quotes)."""
    text = pretty_json(self._make_aggregated_report())
    out.write(text.replace('"', "'").replace("\\'", '"') + "\n")
############################################################################
def _tail_file(self, file, interval):
    """Generator yielding lines appended to *file* after this call,
    polling every *interval* seconds. Never terminates on its own."""
    file.seek(0, 2)  # start at the current end of the file
    while True:
        position = file.tell()
        next_line = file.readline()
        if next_line:
            yield next_line
        else:
            # Nothing new yet: wait, then rewind past any partial read.
            time.sleep(interval)
            file.seek(position)
############################################################################
def _tail_profile(self, db, interval):
    """Generator that tails the system.profile collection of *db*.

    Blocks until at least one profile document exists, then repeatedly
    re-queries for documents at or after the newest timestamp seen,
    sleeping *interval* seconds between polls. Never terminates on its own.
    """
    latest_doc = None
    while latest_doc is None:
        time.sleep(interval)
        latest_doc = db['system.profile'].find_one()
    current_time = latest_doc['ts']
    while True:
        time.sleep(interval)
        # NOTE: $gte includes the boundary timestamp, so documents sharing
        # the latest 'ts' may be yielded more than once across polls.
        cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)
        for doc in cursor:
            current_time = doc['ts']
            yield doc
############################################################################
def _tuplefy_namespace(self, namespace):
"""Converts a mongodb namespace to a db, collection tuple"""
namespace_split = namespace.split('.', 1)
if len(namespace_split) is 1:
# we treat a single element as a collection name.
# this also properly tuplefies '*'
namespace_tuple = ('*', namespace_split[0])
elif len(namespace_split) is 2:
namespace_tuple = (namespace_split[0],namespace_split[1])
else:
return None
return namespace_tuple
############################################################################
# Need to add rejection of true regex attempts.
def _validate_namespaces(self, input_namespaces):
    """Convert a list of namespace strings to unique (db, collection)
    tuples, supporting basic commandline wildcards. Invalid and duplicate
    entries are warned about on stderr and dropped."""
    output_namespaces = []
    if input_namespaces == []:
        return output_namespaces
    if '*' in input_namespaces:
        # A lone '*' means "everything"; extra namespaces are meaningless.
        if len(input_namespaces) > 1:
            sys.stderr.write('Warning: Multiple namespaces are '
                             'ignored when one namespace is "*"\n')
        return output_namespaces
    for namespace in input_namespaces:
        if not isinstance(namespace, unicode):
            namespace = unicode(namespace)
        namespace_tuple = self._tuplefy_namespace(namespace)
        if namespace_tuple is None:
            sys.stderr.write('Warning: Invalid namespace ' + namespace +
                             ' will be ignored\n')
        elif namespace_tuple in output_namespaces:
            sys.stderr.write('Warning: Duplicate namespace ' + namespace +
                             ' will be ignored\n')
        else:
            output_namespaces.append(namespace_tuple)
    return output_namespaces
############################################################################
############################################################################
def _tuple_requested(self, namespace_tuple):
    """Helper for _namespace_requested. Supports limited '*' wildcards.

    Returns True when the (db, collection) tuple matches any requested
    namespace, or when no namespaces were requested at all.
    """
    # BUG FIX: the None guard previously ran AFTER the tuple was indexed,
    # so it could never fire; check before touching the tuple.
    if namespace_tuple is None:
        return False
    if not isinstance(namespace_tuple[0], unicode):
        encoded_db = unicode(namespace_tuple[0])
    else:
        encoded_db = namespace_tuple[0]
    if not isinstance(namespace_tuple[1], unicode):
        encoded_coll = unicode(namespace_tuple[1])
    else:
        encoded_coll = namespace_tuple[1]
    # No filter configured means every namespace is requested.
    # (Also replaces "len(...) is 0" with a proper equality test.)
    if len(self._requested_namespaces) == 0:
        return True
    for requested_namespace in self._requested_namespaces:
        if ((requested_namespace[0] == u'*' or
             encoded_db == requested_namespace[0]) and
            (requested_namespace[1] == u'*' or
             encoded_coll == requested_namespace[1])):
            return True
    return False
############################################################################
def _get_requested_databases(self):
"""Returns a list of databases requested, not including ignored dbs"""
requested_databases = []
if ((self._requested_namespaces is not None) and
(self._requested_namespaces != [])):
for requested_namespace in self._requested_namespaces:
if requested_namespace[0] is '*':
return []
elif requested_namespace[0] not in IGNORE_DBS:
requested_databases.append(requested_namespace[0])
return requested_databases
|
mongolab/dex | dex/dex.py | Dex._tuple_requested | python | def _tuple_requested(self, namespace_tuple):
if not isinstance(namespace_tuple[0], unicode):
encoded_db = unicode(namespace_tuple[0])
else:
encoded_db = namespace_tuple[0]
if not isinstance(namespace_tuple[1], unicode):
encoded_coll = unicode(namespace_tuple[1])
else:
encoded_coll = namespace_tuple[1]
if namespace_tuple is None:
return False
elif len(self._requested_namespaces) is 0:
return True
for requested_namespace in self._requested_namespaces:
if ((((requested_namespace[0]) == u'*') or
(encoded_db == requested_namespace[0])) and
(((requested_namespace[1]) == u'*') or
(encoded_coll == requested_namespace[1]))):
return True
return False | Helper for _namespace_requested. Supports limited wildcards | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L407-L428 | null | class Dex:
############################################################################
def __init__(self, db_uri, verbose, namespaces_list, slowms, check_indexes, timeout):
    """Initialize a Dex run.

    db_uri -- MongoDB connection URI used for index checks and profiling
    verbose -- verbosity flag (stored; not read in the code visible here)
    namespaces_list -- 'db.collection' strings ('*' wildcards allowed)
    slowms -- only entries at or above this duration (ms) are analyzed
    check_indexes -- whether the query analyzer inspects live indexes
    timeout -- minutes before log analysis stops (0 means no timeout)
    """
    self._check_indexes = check_indexes
    self._query_analyzer = QueryAnalyzer(check_indexes)
    self._db_uri = db_uri
    self._slowms = slowms
    self._verbose = verbose
    # Pre-validated list of (db, collection) tuples.
    self._requested_namespaces = self._validate_namespaces(namespaces_list)
    self._recommendation_cache = []
    self._report = ReportAggregation()
    self._start_time = None
    self._timeout_time = None  # NOTE(review): appears unused; _end_time is set elsewhere -- confirm
    self._timeout = timeout
    self._run_stats = self._get_initial_run_stats()
    self._first_line = True
############################################################################
def generate_query_report(self, db_uri, query, db_name, collection_name):
"""Analyzes a single query"""
return self._query_analyzer.generate_query_report(db_uri,
query,
db_name,
collection_name)
############################################################################
def _process_query(self, input, parser):
    """Feed one raw entry (log line or profile document) through *parser*,
    update the running statistics, and record a query report when the
    entry is a supported query in a requested namespace at/above slowms.
    """
    self._run_stats['linesRead'] += 1
    line_time = get_line_time(input)
    # Widen the observed time range to include this entry's timestamp.
    if line_time is not None:
        if ((self._run_stats['timeRange']['start'] is None) or
            (self._run_stats['timeRange']['start'] > line_time)):
            self._run_stats['timeRange']['start'] = line_time
        if ((self._run_stats['timeRange']['end'] is None) or
            (self._run_stats['timeRange']['end'] < line_time)):
            self._run_stats['timeRange']['end'] = line_time
    parsed = parser.parse(input)
    if parsed is not None:
        if parsed['supported']:
            self._run_stats['linesAnalyzed'] += 1
            namespace_tuple = self._tuplefy_namespace(parsed['ns'])
            # If the query is for a requested namespace ....
            if self._namespace_requested(parsed['ns']):
                db_name = namespace_tuple[0]
                collection_name = namespace_tuple[1]
                query_report = None
                # Only entries at or above the slow-query threshold are analyzed.
                if parsed['stats']['millis'] >= self._slowms:
                    try:
                        query_report = self.generate_query_report(self._db_uri,
                                                                  parsed,
                                                                  db_name,
                                                                  collection_name)
                    except Exception as e:
                        #print traceback.print_exc()
                        # NOTE(review): any analysis failure is silently
                        # swallowed and the entry dropped; 'return 1' is
                        # never inspected by callers visible here -- confirm.
                        return 1
                if query_report is not None:
                    if query_report['recommendation'] is not None:
                        self._run_stats['linesWithRecommendations'] += 1
                    self._report.add_query_occurrence(query_report)
        else:
            # Parseable entry, but a query shape Dex does not support.
            self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
            self._run_stats['unparsableLineInfo']['unparsableLinesWithTime'] += 1
            self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] += int(parsed['stats']['millis'])
            # NOTE(review): integer division under Python 2 -- the average is truncated.
            self._run_stats['unparsableLineInfo']['unparsedAvgTimeMillis'] = self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] / self._run_stats['unparsableLineInfo']['unparsableLinesWithTime']
    else:
        # The parser could not make sense of the entry at all.
        self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
        self._run_stats['unparsableLineInfo']['unparsableLinesWithoutTime'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithoutTime'] += 1
############################################################################
def analyze_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI or provide " \
+ "a namespace filter with -n.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
for database in databases:
db = connection[database]
profile_entries = db['system.profile'].find()
for profile_entry in profile_entries:
self._process_query(profile_entry,
profile_parser)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def watch_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
enabled_profile = False
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
if len(databases) != 1:
message = "Error: Please use namespaces (-n) to specify a single " \
+ "database for profile watching.\n"
sys.stderr.write(message)
return 1
database = databases[0]
db = connection[database]
initial_profile_level = db.profiling_level()
if initial_profile_level is pymongo.OFF:
message = "Profile level currently 0. Dex is setting profile " \
+ "level 1. To run --watch at profile level 2, " \
+ "enable profile level 2 before running Dex.\n"
sys.stderr.write(message)
db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
self._process_query(profile_entry,
profile_parser)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
if initial_profile_level is pymongo.OFF:
message = "Dex is resetting profile level to initial value " \
+ "of 0. You may wish to drop the system.profile " \
+ "collection.\n"
sys.stderr.write(message)
db.set_profiling_level(initial_profile_level)
return 0
############################################################################
def analyze_logfile(self, logfile_path):
self._run_stats['logSource'] = logfile_path
"""Analyzes queries from a given log file"""
with open(logfile_path) as obj:
self.analyze_logfile_object(obj)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def analyze_logfile_object(self, file_object):
"""Analyzes queries from a given log file"""
log_parser = LogParser()
if self._start_time is None:
self._start_time = datetime.now()
if self._timeout != 0:
self._end_time = self._start_time + timedelta(minutes=self._timeout)
else:
self._end_time = None
# For each line in the logfile ...
for line in file_object:
if self._end_time is not None and datetime.now() > self._end_time:
self._run_stats['timedOut'] = True
self._run_stats['timeoutInMinutes'] = self._timeout
break
self._process_query(line, log_parser)
return 0
############################################################################
def watch_logfile(self, logfile_path):
    """Tail *logfile_path*, analyzing each new line and periodically
    printing the aggregated report to stderr; the final report goes to
    stdout. Returns 0."""
    self._run_stats['logSource'] = logfile_path
    log_parser = LogParser()
    output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    try:
        first_line = True
        for line in self._tail_file(open(logfile_path),
                                    WATCH_INTERVAL_SECONDS):
            if first_line:
                # Record the start of the observed time range exactly once.
                self._run_stats['timeRange']['start'] = get_line_time(line)
                # BUG FIX: the flag was never cleared, so 'start' was
                # overwritten on every line instead of only the first.
                first_line = False
            self._process_query(line, log_parser)
            self._run_stats['timeRange']['end'] = get_line_time(line)
            if time.time() >= output_time:
                self._output_aggregated_report(sys.stderr)
                output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    except KeyboardInterrupt:
        sys.stderr.write("Interrupt received\n")
    finally:
        self._output_aggregated_report(sys.stdout)
    return 0
############################################################################
def _get_initial_run_stats(self):
"""Singlesource for initializing an output dict"""
return OrderedDict([('linesWithRecommendations', 0),
('linesAnalyzed', 0),
('linesRead', 0),
('dexTime', datetime.utcnow()),
('logSource', None),
('timeRange', OrderedDict([('start', None),
('end', None)])),
('unparsableLineInfo', OrderedDict([('unparsableLines', 0),
('unparsableLinesWithoutTime', 0),
('unparsableLinesWithTime', 0),
('unparsedTimeMillis', 0),
('unparsedAvgTimeMillis', 0)]))])
############################################################################
def _make_aggregated_report(self):
output = OrderedDict([('runStats', self._run_stats),
('results', self._report.get_reports())])
return output
############################################################################
def _output_aggregated_report(self, out):
out.write(pretty_json(self._make_aggregated_report()).replace('"', "'").replace("\\'", '"') + "\n")
############################################################################
def _tail_file(self, file, interval):
"""Tails a file"""
file.seek(0,2)
while True:
where = file.tell()
line = file.readline()
if not line:
time.sleep(interval)
file.seek(where)
else:
yield line
############################################################################
def _tail_profile(self, db, interval):
"""Tails the system.profile collection"""
latest_doc = None
while latest_doc is None:
time.sleep(interval)
latest_doc = db['system.profile'].find_one()
current_time = latest_doc['ts']
while True:
time.sleep(interval)
cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)
for doc in cursor:
current_time = doc['ts']
yield doc
############################################################################
def _tuplefy_namespace(self, namespace):
"""Converts a mongodb namespace to a db, collection tuple"""
namespace_split = namespace.split('.', 1)
if len(namespace_split) is 1:
# we treat a single element as a collection name.
# this also properly tuplefies '*'
namespace_tuple = ('*', namespace_split[0])
elif len(namespace_split) is 2:
namespace_tuple = (namespace_split[0],namespace_split[1])
else:
return None
return namespace_tuple
############################################################################
# Need to add rejection of true regex attempts.
def _validate_namespaces(self, input_namespaces):
"""Converts a list of db namespaces to a list of namespace tuples,
supporting basic commandline wildcards"""
output_namespaces = []
if input_namespaces == []:
return output_namespaces
elif '*' in input_namespaces:
if len(input_namespaces) > 1:
warning = 'Warning: Multiple namespaces are '
warning += 'ignored when one namespace is "*"\n'
sys.stderr.write(warning)
return output_namespaces
else:
for namespace in input_namespaces:
if not isinstance(namespace, unicode):
namespace = unicode(namespace)
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple is None:
warning = 'Warning: Invalid namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
else:
if namespace_tuple not in output_namespaces:
output_namespaces.append(namespace_tuple)
else:
warning = 'Warning: Duplicate namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
return output_namespaces
############################################################################
def _namespace_requested(self, namespace):
    """Return True when *namespace* matches the requested namespaces and
    neither its database nor its collection is on an ignore list."""
    if namespace is None:
        return False
    db, collection = self._tuplefy_namespace(namespace)
    if db in IGNORE_DBS:
        return False
    if collection in IGNORE_COLLECTIONS:
        return False
    return self._tuple_requested((db, collection))
############################################################################
############################################################################
def _get_requested_databases(self):
"""Returns a list of databases requested, not including ignored dbs"""
requested_databases = []
if ((self._requested_namespaces is not None) and
(self._requested_namespaces != [])):
for requested_namespace in self._requested_namespaces:
if requested_namespace[0] is '*':
return []
elif requested_namespace[0] not in IGNORE_DBS:
requested_databases.append(requested_namespace[0])
return requested_databases
|
mongolab/dex | dex/dex.py | Dex._get_requested_databases | python | def _get_requested_databases(self):
requested_databases = []
if ((self._requested_namespaces is not None) and
(self._requested_namespaces != [])):
for requested_namespace in self._requested_namespaces:
if requested_namespace[0] is '*':
return []
elif requested_namespace[0] not in IGNORE_DBS:
requested_databases.append(requested_namespace[0])
return requested_databases | Returns a list of databases requested, not including ignored dbs | train | https://github.com/mongolab/dex/blob/f6dc27321028ef1ffdb3d4b1165fdcce7c8f20aa/dex/dex.py#L431-L441 | null | class Dex:
############################################################################
def __init__(self, db_uri, verbose, namespaces_list, slowms, check_indexes, timeout):
self._check_indexes = check_indexes
self._query_analyzer = QueryAnalyzer(check_indexes)
self._db_uri = db_uri
self._slowms = slowms
self._verbose = verbose
self._requested_namespaces = self._validate_namespaces(namespaces_list)
self._recommendation_cache = []
self._report = ReportAggregation()
self._start_time = None
self._timeout_time = None
self._timeout = timeout
self._run_stats = self._get_initial_run_stats()
self._first_line = True
############################################################################
def generate_query_report(self, db_uri, query, db_name, collection_name):
"""Analyzes a single query"""
return self._query_analyzer.generate_query_report(db_uri,
query,
db_name,
collection_name)
############################################################################
def _process_query(self, input, parser):
self._run_stats['linesRead'] += 1
line_time = get_line_time(input)
if line_time is not None:
if ((self._run_stats['timeRange']['start'] is None) or
(self._run_stats['timeRange']['start'] > line_time)):
self._run_stats['timeRange']['start'] = line_time
if ((self._run_stats['timeRange']['end'] is None) or
(self._run_stats['timeRange']['end'] < line_time)):
self._run_stats['timeRange']['end'] = line_time
parsed = parser.parse(input)
if parsed is not None:
if parsed['supported']:
self._run_stats['linesAnalyzed'] += 1
namespace_tuple = self._tuplefy_namespace(parsed['ns'])
# If the query is for a requested namespace ....
if self._namespace_requested(parsed['ns']):
db_name = namespace_tuple[0]
collection_name = namespace_tuple[1]
query_report = None
if parsed['stats']['millis'] >= self._slowms:
try:
query_report = self.generate_query_report(self._db_uri,
parsed,
db_name,
collection_name)
except Exception as e:
#print traceback.print_exc()
return 1
if query_report is not None:
if query_report['recommendation'] is not None:
self._run_stats['linesWithRecommendations'] += 1
self._report.add_query_occurrence(query_report)
else:
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithTime'] += 1
self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] += int(parsed['stats']['millis'])
self._run_stats['unparsableLineInfo']['unparsedAvgTimeMillis'] = self._run_stats['unparsableLineInfo']['unparsedTimeMillis'] / self._run_stats['unparsableLineInfo']['unparsableLinesWithTime']
else:
self._run_stats['unparsableLineInfo']['unparsableLines'] += 1
self._run_stats['unparsableLineInfo']['unparsableLinesWithoutTime'] += 1
############################################################################
def analyze_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI or provide " \
+ "a namespace filter with -n.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
for database in databases:
db = connection[database]
profile_entries = db['system.profile'].find()
for profile_entry in profile_entries:
self._process_query(profile_entry,
profile_parser)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def watch_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
enabled_profile = False
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
if len(databases) != 1:
message = "Error: Please use namespaces (-n) to specify a single " \
+ "database for profile watching.\n"
sys.stderr.write(message)
return 1
database = databases[0]
db = connection[database]
initial_profile_level = db.profiling_level()
if initial_profile_level is pymongo.OFF:
message = "Profile level currently 0. Dex is setting profile " \
+ "level 1. To run --watch at profile level 2, " \
+ "enable profile level 2 before running Dex.\n"
sys.stderr.write(message)
db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
self._process_query(profile_entry,
profile_parser)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
if initial_profile_level is pymongo.OFF:
message = "Dex is resetting profile level to initial value " \
+ "of 0. You may wish to drop the system.profile " \
+ "collection.\n"
sys.stderr.write(message)
db.set_profiling_level(initial_profile_level)
return 0
############################################################################
def analyze_logfile(self, logfile_path):
self._run_stats['logSource'] = logfile_path
"""Analyzes queries from a given log file"""
with open(logfile_path) as obj:
self.analyze_logfile_object(obj)
self._output_aggregated_report(sys.stdout)
return 0
############################################################################
def analyze_logfile_object(self, file_object):
"""Analyzes queries from a given log file"""
log_parser = LogParser()
if self._start_time is None:
self._start_time = datetime.now()
if self._timeout != 0:
self._end_time = self._start_time + timedelta(minutes=self._timeout)
else:
self._end_time = None
# For each line in the logfile ...
for line in file_object:
if self._end_time is not None and datetime.now() > self._end_time:
self._run_stats['timedOut'] = True
self._run_stats['timeoutInMinutes'] = self._timeout
break
self._process_query(line, log_parser)
return 0
############################################################################
def watch_logfile(self, logfile_path):
    """Tail *logfile_path*, analyzing each new line and periodically
    printing the aggregated report to stderr; the final report goes to
    stdout. Returns 0."""
    self._run_stats['logSource'] = logfile_path
    log_parser = LogParser()
    output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    try:
        first_line = True
        for line in self._tail_file(open(logfile_path),
                                    WATCH_INTERVAL_SECONDS):
            if first_line:
                # Record the start of the observed time range exactly once.
                self._run_stats['timeRange']['start'] = get_line_time(line)
                # BUG FIX: the flag was never cleared, so 'start' was
                # overwritten on every line instead of only the first.
                first_line = False
            self._process_query(line, log_parser)
            self._run_stats['timeRange']['end'] = get_line_time(line)
            if time.time() >= output_time:
                self._output_aggregated_report(sys.stderr)
                output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
    except KeyboardInterrupt:
        sys.stderr.write("Interrupt received\n")
    finally:
        self._output_aggregated_report(sys.stdout)
    return 0
############################################################################
def _get_initial_run_stats(self):
"""Singlesource for initializing an output dict"""
return OrderedDict([('linesWithRecommendations', 0),
('linesAnalyzed', 0),
('linesRead', 0),
('dexTime', datetime.utcnow()),
('logSource', None),
('timeRange', OrderedDict([('start', None),
('end', None)])),
('unparsableLineInfo', OrderedDict([('unparsableLines', 0),
('unparsableLinesWithoutTime', 0),
('unparsableLinesWithTime', 0),
('unparsedTimeMillis', 0),
('unparsedAvgTimeMillis', 0)]))])
############################################################################
def _make_aggregated_report(self):
output = OrderedDict([('runStats', self._run_stats),
('results', self._report.get_reports())])
return output
############################################################################
def _output_aggregated_report(self, out):
out.write(pretty_json(self._make_aggregated_report()).replace('"', "'").replace("\\'", '"') + "\n")
############################################################################
def _tail_file(self, file, interval):
"""Tails a file"""
file.seek(0,2)
while True:
where = file.tell()
line = file.readline()
if not line:
time.sleep(interval)
file.seek(where)
else:
yield line
############################################################################
def _tail_profile(self, db, interval):
"""Tails the system.profile collection"""
latest_doc = None
while latest_doc is None:
time.sleep(interval)
latest_doc = db['system.profile'].find_one()
current_time = latest_doc['ts']
while True:
time.sleep(interval)
cursor = db['system.profile'].find({'ts': {'$gte': current_time}}).sort('ts', pymongo.ASCENDING)
for doc in cursor:
current_time = doc['ts']
yield doc
############################################################################
def _tuplefy_namespace(self, namespace):
"""Converts a mongodb namespace to a db, collection tuple"""
namespace_split = namespace.split('.', 1)
if len(namespace_split) is 1:
# we treat a single element as a collection name.
# this also properly tuplefies '*'
namespace_tuple = ('*', namespace_split[0])
elif len(namespace_split) is 2:
namespace_tuple = (namespace_split[0],namespace_split[1])
else:
return None
return namespace_tuple
############################################################################
# Need to add rejection of true regex attempts.
def _validate_namespaces(self, input_namespaces):
"""Converts a list of db namespaces to a list of namespace tuples,
supporting basic commandline wildcards"""
output_namespaces = []
if input_namespaces == []:
return output_namespaces
elif '*' in input_namespaces:
if len(input_namespaces) > 1:
warning = 'Warning: Multiple namespaces are '
warning += 'ignored when one namespace is "*"\n'
sys.stderr.write(warning)
return output_namespaces
else:
for namespace in input_namespaces:
if not isinstance(namespace, unicode):
namespace = unicode(namespace)
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple is None:
warning = 'Warning: Invalid namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
else:
if namespace_tuple not in output_namespaces:
output_namespaces.append(namespace_tuple)
else:
warning = 'Warning: Duplicate namespace ' + namespace
warning += ' will be ignored\n'
sys.stderr.write(warning)
return output_namespaces
############################################################################
def _namespace_requested(self, namespace):
"""Checks whether the requested_namespaces contain the provided
namespace"""
if namespace is None:
return False
namespace_tuple = self._tuplefy_namespace(namespace)
if namespace_tuple[0] in IGNORE_DBS:
return False
elif namespace_tuple[1] in IGNORE_COLLECTIONS:
return False
else:
return self._tuple_requested(namespace_tuple)
############################################################################
def _tuple_requested(self, namespace_tuple):
"""Helper for _namespace_requested. Supports limited wildcards"""
if not isinstance(namespace_tuple[0], unicode):
encoded_db = unicode(namespace_tuple[0])
else:
encoded_db = namespace_tuple[0]
if not isinstance(namespace_tuple[1], unicode):
encoded_coll = unicode(namespace_tuple[1])
else:
encoded_coll = namespace_tuple[1]
if namespace_tuple is None:
return False
elif len(self._requested_namespaces) is 0:
return True
for requested_namespace in self._requested_namespaces:
if ((((requested_namespace[0]) == u'*') or
(encoded_db == requested_namespace[0])) and
(((requested_namespace[1]) == u'*') or
(encoded_coll == requested_namespace[1]))):
return True
return False
############################################################################
|
fuzeman/trakt.py | trakt/interfaces/search.py | SearchInterface.lookup | python | def lookup(self, id, service=None, media=None, extended=None, **kwargs):
# Expand tuple `id`
if type(id) is tuple:
if len(id) != 2:
raise ValueError()
id, service = id
# Validate parameters
if not service:
raise ValueError('Invalid value provided for the "service" parameter')
# Build query
query = {}
if isinstance(media, six.string_types):
query['type'] = media
elif isinstance(media, list):
query['type'] = ','.join(media)
if extended:
query['extended'] = extended
# Send request
response = self.http.get(
params=[service, id],
query=query
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
if not items:
return None
count = len(items)
if count > 1:
return SearchMapper.process_many(self.client, items)
elif count == 1:
return SearchMapper.process(self.client, items[0])
return None | Lookup items by their Trakt, IMDB, TMDB, TVDB, or TVRage ID.
**Note:** If you lookup an identifier without a :code:`media` type specified it
might return multiple items if the :code:`service` is not globally unique.
:param id: Identifier value to lookup
:type id: :class:`~python:str` or :class:`~python:int`
:param service: Identifier service
**Possible values:**
- :code:`trakt`
- :code:`imdb`
- :code:`tmdb`
- :code:`tvdb`
- :code:`tvrage`
:type service: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`trakt.objects.media.Media` or :class:`~python:list` of :class:`trakt.objects.media.Media` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/search.py#L14-L103 | [
"def get_data(self, response, exceptions=False, pagination=False, parse=True):\n if response is None:\n if exceptions:\n raise RequestFailedError('No response available')\n\n log.warning('Request failed (no response returned)')\n return None\n\n # Return response, if parse=Fals... | class SearchInterface(Interface):
path = 'search'
def query(self, query, media=None, year=None, fields=None, extended=None, **kwargs):
"""Search by titles, descriptions, translated titles, aliases, and people.
**Note:** Results are ordered by the most relevant score.
:param query: Search title or description
:type query: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param year: Desired media year (or :code:`None` to return all matching items)
:type year: :class:`~python:str` or :class:`~python:int`
:param fields: Fields to search for :code:`query` (or :code:`None` to search all fields)
:type fields: :class:`~python:str` or :class:`~python:list`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
# Validate parameters
if not media:
warnings.warn(
"\"media\" parameter is now required on the Trakt['search'].query() method",
DeprecationWarning, stacklevel=2
)
if fields and not media:
raise ValueError('"fields" can only be used when the "media" parameter is defined')
# Build query
query = {
'query': query
}
if year:
query['year'] = year
if fields:
query['fields'] = fields
if extended:
query['extended'] = extended
# Serialize media items
if isinstance(media, list):
media = ','.join(media)
# Send request
response = self.http.get(
params=[media],
query=query
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
if items is not None:
return SearchMapper.process_many(self.client, items)
return None
|
fuzeman/trakt.py | trakt/interfaces/search.py | SearchInterface.query | python | def query(self, query, media=None, year=None, fields=None, extended=None, **kwargs):
# Validate parameters
if not media:
warnings.warn(
"\"media\" parameter is now required on the Trakt['search'].query() method",
DeprecationWarning, stacklevel=2
)
if fields and not media:
raise ValueError('"fields" can only be used when the "media" parameter is defined')
# Build query
query = {
'query': query
}
if year:
query['year'] = year
if fields:
query['fields'] = fields
if extended:
query['extended'] = extended
# Serialize media items
if isinstance(media, list):
media = ','.join(media)
# Send request
response = self.http.get(
params=[media],
query=query
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
if items is not None:
return SearchMapper.process_many(self.client, items)
return None | Search by titles, descriptions, translated titles, aliases, and people.
**Note:** Results are ordered by the most relevant score.
:param query: Search title or description
:type query: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param year: Desired media year (or :code:`None` to return all matching items)
:type year: :class:`~python:str` or :class:`~python:int`
:param fields: Fields to search for :code:`query` (or :code:`None` to search all fields)
:type fields: :class:`~python:str` or :class:`~python:list`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/search.py#L105-L187 | [
"def get_data(self, response, exceptions=False, pagination=False, parse=True):\n if response is None:\n if exceptions:\n raise RequestFailedError('No response available')\n\n log.warning('Request failed (no response returned)')\n return None\n\n # Return response, if parse=Fals... | class SearchInterface(Interface):
path = 'search'
def lookup(self, id, service=None, media=None, extended=None, **kwargs):
"""Lookup items by their Trakt, IMDB, TMDB, TVDB, or TVRage ID.
**Note:** If you lookup an identifier without a :code:`media` type specified it
might return multiple items if the :code:`service` is not globally unique.
:param id: Identifier value to lookup
:type id: :class:`~python:str` or :class:`~python:int`
:param service: Identifier service
**Possible values:**
- :code:`trakt`
- :code:`imdb`
- :code:`tmdb`
- :code:`tvdb`
- :code:`tvrage`
:type service: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)
**Possible values:**
- :code:`movie`
- :code:`show`
- :code:`episode`
- :code:`person`
- :code:`list`
:type media: :class:`~python:str` or :class:`~python:list` of :class:`~python:str`
:param extended: Level of information to include in response
**Possible values:**
- :code:`None`: Minimal (e.g. title, year, ids) **(default)**
- :code:`full`: Complete
:type extended: :class:`~python:str`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Results
:rtype: :class:`trakt.objects.media.Media` or :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
# Expand tuple `id`
if type(id) is tuple:
if len(id) != 2:
raise ValueError()
id, service = id
# Validate parameters
if not service:
raise ValueError('Invalid value provided for the "service" parameter')
# Build query
query = {}
if isinstance(media, six.string_types):
query['type'] = media
elif isinstance(media, list):
query['type'] = ','.join(media)
if extended:
query['extended'] = extended
# Send request
response = self.http.get(
params=[service, id],
query=query
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
if not items:
return None
count = len(items)
if count > 1:
return SearchMapper.process_many(self.client, items)
elif count == 1:
return SearchMapper.process(self.client, items[0])
return None
|
fuzeman/trakt.py | trakt/objects/season.py | Season.to_identifier | python | def to_identifier(self):
return {
'number': self.pk,
'episodes': [
episode.to_dict()
for episode in self.episodes.values()
]
} | Return the season identifier which is compatible with requests that require season definitions.
:return: Season identifier/definition
:rtype: :class:`~python:dict` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/season.py#L49-L62 | null | class Season(Media):
def __init__(self, client, keys=None, index=None):
super(Season, self).__init__(client, keys, index)
self.show = None
"""
:type: :class:`trakt.objects.show.Show`
Show
"""
self.episodes = {}
"""
:type: :class:`~python:dict`
Episodes, defined as :code:`{episode_num: Episode}`
**Note:** this field might not be available with some methods
"""
self.first_aired = None
"""
:type: :class:`~python:datetime.datetime`
First air date
"""
self.episode_count = None
"""
:type: :class:`~python:int`
Total episode count
"""
self.aired_episodes = None
"""
:type: :class:`~python:int`
Aired episode count
"""
@deprecated('Season.to_info() has been moved to Season.to_dict()')
def to_info(self):
"""**Deprecated:** use the :code:`to_dict()` method instead."""
return self.to_dict()
def to_dict(self):
"""Dump season to a dictionary.
:return: Season dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result.update({
'ids': dict([
(key, value) for (key, value) in self.keys[1:] # NOTE: keys[0] is the season identifier
])
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
result['in_watchlist'] = self.in_watchlist if self.in_watchlist is not None else 0
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.episode_count:
result['episode_count'] = self.episode_count
if self.aired_episodes:
result['aired_episodes'] = self.aired_episodes
return result
def _update(self, info=None, **kwargs):
if not info:
return
super(Season, self)._update(info, **kwargs)
update_attributes(self, info, [
# Extended Info
'episode_count',
'aired_episodes'
])
# Extended Info
if 'first_aired' in info:
self.first_aired = from_iso8601_datetime(info.get('first_aired'))
@classmethod
def _construct(cls, client, keys, info=None, index=None, **kwargs):
season = cls(client, keys, index=index)
season._update(info, **kwargs)
return season
def __repr__(self):
if self.show:
return '<Season %r - S%02d>' % (self.show.title, self.pk)
return '<Season S%02d>' % self.pk
|
fuzeman/trakt.py | trakt/objects/season.py | Season.to_dict | python | def to_dict(self):
result = self.to_identifier()
result.update({
'ids': dict([
(key, value) for (key, value) in self.keys[1:] # NOTE: keys[0] is the season identifier
])
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
result['in_watchlist'] = self.in_watchlist if self.in_watchlist is not None else 0
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.episode_count:
result['episode_count'] = self.episode_count
if self.aired_episodes:
result['aired_episodes'] = self.aired_episodes
return result | Dump season to a dictionary.
:return: Season dictionary
:rtype: :class:`~python:dict` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/season.py#L69-L100 | [
"def to_iso8601_datetime(value):\n if value is None:\n return None\n\n return value.strftime('%Y-%m-%dT%H:%M:%S') + '.000-00:00'\n",
"def to_identifier(self):\n \"\"\"Return the season identifier which is compatible with requests that require season definitions.\n\n :return: Season identifier/d... | class Season(Media):
def __init__(self, client, keys=None, index=None):
super(Season, self).__init__(client, keys, index)
self.show = None
"""
:type: :class:`trakt.objects.show.Show`
Show
"""
self.episodes = {}
"""
:type: :class:`~python:dict`
Episodes, defined as :code:`{episode_num: Episode}`
**Note:** this field might not be available with some methods
"""
self.first_aired = None
"""
:type: :class:`~python:datetime.datetime`
First air date
"""
self.episode_count = None
"""
:type: :class:`~python:int`
Total episode count
"""
self.aired_episodes = None
"""
:type: :class:`~python:int`
Aired episode count
"""
def to_identifier(self):
"""Return the season identifier which is compatible with requests that require season definitions.
:return: Season identifier/definition
:rtype: :class:`~python:dict`
"""
return {
'number': self.pk,
'episodes': [
episode.to_dict()
for episode in self.episodes.values()
]
}
@deprecated('Season.to_info() has been moved to Season.to_dict()')
def to_info(self):
"""**Deprecated:** use the :code:`to_dict()` method instead."""
return self.to_dict()
def _update(self, info=None, **kwargs):
if not info:
return
super(Season, self)._update(info, **kwargs)
update_attributes(self, info, [
# Extended Info
'episode_count',
'aired_episodes'
])
# Extended Info
if 'first_aired' in info:
self.first_aired = from_iso8601_datetime(info.get('first_aired'))
@classmethod
def _construct(cls, client, keys, info=None, index=None, **kwargs):
season = cls(client, keys, index=index)
season._update(info, **kwargs)
return season
def __repr__(self):
if self.show:
return '<Season %r - S%02d>' % (self.show.title, self.pk)
return '<Season S%02d>' % self.pk
|
fuzeman/trakt.py | trakt/objects/episode.py | Episode.to_dict | python | def to_dict(self):
result = self.to_identifier()
result.update({
'title': self.title,
'watched': 1 if self.is_watched else 0,
'collected': 1 if self.is_collected else 0,
'plays': self.plays if self.plays is not None else 0,
'in_watchlist': self.in_watchlist if self.in_watchlist is not None else 0,
'progress': self.progress,
'last_watched_at': to_iso8601_datetime(self.last_watched_at),
'collected_at': to_iso8601_datetime(self.collected_at),
'paused_at': to_iso8601_datetime(self.paused_at),
'ids': dict([
(key, value) for (key, value) in self.keys[1:] # NOTE: keys[0] is the (<season>, <episode>) identifier
])
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.available_translations:
result['available_translations'] = self.available_translations
return result | Dump episode to a dictionary.
:return: Episode dictionary
:rtype: :class:`~python:dict` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/episode.py#L72-L117 | [
"def to_iso8601_datetime(value):\n if value is None:\n return None\n\n return value.strftime('%Y-%m-%dT%H:%M:%S') + '.000-00:00'\n",
"def to_identifier(self):\n \"\"\"Retrieve the episode identifier.\n\n :return: Episode identifier/definition\n :rtype: :class:`~python:dict`\n \"\"\"\n\n ... | class Episode(Video):
def __init__(self, client, keys=None, index=None):
super(Episode, self).__init__(client, keys, index)
self.show = None
"""
:type: :class:`trakt.objects.show.Show`
Show
"""
self.season = None
"""
:type: :class:`trakt.objects.season.Season`
Season
"""
self.title = None
"""
:type: :class:`~python:str`
Title
"""
self.first_aired = None
"""
:type: :class:`~python:datetime.datetime`
First air date
"""
self.updated_at = None
"""
:type: :class:`~python:datetime.datetime`
Updated date/time
"""
self.available_translations = None
"""
:type: :class:`~python:list`
Available translations (for title, overview, etc..)
"""
def to_identifier(self):
"""Retrieve the episode identifier.
:return: Episode identifier/definition
:rtype: :class:`~python:dict`
"""
_, number = self.pk
return {
'number': number
}
@deprecated('Episode.to_info() has been moved to Episode.to_dict()')
def to_info(self):
"""**Deprecated:** use the :code:`to_dict()` method instead."""
return self.to_dict()
def _update(self, info=None, **kwargs):
if not info:
return
super(Episode, self)._update(info, **kwargs)
update_attributes(self, info, [
'title',
# Extended Info
'available_translations'
])
# Extended Info
if 'first_aired' in info:
self.first_aired = from_iso8601_datetime(info.get('first_aired'))
if 'updated_at' in info:
self.updated_at = from_iso8601_datetime(info.get('updated_at'))
@classmethod
def _construct(cls, client, keys, info=None, index=None, **kwargs):
episode = cls(client, keys, index=index)
episode._update(info, **kwargs)
return episode
def __repr__(self):
if self.show and self.title:
return '<Episode %r - S%02dE%02d - %r>' % (self.show.title, self.pk[0], self.pk[1], self.title)
if self.show:
return '<Episode %r - S%02dE%02d>' % (self.show.title, self.pk[0], self.pk[1])
if self.title:
return '<Episode S%02dE%02d - %r>' % (self.pk[0], self.pk[1], self.title)
return '<Episode S%02dE%02d>' % self.pk
|
fuzeman/trakt.py | examples/authentication/device.py | Application.on_aborted | python | def on_aborted(self):
print('Authentication aborted')
# Authentication aborted
self.is_authenticating.acquire()
self.is_authenticating.notify_all()
self.is_authenticating.release() | Device authentication aborted.
Triggered when device authentication was aborted (either with `DeviceOAuthPoller.stop()`
or via the "poll" event) | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/examples/authentication/device.py#L70-L82 | null | class Application(object):
def __init__(self):
self.is_authenticating = Condition()
self.authorization = None
# Bind trakt events
Trakt.on('oauth.token_refreshed', self.on_token_refreshed)
def authenticate(self):
if not self.is_authenticating.acquire(blocking=False):
print('Authentication has already been started')
return False
# Request new device code
code = Trakt['oauth/device'].code()
print('Enter the code "%s" at %s to authenticate your account' % (
code.get('user_code'),
code.get('verification_url')
))
# Construct device authentication poller
poller = Trakt['oauth/device'].poll(**code)\
.on('aborted', self.on_aborted)\
.on('authenticated', self.on_authenticated)\
.on('expired', self.on_expired)\
.on('poll', self.on_poll)
# Start polling for authentication token
poller.start(daemon=False)
# Wait for authentication to complete
return self.is_authenticating.wait()
def run(self):
self.authenticate()
if not self.authorization:
print('ERROR: Authentication required')
exit(1)
# Simulate expired token
self.authorization['expires_in'] = 0
# Test authenticated calls
with Trakt.configuration.oauth.from_response(self.authorization):
# Expired token, requests will return `None`
print(Trakt['sync/collection'].movies())
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
# Expired token will be refreshed automatically (as `refresh=True`)
print(Trakt['sync/collection'].movies())
with Trakt.configuration.oauth.from_response(self.authorization):
# Current token is still valid
print(Trakt['sync/collection'].movies())
def on_authenticated(self, authorization):
"""Device authenticated.
:param authorization: Authentication token details
:type authorization: dict
"""
# Acquire condition
self.is_authenticating.acquire()
# Store authorization for future calls
self.authorization = authorization
print('Authentication successful - authorization: %r' % self.authorization)
# Authentication complete
self.is_authenticating.notify_all()
self.is_authenticating.release()
def on_expired(self):
"""Device authentication expired."""
print('Authentication expired')
# Authentication expired
self.is_authenticating.acquire()
self.is_authenticating.notify_all()
self.is_authenticating.release()
def on_poll(self, callback):
"""Device authentication poll.
:param callback: Call with `True` to continue polling, or `False` to abort polling
:type callback: func
"""
# Continue polling
callback(True)
def on_token_refreshed(self, authorization):
# OAuth token refreshed, store authorization for future calls
self.authorization = authorization
print('Token refreshed - authorization: %r' % self.authorization)
|
fuzeman/trakt.py | examples/authentication/device.py | Application.on_authenticated | python | def on_authenticated(self, authorization):
# Acquire condition
self.is_authenticating.acquire()
# Store authorization for future calls
self.authorization = authorization
print('Authentication successful - authorization: %r' % self.authorization)
# Authentication complete
self.is_authenticating.notify_all()
self.is_authenticating.release() | Device authenticated.
:param authorization: Authentication token details
:type authorization: dict | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/examples/authentication/device.py#L84-L101 | null | class Application(object):
def __init__(self):
self.is_authenticating = Condition()
self.authorization = None
# Bind trakt events
Trakt.on('oauth.token_refreshed', self.on_token_refreshed)
def authenticate(self):
if not self.is_authenticating.acquire(blocking=False):
print('Authentication has already been started')
return False
# Request new device code
code = Trakt['oauth/device'].code()
print('Enter the code "%s" at %s to authenticate your account' % (
code.get('user_code'),
code.get('verification_url')
))
# Construct device authentication poller
poller = Trakt['oauth/device'].poll(**code)\
.on('aborted', self.on_aborted)\
.on('authenticated', self.on_authenticated)\
.on('expired', self.on_expired)\
.on('poll', self.on_poll)
# Start polling for authentication token
poller.start(daemon=False)
# Wait for authentication to complete
return self.is_authenticating.wait()
def run(self):
self.authenticate()
if not self.authorization:
print('ERROR: Authentication required')
exit(1)
# Simulate expired token
self.authorization['expires_in'] = 0
# Test authenticated calls
with Trakt.configuration.oauth.from_response(self.authorization):
# Expired token, requests will return `None`
print(Trakt['sync/collection'].movies())
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
# Expired token will be refreshed automatically (as `refresh=True`)
print(Trakt['sync/collection'].movies())
with Trakt.configuration.oauth.from_response(self.authorization):
# Current token is still valid
print(Trakt['sync/collection'].movies())
def on_aborted(self):
"""Device authentication aborted.
Triggered when device authentication was aborted (either with `DeviceOAuthPoller.stop()`
or via the "poll" event)
"""
print('Authentication aborted')
# Authentication aborted
self.is_authenticating.acquire()
self.is_authenticating.notify_all()
self.is_authenticating.release()
def on_expired(self):
"""Device authentication expired."""
print('Authentication expired')
# Authentication expired
self.is_authenticating.acquire()
self.is_authenticating.notify_all()
self.is_authenticating.release()
def on_poll(self, callback):
"""Device authentication poll.
:param callback: Call with `True` to continue polling, or `False` to abort polling
:type callback: func
"""
# Continue polling
callback(True)
def on_token_refreshed(self, authorization):
# OAuth token refreshed, store authorization for future calls
self.authorization = authorization
print('Token refreshed - authorization: %r' % self.authorization)
|
fuzeman/trakt.py | examples/authentication/device.py | Application.on_expired | python | def on_expired(self):
print('Authentication expired')
# Authentication expired
self.is_authenticating.acquire()
self.is_authenticating.notify_all()
self.is_authenticating.release() | Device authentication expired. | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/examples/authentication/device.py#L103-L111 | null | class Application(object):
def __init__(self):
self.is_authenticating = Condition()
self.authorization = None
# Bind trakt events
Trakt.on('oauth.token_refreshed', self.on_token_refreshed)
def authenticate(self):
if not self.is_authenticating.acquire(blocking=False):
print('Authentication has already been started')
return False
# Request new device code
code = Trakt['oauth/device'].code()
print('Enter the code "%s" at %s to authenticate your account' % (
code.get('user_code'),
code.get('verification_url')
))
# Construct device authentication poller
poller = Trakt['oauth/device'].poll(**code)\
.on('aborted', self.on_aborted)\
.on('authenticated', self.on_authenticated)\
.on('expired', self.on_expired)\
.on('poll', self.on_poll)
# Start polling for authentication token
poller.start(daemon=False)
# Wait for authentication to complete
return self.is_authenticating.wait()
def run(self):
self.authenticate()
if not self.authorization:
print('ERROR: Authentication required')
exit(1)
# Simulate expired token
self.authorization['expires_in'] = 0
# Test authenticated calls
with Trakt.configuration.oauth.from_response(self.authorization):
# Expired token, requests will return `None`
print(Trakt['sync/collection'].movies())
with Trakt.configuration.oauth.from_response(self.authorization, refresh=True):
# Expired token will be refreshed automatically (as `refresh=True`)
print(Trakt['sync/collection'].movies())
with Trakt.configuration.oauth.from_response(self.authorization):
# Current token is still valid
print(Trakt['sync/collection'].movies())
def on_aborted(self):
"""Device authentication aborted.
Triggered when device authentication was aborted (either with `DeviceOAuthPoller.stop()`
or via the "poll" event)
"""
print('Authentication aborted')
# Authentication aborted
self.is_authenticating.acquire()
self.is_authenticating.notify_all()
self.is_authenticating.release()
def on_authenticated(self, authorization):
"""Device authenticated.
:param authorization: Authentication token details
:type authorization: dict
"""
# Acquire condition
self.is_authenticating.acquire()
# Store authorization for future calls
self.authorization = authorization
print('Authentication successful - authorization: %r' % self.authorization)
# Authentication complete
self.is_authenticating.notify_all()
self.is_authenticating.release()
def on_poll(self, callback):
"""Device authentication poll.
:param callback: Call with `True` to continue polling, or `False` to abort polling
:type callback: func
"""
# Continue polling
callback(True)
def on_token_refreshed(self, authorization):
# OAuth token refreshed, store authorization for future calls
self.authorization = authorization
print('Token refreshed - authorization: %r' % self.authorization)
|
fuzeman/trakt.py | trakt/interfaces/oauth/device.py | DeviceOAuthInterface.poll | python | def poll(self, device_code, expires_in, interval, **kwargs):
return DeviceOAuthPoller(self.client, device_code, expires_in, interval) | Construct the device authentication poller.
:param device_code: Device authentication code
:type device_code: str
:param expires_in: Device authentication code expiry (in seconds)
:type in: int
:param interval: Device authentication poll interval
:type interval: int
:rtype: DeviceOAuthPoller | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/oauth/device.py#L42-L56 | null | class DeviceOAuthInterface(Interface):
path = 'oauth/device'
def code(self, **kwargs):
client_id = self.client.configuration['client.id']
if not client_id:
raise ValueError('"client.id" configuration parameter is required')
response = self.http.post(
'code',
data={
'client_id': client_id
}
)
data = self.get_data(response, **kwargs)
if isinstance(data, requests.Response):
return data
if not data:
return None
return data
def token(self, device_code, **kwargs):
client_id = self.client.configuration['client.id']
client_secret = self.client.configuration['client.secret']
if not client_id:
raise ValueError('"client.id" and "client.secret" configuration parameters are required')
response = self.http.post(
'token',
data={
'client_id': client_id,
'client_secret': client_secret,
'code': device_code
}
)
data = self.get_data(response, **kwargs)
if isinstance(data, requests.Response):
return data
if not data:
return None
return data
|
fuzeman/trakt.py | trakt/objects/movie.py | Movie.to_dict | python | def to_dict(self):
result = self.to_identifier()
result.update({
'watched': 1 if self.is_watched else 0,
'collected': 1 if self.is_collected else 0,
'plays': self.plays if self.plays is not None else 0,
'in_watchlist': self.in_watchlist if self.in_watchlist is not None else 0,
'progress': self.progress,
'last_watched_at': to_iso8601_datetime(self.last_watched_at),
'collected_at': to_iso8601_datetime(self.collected_at),
'paused_at': to_iso8601_datetime(self.paused_at)
})
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.released:
result['released'] = to_iso8601_date(self.released)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.tagline:
result['tagline'] = self.tagline
if self.runtime:
result['runtime'] = self.runtime
if self.certification:
result['certification'] = self.certification
if self.homepage:
result['homepage'] = self.homepage
if self.trailer:
result['trailer'] = self.trailer
if self.language:
result['language'] = self.language
if self.available_translations:
result['available_translations'] = self.available_translations
if self.genres:
result['genres'] = self.genres
return result | Dump movie to a dictionary.
:return: Movie dictionary
:rtype: :class:`~python:dict` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/movie.py#L123-L183 | [
"def to_iso8601_datetime(value):\n if value is None:\n return None\n\n return value.strftime('%Y-%m-%dT%H:%M:%S') + '.000-00:00'\n",
"def to_iso8601_date(value):\n if value is None:\n return None\n\n return value.strftime('%Y-%m-%d')\n",
"def to_identifier(self):\n \"\"\"Return the ... | class Movie(Video):
def __init__(self, client, keys, index=None):
super(Movie, self).__init__(client, keys, index)
self.title = None
"""
:type: :class:`~python:str`
Title
"""
self.year = None
"""
:type: :class:`~python:int`
Year
"""
self.watchers = None # trending
"""
:type: :class:`~python:int`
Number of active watchers (returned by the :code:`Trakt['movies'].trending()`
and :code:`Trakt['shows'].trending()` methods)
"""
self.tagline = None
"""
:type: :class:`~python:str`
Tagline
"""
self.released = None
"""
:type: :class:`~python:datetime.date`
Release date
"""
self.runtime = None
"""
:type: :class:`~python:int`
Duration (in minutes)
"""
self.certification = None
"""
:type: :class:`~python:str`
Content certification (e.g :code:`PG-13`)
"""
self.updated_at = None
"""
:type: :class:`~python:datetime.datetime`
Updated date/time
"""
self.homepage = None
"""
:type: :class:`~python:str`
Homepage URL
"""
self.trailer = None
"""
:type: :class:`~python:str`
Trailer URL
"""
self.language = None
"""
:type: :class:`~python:str`
Language (for title, overview, etc..)
"""
self.available_translations = None
"""
:type: :class:`~python:list`
Available translations (for title, overview, etc..)
"""
self.genres = None
"""
:type: :class:`~python:list`
Genres
"""
def to_identifier(self):
"""Return the movie identifier which is compatible with requests that require movie definitions.
:return: Movie identifier/definition
:rtype: :class:`~python:dict`
"""
return {
'ids': dict(self.keys),
'title': self.title,
'year': self.year
}
@deprecated('Movie.to_info() has been moved to Movie.to_dict()')
def to_info(self):
"""**Deprecated:** use the :code:`to_dict()` method instead."""
return self.to_dict()
def _update(self, info=None, **kwargs):
if not info:
return
super(Movie, self)._update(info, **kwargs)
update_attributes(self, info, [
'title',
# Trending
'watchers',
# Extended Info
'tagline',
'certification',
'homepage',
'trailer',
'language',
'available_translations',
'genres'
])
# Ensure `year` attribute is an integer (fixes incorrect type returned by search)
if info.get('year'):
self.year = int(info['year'])
# Extended Info
if info.get('runtime'):
self.runtime = info['runtime']
if 'released' in info:
self.released = from_iso8601_date(info.get('released'))
if 'updated_at' in info:
self.updated_at = from_iso8601_datetime(info.get('updated_at'))
@classmethod
def _construct(cls, client, keys, info, index=None, **kwargs):
movie = cls(client, keys, index=index)
movie._update(info, **kwargs)
return movie
def __repr__(self):
return '<Movie %r (%s)>' % (self.title, self.year)
|
fuzeman/trakt.py | trakt/objects/progress.py | Progress.to_dict | python | def to_dict(self):
result = super(Progress, self).to_dict()
label = LABELS['last_progress_change'][self.progress_type]
result[label] = to_iso8601_datetime(self.last_progress_change)
if self.progress_type == 'watched':
result['reset_at'] = self.reset_at
result['seasons'] = [
season.to_dict()
for season in self.seasons.values()
]
if self.hidden_seasons:
result['hidden_seasons'] = [
popitems(season.to_dict(), ['number', 'ids'])
for season in self.hidden_seasons.values()
]
if self.next_episode:
result['next_episode'] = popitems(self.next_episode.to_dict(), ['season', 'number', 'title', 'ids'])
result['next_episode']['season'] = self.next_episode.keys[0][0]
if self.last_episode:
result['last_episode'] = popitems(self.last_episode.to_dict(), ['season', 'number', 'title', 'ids'])
result['last_episode']['season'] = self.last_episode.keys[0][0]
return result | Dump progress to a dictionary.
:return: Progress dictionary
:rtype: :class:`~python:dict` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/progress.py#L108-L142 | [
"def popitems(d, keys):\n result = {}\n\n for key in keys:\n value = d.pop(key, None)\n\n if value is not None:\n result[key] = value\n\n return result\n",
"def to_iso8601_datetime(value):\n if value is None:\n return None\n\n return value.strftime('%Y-%m-%dT%H:%M:%S... | class Progress(BaseProgress):
progress_type = None
"""
:type: :class:`~python:str`
Progress Type (:code:`watched` or :code:`collection`)
"""
def __init__(self, client, aired=None, completed=None):
super(Progress, self).__init__(aired, completed)
self._client = client
self.last_progress_change = None
"""
:type: :class:`~python:datetime.datetime`
Last watched or collected date/time
"""
self.reset_at = None
"""
:type: :class:`~python:datetime.datetime`
Reset date/time (not applicable for collected progress)
"""
self.seasons = {}
"""
:type: :class:`~python:dict`
Season Progress, defined as :code:`{season_num: SeasonProgress}`
"""
self.hidden_seasons = None
"""
:type: :class:`~python:dict`
Hidden Seasons, defined as :code:`{season_num: Season}`
"""
self.next_episode = None
"""
:type: :class:`trakt.objects.episode.Episode`
Next Episode the user should watch or collect
"""
self.last_episode = None
"""
:type: :class:`trakt.objects.episode.Episode`
Last Episode the user watched or collected
"""
def _update(self, info=None, **kwargs):
if not info:
return
super(Progress, self)._update(info, **kwargs)
label = LABELS['last_progress_change'][self.progress_type]
if label in info:
self.last_progress_change = from_iso8601_datetime(info.get(label))
if 'reset_at' in info:
self.reset_at = from_iso8601_datetime(info.get('reset_at'))
if 'seasons' in info:
for season in info['seasons']:
season_progress = SeasonProgress._construct(season, progress_type=self.progress_type)
if season_progress:
self.seasons[season_progress.pk] = season_progress
if 'hidden_seasons' in info:
self.hidden_seasons = {}
for season in info['hidden_seasons']:
hidden_season = self._client.construct('season', season)
if hidden_season:
self.hidden_seasons[hidden_season.pk] = hidden_season
if 'next_episode' in info:
episode = self._client.construct('episode', info['next_episode'])
if episode:
self.next_episode = episode
if 'last_episode' in info:
episode = self._client.construct('episode', info['last_episode'])
if episode:
self.last_episode = episode
@classmethod
def _construct(cls, client, info=None, **kwargs):
if not info:
return
progress = cls(client)
progress._update(info, **kwargs)
return progress
|
fuzeman/trakt.py | trakt/interfaces/scrobble.py | ScrobbleInterface.action | python | def action(self, action, movie=None, show=None, episode=None, progress=0.0, **kwargs):
if movie and (show or episode):
raise ValueError('Only one media type should be provided')
if not movie and not episode:
raise ValueError('Missing media item')
data = {
'progress': progress,
'app_version': kwargs.pop('app_version', self.client.version),
'app_date': kwargs.pop('app_date', None)
}
if movie:
# TODO validate
data['movie'] = movie
elif episode:
if show:
data['show'] = show
# TODO validate
data['episode'] = episode
response = self.http.post(
action,
data=data,
**popitems(kwargs, [
'authenticated',
'validate_token'
])
)
return self.get_data(response, **kwargs) | Perform scrobble action.
:param action: Action to perform (either :code:`start`, :code:`pause` or :code:`stop`)
:type action: :class:`~python:str`
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/scrobble.py#L12-L134 | [
"def popitems(d, keys):\n result = {}\n\n for key in keys:\n value = d.pop(key, None)\n\n if value is not None:\n result[key] = value\n\n return result\n",
"def get_data(self, response, exceptions=False, pagination=False, parse=True):\n if response is None:\n if excepti... | class ScrobbleInterface(Interface):
path = 'scrobble'
@application
@authenticated
@application
@authenticated
def start(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "start" action.
Use this method when the video initially starts playing or is un-paused. This will
remove any playback progress if it exists.
**Note:** A watching status will auto expire after the remaining runtime has elapsed.
There is no need to re-send every 15 minutes.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'start',
movie, show, episode,
progress,
**kwargs
)
@application
@authenticated
def pause(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "pause' action.
Use this method when the video is paused. The playback progress will be saved and
:code:`Trakt['sync/playback'].get()` can be used to resume the video from this exact
position. Un-pause a video by calling the :code:`Trakt['scrobble'].start()` method again.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'pause',
'progress': 75,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'pause',
movie, show, episode,
progress,
**kwargs
)
@application
@authenticated
def stop(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "stop" action.
Use this method when the video is stopped or finishes playing on its own. If the
progress is above 80%, the video will be scrobbled and the :code:`action` will be set
to **scrobble**.
If the progress is less than 80%, it will be treated as a *pause* and the :code:`action`
will be set to **pause**. The playback progress will be saved and :code:`Trakt['sync/playback'].get()`
can be used to resume the video from this exact position.
**Note:** If you prefer to use a threshold higher than 80%, you should use :code:`Trakt['scrobble'].pause()`
yourself so it doesn't create duplicate scrobbles.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'scrobble',
'progress': 99.9,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'stop',
movie, show, episode,
progress,
**kwargs
)
|
fuzeman/trakt.py | trakt/interfaces/scrobble.py | ScrobbleInterface.start | python | def start(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
return self.action(
'start',
movie, show, episode,
progress,
**kwargs
) | Send the scrobble "start" action.
Use this method when the video initially starts playing or is un-paused. This will
remove any playback progress if it exists.
**Note:** A watching status will auto expire after the remaining runtime has elapsed.
There is no need to re-send every 15 minutes.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/scrobble.py#L138-L238 | null | class ScrobbleInterface(Interface):
path = 'scrobble'
@application
@authenticated
def action(self, action, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Perform scrobble action.
:param action: Action to perform (either :code:`start`, :code:`pause` or :code:`stop`)
:type action: :class:`~python:str`
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
if movie and (show or episode):
raise ValueError('Only one media type should be provided')
if not movie and not episode:
raise ValueError('Missing media item')
data = {
'progress': progress,
'app_version': kwargs.pop('app_version', self.client.version),
'app_date': kwargs.pop('app_date', None)
}
if movie:
# TODO validate
data['movie'] = movie
elif episode:
if show:
data['show'] = show
# TODO validate
data['episode'] = episode
response = self.http.post(
action,
data=data,
**popitems(kwargs, [
'authenticated',
'validate_token'
])
)
return self.get_data(response, **kwargs)
@application
@authenticated
@application
@authenticated
def pause(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "pause' action.
Use this method when the video is paused. The playback progress will be saved and
:code:`Trakt['sync/playback'].get()` can be used to resume the video from this exact
position. Un-pause a video by calling the :code:`Trakt['scrobble'].start()` method again.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'pause',
'progress': 75,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'pause',
movie, show, episode,
progress,
**kwargs
)
@application
@authenticated
def stop(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "stop" action.
Use this method when the video is stopped or finishes playing on its own. If the
progress is above 80%, the video will be scrobbled and the :code:`action` will be set
to **scrobble**.
If the progress is less than 80%, it will be treated as a *pause* and the :code:`action`
will be set to **pause**. The playback progress will be saved and :code:`Trakt['sync/playback'].get()`
can be used to resume the video from this exact position.
**Note:** If you prefer to use a threshold higher than 80%, you should use :code:`Trakt['scrobble'].pause()`
yourself so it doesn't create duplicate scrobbles.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'scrobble',
'progress': 99.9,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'stop',
movie, show, episode,
progress,
**kwargs
)
|
fuzeman/trakt.py | trakt/interfaces/scrobble.py | ScrobbleInterface.pause | python | def pause(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
return self.action(
'pause',
movie, show, episode,
progress,
**kwargs
) | Send the scrobble "pause' action.
Use this method when the video is paused. The playback progress will be saved and
:code:`Trakt['sync/playback'].get()` can be used to resume the video from this exact
position. Un-pause a video by calling the :code:`Trakt['scrobble'].start()` method again.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'pause',
'progress': 75,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/scrobble.py#L242-L340 | null | class ScrobbleInterface(Interface):
path = 'scrobble'
@application
@authenticated
def action(self, action, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Perform scrobble action.
:param action: Action to perform (either :code:`start`, :code:`pause` or :code:`stop`)
:type action: :class:`~python:str`
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
if movie and (show or episode):
raise ValueError('Only one media type should be provided')
if not movie and not episode:
raise ValueError('Missing media item')
data = {
'progress': progress,
'app_version': kwargs.pop('app_version', self.client.version),
'app_date': kwargs.pop('app_date', None)
}
if movie:
# TODO validate
data['movie'] = movie
elif episode:
if show:
data['show'] = show
# TODO validate
data['episode'] = episode
response = self.http.post(
action,
data=data,
**popitems(kwargs, [
'authenticated',
'validate_token'
])
)
return self.get_data(response, **kwargs)
@application
@authenticated
def start(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "start" action.
Use this method when the video initially starts playing or is un-paused. This will
remove any playback progress if it exists.
**Note:** A watching status will auto expire after the remaining runtime has elapsed.
There is no need to re-send every 15 minutes.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'start',
movie, show, episode,
progress,
**kwargs
)
@application
@authenticated
@application
@authenticated
def stop(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "stop" action.
Use this method when the video is stopped or finishes playing on its own. If the
progress is above 80%, the video will be scrobbled and the :code:`action` will be set
to **scrobble**.
If the progress is less than 80%, it will be treated as a *pause* and the :code:`action`
will be set to **pause**. The playback progress will be saved and :code:`Trakt['sync/playback'].get()`
can be used to resume the video from this exact position.
**Note:** If you prefer to use a threshold higher than 80%, you should use :code:`Trakt['scrobble'].pause()`
yourself so it doesn't create duplicate scrobbles.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'scrobble',
'progress': 99.9,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'stop',
movie, show, episode,
progress,
**kwargs
)
|
fuzeman/trakt.py | trakt/interfaces/scrobble.py | ScrobbleInterface.stop | python | def stop(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
return self.action(
'stop',
movie, show, episode,
progress,
**kwargs
) | Send the scrobble "stop" action.
Use this method when the video is stopped or finishes playing on its own. If the
progress is above 80%, the video will be scrobbled and the :code:`action` will be set
to **scrobble**.
If the progress is less than 80%, it will be treated as a *pause* and the :code:`action`
will be set to **pause**. The playback progress will be saved and :code:`Trakt['sync/playback'].get()`
can be used to resume the video from this exact position.
**Note:** If you prefer to use a threshold higher than 80%, you should use :code:`Trakt['scrobble'].pause()`
yourself so it doesn't create duplicate scrobbles.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'scrobble',
'progress': 99.9,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/scrobble.py#L344-L449 | null | class ScrobbleInterface(Interface):
path = 'scrobble'
@application
@authenticated
def action(self, action, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Perform scrobble action.
:param action: Action to perform (either :code:`start`, :code:`pause` or :code:`stop`)
:type action: :class:`~python:str`
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
if movie and (show or episode):
raise ValueError('Only one media type should be provided')
if not movie and not episode:
raise ValueError('Missing media item')
data = {
'progress': progress,
'app_version': kwargs.pop('app_version', self.client.version),
'app_date': kwargs.pop('app_date', None)
}
if movie:
# TODO validate
data['movie'] = movie
elif episode:
if show:
data['show'] = show
# TODO validate
data['episode'] = episode
response = self.http.post(
action,
data=data,
**popitems(kwargs, [
'authenticated',
'validate_token'
])
)
return self.get_data(response, **kwargs)
@application
@authenticated
def start(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "start" action.
Use this method when the video initially starts playing or is un-paused. This will
remove any playback progress if it exists.
**Note:** A watching status will auto expire after the remaining runtime has elapsed.
There is no need to re-send every 15 minutes.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'start',
'progress': 1.25,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'start',
movie, show, episode,
progress,
**kwargs
)
@application
@authenticated
def pause(self, movie=None, show=None, episode=None, progress=0.0, **kwargs):
"""Send the scrobble "pause' action.
Use this method when the video is paused. The playback progress will be saved and
:code:`Trakt['sync/playback'].get()` can be used to resume the video from this exact
position. Un-pause a video by calling the :code:`Trakt['scrobble'].start()` method again.
:param movie: Movie definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'tmdb': 118340
}
}
:type movie: :class:`~python:dict`
:param show: Show definition (or `None`)
**Example:**
.. code-block:: python
{
'title': 'Breaking Bad',
'year': 2008,
'ids': {
'tvdb': 81189
}
}
:type show: :class:`~python:dict`
:param episode: Episode definition (or `None`)
**Example:**
.. code-block:: python
{
"season": 3,
"number": 11
}
:type episode: :class:`~python:dict`
:param progress: Current movie/episode progress percentage
:type progress: :class:`~python:float`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response (or `None`)
**Example:**
.. code-block:: python
{
'action': 'pause',
'progress': 75,
'sharing': {
'facebook': true,
'twitter': true,
'tumblr': false
},
'movie': {
'title': 'Guardians of the Galaxy',
'year': 2014,
'ids': {
'trakt': 28,
'slug': 'guardians-of-the-galaxy-2014',
'imdb': 'tt2015381',
'tmdb': 118340
}
}
}
:rtype: :class:`~python:dict`
"""
return self.action(
'pause',
movie, show, episode,
progress,
**kwargs
)
@application
@authenticated
|
fuzeman/trakt.py | trakt/objects/list/custom.py | CustomList.delete | python | def delete(self, **kwargs):
return self._client['users/*/lists/*'].delete(self.username, self.id, **kwargs) | Delete the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/list/custom.py#L86-L96 | null | class CustomList(List):
def __init__(self, client, keys, username=None):
super(CustomList, self).__init__(client, keys)
self.username = username
"""
:type: :class:`~python:str`
Author username
"""
self.privacy = None
"""
:type: :class:`~python:str`
Privacy mode
**Possible values:**
- :code:`private`
- :code:`friends`
- :code:`public`
"""
def _update(self, info=None):
if not info:
return
super(CustomList, self)._update(info)
update_attributes(self, info, [
'privacy'
])
# Update with user details
user = info.get('user', {})
if user.get('username'):
self.username = user['username']
@classmethod
def _construct(cls, client, keys, info, **kwargs):
if not info:
return None
obj = cls(client, keys, **kwargs)
obj._update(info)
return obj
def items(self, **kwargs):
"""Retrieve list items.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Current list items
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
return self._client['users/*/lists/*'].items(self.username, self.id, **kwargs)
#
# Owner actions
#
def add(self, items, **kwargs):
"""Add specified items to the list.
:param items: Items that should be added to the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict`
"""
return self._client['users/*/lists/*'].add(self.username, self.id, items, **kwargs)
def update(self, **kwargs):
"""Update the list with the current object attributes.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
item = self._client['users/*/lists/*'].update(self.username, self.id, return_type='data', **kwargs)
if not item:
return False
self._update(item)
return True
def remove(self, items, **kwargs):
"""Remove specified items from the list.
:param items: Items that should be removed from the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict`
"""
return self._client['users/*/lists/*'].remove(self.username, self.id, items, **kwargs)
#
# Actions
#
def like(self, **kwargs):
"""Like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].like(self.username, self.id, **kwargs)
def unlike(self, **kwargs):
"""Un-like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].unlike(self.username, self.id, **kwargs)
|
fuzeman/trakt.py | trakt/objects/list/custom.py | CustomList.update | python | def update(self, **kwargs):
item = self._client['users/*/lists/*'].update(self.username, self.id, return_type='data', **kwargs)
if not item:
return False
self._update(item)
return True | Update the list with the current object attributes.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/list/custom.py#L98-L114 | [
"def _update(self, info=None):\n if not info:\n return\n\n super(CustomList, self)._update(info)\n\n update_attributes(self, info, [\n 'privacy'\n ])\n\n # Update with user details\n user = info.get('user', {})\n\n if user.get('username'):\n self.username = user['username']... | class CustomList(List):
def __init__(self, client, keys, username=None):
super(CustomList, self).__init__(client, keys)
self.username = username
"""
:type: :class:`~python:str`
Author username
"""
self.privacy = None
"""
:type: :class:`~python:str`
Privacy mode
**Possible values:**
- :code:`private`
- :code:`friends`
- :code:`public`
"""
def _update(self, info=None):
if not info:
return
super(CustomList, self)._update(info)
update_attributes(self, info, [
'privacy'
])
# Update with user details
user = info.get('user', {})
if user.get('username'):
self.username = user['username']
@classmethod
def _construct(cls, client, keys, info, **kwargs):
if not info:
return None
obj = cls(client, keys, **kwargs)
obj._update(info)
return obj
def items(self, **kwargs):
"""Retrieve list items.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Current list items
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
return self._client['users/*/lists/*'].items(self.username, self.id, **kwargs)
#
# Owner actions
#
def add(self, items, **kwargs):
"""Add specified items to the list.
:param items: Items that should be added to the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict`
"""
return self._client['users/*/lists/*'].add(self.username, self.id, items, **kwargs)
def delete(self, **kwargs):
"""Delete the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].delete(self.username, self.id, **kwargs)
def remove(self, items, **kwargs):
"""Remove specified items from the list.
:param items: Items that should be removed from the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict`
"""
return self._client['users/*/lists/*'].remove(self.username, self.id, items, **kwargs)
#
# Actions
#
def like(self, **kwargs):
"""Like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].like(self.username, self.id, **kwargs)
def unlike(self, **kwargs):
"""Un-like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].unlike(self.username, self.id, **kwargs)
|
fuzeman/trakt.py | trakt/objects/list/custom.py | CustomList.remove | python | def remove(self, items, **kwargs):
return self._client['users/*/lists/*'].remove(self.username, self.id, items, **kwargs) | Remove specified items from the list.
:param items: Items that should be removed from the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/list/custom.py#L116-L129 | null | class CustomList(List):
def __init__(self, client, keys, username=None):
super(CustomList, self).__init__(client, keys)
self.username = username
"""
:type: :class:`~python:str`
Author username
"""
self.privacy = None
"""
:type: :class:`~python:str`
Privacy mode
**Possible values:**
- :code:`private`
- :code:`friends`
- :code:`public`
"""
def _update(self, info=None):
if not info:
return
super(CustomList, self)._update(info)
update_attributes(self, info, [
'privacy'
])
# Update with user details
user = info.get('user', {})
if user.get('username'):
self.username = user['username']
@classmethod
def _construct(cls, client, keys, info, **kwargs):
if not info:
return None
obj = cls(client, keys, **kwargs)
obj._update(info)
return obj
def items(self, **kwargs):
"""Retrieve list items.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Current list items
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
return self._client['users/*/lists/*'].items(self.username, self.id, **kwargs)
#
# Owner actions
#
def add(self, items, **kwargs):
"""Add specified items to the list.
:param items: Items that should be added to the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict`
"""
return self._client['users/*/lists/*'].add(self.username, self.id, items, **kwargs)
def delete(self, **kwargs):
"""Delete the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].delete(self.username, self.id, **kwargs)
def update(self, **kwargs):
"""Update the list with the current object attributes.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
item = self._client['users/*/lists/*'].update(self.username, self.id, return_type='data', **kwargs)
if not item:
return False
self._update(item)
return True
#
# Actions
#
def like(self, **kwargs):
"""Like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].like(self.username, self.id, **kwargs)
def unlike(self, **kwargs):
"""Un-like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].unlike(self.username, self.id, **kwargs)
|
fuzeman/trakt.py | trakt/objects/list/custom.py | CustomList.like | python | def like(self, **kwargs):
return self._client['users/*/lists/*'].like(self.username, self.id, **kwargs) | Like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/list/custom.py#L135-L145 | null | class CustomList(List):
def __init__(self, client, keys, username=None):
super(CustomList, self).__init__(client, keys)
self.username = username
"""
:type: :class:`~python:str`
Author username
"""
self.privacy = None
"""
:type: :class:`~python:str`
Privacy mode
**Possible values:**
- :code:`private`
- :code:`friends`
- :code:`public`
"""
def _update(self, info=None):
if not info:
return
super(CustomList, self)._update(info)
update_attributes(self, info, [
'privacy'
])
# Update with user details
user = info.get('user', {})
if user.get('username'):
self.username = user['username']
@classmethod
def _construct(cls, client, keys, info, **kwargs):
if not info:
return None
obj = cls(client, keys, **kwargs)
obj._update(info)
return obj
def items(self, **kwargs):
"""Retrieve list items.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Current list items
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
return self._client['users/*/lists/*'].items(self.username, self.id, **kwargs)
#
# Owner actions
#
def add(self, items, **kwargs):
"""Add specified items to the list.
:param items: Items that should be added to the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict`
"""
return self._client['users/*/lists/*'].add(self.username, self.id, items, **kwargs)
def delete(self, **kwargs):
"""Delete the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].delete(self.username, self.id, **kwargs)
def update(self, **kwargs):
"""Update the list with the current object attributes.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
item = self._client['users/*/lists/*'].update(self.username, self.id, return_type='data', **kwargs)
if not item:
return False
self._update(item)
return True
def remove(self, items, **kwargs):
"""Remove specified items from the list.
:param items: Items that should be removed from the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict`
"""
return self._client['users/*/lists/*'].remove(self.username, self.id, items, **kwargs)
#
# Actions
#
def unlike(self, **kwargs):
"""Un-like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].unlike(self.username, self.id, **kwargs)
|
fuzeman/trakt.py | trakt/objects/list/custom.py | CustomList.unlike | python | def unlike(self, **kwargs):
return self._client['users/*/lists/*'].unlike(self.username, self.id, **kwargs) | Un-like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/list/custom.py#L147-L157 | null | class CustomList(List):
def __init__(self, client, keys, username=None):
super(CustomList, self).__init__(client, keys)
self.username = username
"""
:type: :class:`~python:str`
Author username
"""
self.privacy = None
"""
:type: :class:`~python:str`
Privacy mode
**Possible values:**
- :code:`private`
- :code:`friends`
- :code:`public`
"""
def _update(self, info=None):
if not info:
return
super(CustomList, self)._update(info)
update_attributes(self, info, [
'privacy'
])
# Update with user details
user = info.get('user', {})
if user.get('username'):
self.username = user['username']
@classmethod
def _construct(cls, client, keys, info, **kwargs):
if not info:
return None
obj = cls(client, keys, **kwargs)
obj._update(info)
return obj
def items(self, **kwargs):
"""Retrieve list items.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Current list items
:rtype: :class:`~python:list` of :class:`trakt.objects.media.Media`
"""
return self._client['users/*/lists/*'].items(self.username, self.id, **kwargs)
#
# Owner actions
#
def add(self, items, **kwargs):
"""Add specified items to the list.
:param items: Items that should be added to the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict`
"""
return self._client['users/*/lists/*'].add(self.username, self.id, items, **kwargs)
def delete(self, **kwargs):
"""Delete the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].delete(self.username, self.id, **kwargs)
def update(self, **kwargs):
"""Update the list with the current object attributes.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
item = self._client['users/*/lists/*'].update(self.username, self.id, return_type='data', **kwargs)
if not item:
return False
self._update(item)
return True
def remove(self, items, **kwargs):
"""Remove specified items from the list.
:param items: Items that should be removed from the list
:type items: :class:`~python:list`
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Response
:rtype: :class:`~python:dict`
"""
return self._client['users/*/lists/*'].remove(self.username, self.id, items, **kwargs)
#
# Actions
#
def like(self, **kwargs):
"""Like the list.
:param kwargs: Extra request options
:type kwargs: :class:`~python:dict`
:return: Boolean to indicate if the request was successful
:rtype: :class:`~python:bool`
"""
return self._client['users/*/lists/*'].like(self.username, self.id, **kwargs)
|
fuzeman/trakt.py | trakt/interfaces/calendars.py | Base.get | python | def get(self, source, media, collection=None, start_date=None, days=None, query=None, years=None, genres=None,
languages=None, countries=None, runtimes=None, ratings=None, certifications=None, networks=None,
status=None, **kwargs):
if source not in ['all', 'my']:
raise ValueError('Unknown collection type: %s' % (source,))
if media not in ['dvd', 'movies', 'shows']:
raise ValueError('Unknown media type: %s' % (media,))
# Default `start_date` to today when only `days` is provided
if start_date is None and days:
start_date = datetime.utcnow()
# Request calendar collection
response = self.http.get(
'/calendars/%s/%s%s' % (
source, media,
('/' + collection) if collection else ''
),
params=[
start_date.strftime('%Y-%m-%d') if start_date else None,
days
],
query={
'query': query,
'years': years,
'genres': genres,
'languages': languages,
'countries': countries,
'runtimes': runtimes,
'ratings': ratings,
'certifications': certifications,
# TV
'networks': networks,
'status': status
},
**popitems(kwargs, [
'authenticated',
'validate_token'
])
)
# Parse response
items = self.get_data(response, **kwargs)
if isinstance(items, requests.Response):
return items
# Map items
if media == 'shows':
return SummaryMapper.episodes(
self.client, items,
parse_show=True
)
return SummaryMapper.movies(self.client, items) | Retrieve calendar items.
The `all` calendar displays info for all shows airing during the specified period. The `my` calendar displays
episodes for all shows that have been watched, collected, or watchlisted.
:param source: Calendar source (`all` or `my`)
:type source: str
:param media: Media type (`dvd`, `movies` or `shows`)
:type media: str
:param collection: Collection type (`new`, `premieres`)
:type collection: str or None
:param start_date: Start date (defaults to today)
:type start_date: datetime or None
:param days: Number of days to display (defaults to `7`)
:type days: int or None
:param query: Search title or description.
:type query: str or None
:param years: Year or range of years (e.g. `2014`, or `2014-2016`)
:type years: int or str or tuple or None
:param genres: Genre slugs (e.g. `action`)
:type genres: str or list of str or None
:param languages: Language codes (e.g. `en`)
:type languages: str or list of str or None
:param countries: Country codes (e.g. `us`)
:type countries: str or list of str or None
:param runtimes: Runtime range in minutes (e.g. `30-90`)
:type runtimes: str or tuple or None
:param ratings: Rating range between `0` and `100` (e.g. `75-100`)
:type ratings: str or tuple or None
:param certifications: US Content Certification (e.g. `pg-13`, `tv-pg`)
:type certifications: str or list of str or None
:param networks: (TV) Network name (e.g. `HBO`)
:type networks: str or list of str or None
:param status: (TV) Show status (e.g. `returning series`, `in production`, ended`)
:type status: str or list of str or None
:return: Items
:rtype: list of trakt.objects.video.Video | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/interfaces/calendars.py#L24-L135 | [
"def popitems(d, keys):\n result = {}\n\n for key in keys:\n value = d.pop(key, None)\n\n if value is not None:\n result[key] = value\n\n return result\n",
"def get_data(self, response, exceptions=False, pagination=False, parse=True):\n if response is None:\n if excepti... | class Base(Interface):
def new(self, media, **kwargs):
if media != 'shows':
raise ValueError("Media '%s' does not support the `new()` method" % (media,))
return self.get(media, 'new', **kwargs)
def premieres(self, media, **kwargs):
if media != 'shows':
raise ValueError("Media '%s' does not support the `premieres()` method" % (media,))
return self.get(media, 'premieres', **kwargs)
|
fuzeman/trakt.py | trakt/objects/show.py | Show.episodes | python | def episodes(self):
for sk, season in iteritems(self.seasons):
# Yield each episode in season
for ek, episode in iteritems(season.episodes):
yield (sk, ek), episode | Return a flat episode iterator.
:returns: Iterator :code:`((season_num, episode_num), Episode)`
:rtype: iterator | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/show.py#L138-L148 | null | class Show(Media):
def __init__(self, client, keys, index=None):
super(Show, self).__init__(client, keys, index)
self.title = None
"""
:type: :class:`~python:str`
Title
"""
self.year = None
"""
:type: :class:`~python:int`
Year
"""
self.seasons = {}
"""
:type: :class:`~python:dict`
Seasons, defined as :code:`{season_num: Season}`
**Note:** this field might not be available with some methods
"""
self.watchers = None
"""
:type: :class:`~python:int`
Number of active watchers (returned by the :code:`Trakt['movies'].trending()`
and :code:`Trakt['shows'].trending()` methods)
"""
self.first_aired = None
"""
:type: :class:`~python:datetime.datetime`
First air date
"""
self.airs = None
"""
:type: :class:`~python:dict`
Dictionary with day, time and timezone in which the show airs
"""
self.runtime = None
"""
:type: :class:`~python:int`
Duration (in minutes)
"""
self.certification = None
"""
:type: :class:`~python:str`
Content certification (e.g :code:`TV-MA`)
"""
self.network = None
"""
:type: :class:`~python:str`
Network in which the show is aired
"""
self.country = None
"""
:type: :class:`~python:str`
Country in which the show is aired
"""
self.updated_at = None
"""
:type: :class:`~python:datetime.datetime`
Updated date/time
"""
self.status = None
"""
:type: :class:`~python:str`
Value of :code:`returning series` (airing right now),
:code:`in production` (airing soon), :code:`planned` (in development),
:code:`canceled`, or :code:`ended`
"""
self.homepage = None
"""
:type: :class:`~python:str`
Homepage URL
"""
self.language = None
"""
:type: :class:`~python:str`
Language (for title, overview, etc..)
"""
self.available_translations = None
"""
:type: :class:`~python:list`
Available translations (for title, overview, etc..)
"""
self.genres = None
"""
:type: :class:`~python:list`
Genres
"""
self.aired_episodes = None
"""
:type: :class:`~python:int`
Aired episode count
"""
def to_identifier(self):
"""Return the show identifier which is compatible with requests that require show definitions.
:return: Show identifier/definition
:rtype: :class:`~python:dict`
"""
return {
'ids': dict(self.keys),
'title': self.title,
'year': self.year
}
@deprecated('Show.to_info() has been moved to Show.to_dict()')
def to_info(self):
"""**Deprecated:** use the :code:`to_dict()` method instead."""
return self.to_dict()
def to_dict(self):
"""Dump show to a dictionary.
:return: Show dictionary
:rtype: :class:`~python:dict`
"""
result = self.to_identifier()
result['seasons'] = [
season.to_dict()
for season in self.seasons.values()
]
result['in_watchlist'] = self.in_watchlist if self.in_watchlist is not None else 0
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.airs:
result['airs'] = self.airs
if self.runtime:
result['runtime'] = self.runtime
if self.certification:
result['certification'] = self.certification
if self.network:
result['network'] = self.network
if self.country:
result['country'] = self.country
if self.status:
result['status'] = self.status
if self.homepage:
result['homepage'] = self.homepage
if self.language:
result['language'] = self.language
if self.available_translations:
result['available_translations'] = self.available_translations
if self.genres:
result['genres'] = self.genres
if self.aired_episodes:
result['aired_episodes'] = self.aired_episodes
return result
def _update(self, info=None, **kwargs):
if not info:
return
super(Show, self)._update(info, **kwargs)
update_attributes(self, info, [
'title',
# Trending
'watchers',
# Extended Info
'airs',
'runtime',
'certification',
'network',
'country',
'status',
'homepage',
'language',
'available_translations',
'genres',
'aired_episodes'
])
# Ensure `year` attribute is an integer (fixes incorrect type returned by search)
if info.get('year'):
self.year = int(info['year'])
# Extended Info
if 'first_aired' in info:
self.first_aired = from_iso8601_datetime(info.get('first_aired'))
if 'updated_at' in info:
self.updated_at = from_iso8601_datetime(info.get('updated_at'))
@classmethod
def _construct(cls, client, keys, info=None, index=None, **kwargs):
show = cls(client, keys, index=index)
show._update(info, **kwargs)
return show
def __repr__(self):
return '<Show %r (%s)>' % (self.title, self.year)
|
fuzeman/trakt.py | trakt/objects/show.py | Show.to_dict | python | def to_dict(self):
result = self.to_identifier()
result['seasons'] = [
season.to_dict()
for season in self.seasons.values()
]
result['in_watchlist'] = self.in_watchlist if self.in_watchlist is not None else 0
if self.rating:
result['rating'] = self.rating.value
result['rated_at'] = to_iso8601_datetime(self.rating.timestamp)
# Extended Info
if self.first_aired:
result['first_aired'] = to_iso8601_datetime(self.first_aired)
if self.updated_at:
result['updated_at'] = to_iso8601_datetime(self.updated_at)
if self.overview:
result['overview'] = self.overview
if self.airs:
result['airs'] = self.airs
if self.runtime:
result['runtime'] = self.runtime
if self.certification:
result['certification'] = self.certification
if self.network:
result['network'] = self.network
if self.country:
result['country'] = self.country
if self.status:
result['status'] = self.status
if self.homepage:
result['homepage'] = self.homepage
if self.language:
result['language'] = self.language
if self.available_translations:
result['available_translations'] = self.available_translations
if self.genres:
result['genres'] = self.genres
if self.aired_episodes:
result['aired_episodes'] = self.aired_episodes
return result | Dump show to a dictionary.
:return: Show dictionary
:rtype: :class:`~python:dict` | train | https://github.com/fuzeman/trakt.py/blob/14c6b72e3c13ea2975007aeac0c01ad2222b67f3/trakt/objects/show.py#L168-L231 | [
"def to_iso8601_datetime(value):\n if value is None:\n return None\n\n return value.strftime('%Y-%m-%dT%H:%M:%S') + '.000-00:00'\n",
"def to_identifier(self):\n \"\"\"Return the show identifier which is compatible with requests that require show definitions.\n\n :return: Show identifier/definit... | class Show(Media):
def __init__(self, client, keys, index=None):
super(Show, self).__init__(client, keys, index)
self.title = None
"""
:type: :class:`~python:str`
Title
"""
self.year = None
"""
:type: :class:`~python:int`
Year
"""
self.seasons = {}
"""
:type: :class:`~python:dict`
Seasons, defined as :code:`{season_num: Season}`
**Note:** this field might not be available with some methods
"""
self.watchers = None
"""
:type: :class:`~python:int`
Number of active watchers (returned by the :code:`Trakt['movies'].trending()`
and :code:`Trakt['shows'].trending()` methods)
"""
self.first_aired = None
"""
:type: :class:`~python:datetime.datetime`
First air date
"""
self.airs = None
"""
:type: :class:`~python:dict`
Dictionary with day, time and timezone in which the show airs
"""
self.runtime = None
"""
:type: :class:`~python:int`
Duration (in minutes)
"""
self.certification = None
"""
:type: :class:`~python:str`
Content certification (e.g :code:`TV-MA`)
"""
self.network = None
"""
:type: :class:`~python:str`
Network in which the show is aired
"""
self.country = None
"""
:type: :class:`~python:str`
Country in which the show is aired
"""
self.updated_at = None
"""
:type: :class:`~python:datetime.datetime`
Updated date/time
"""
self.status = None
"""
:type: :class:`~python:str`
Value of :code:`returning series` (airing right now),
:code:`in production` (airing soon), :code:`planned` (in development),
:code:`canceled`, or :code:`ended`
"""
self.homepage = None
"""
:type: :class:`~python:str`
Homepage URL
"""
self.language = None
"""
:type: :class:`~python:str`
Language (for title, overview, etc..)
"""
self.available_translations = None
"""
:type: :class:`~python:list`
Available translations (for title, overview, etc..)
"""
self.genres = None
"""
:type: :class:`~python:list`
Genres
"""
self.aired_episodes = None
"""
:type: :class:`~python:int`
Aired episode count
"""
def episodes(self):
    """Iterate over every episode across all known seasons.

    :returns: Iterator of :code:`((season_num, episode_num), Episode)` pairs
    :rtype: iterator
    """
    for season_num, season in iteritems(self.seasons):
        # Walk the episodes of the current season
        for episode_num, episode in iteritems(season.episodes):
            yield (season_num, episode_num), episode
def to_identifier(self):
    """Build the minimal show definition used by identifier-based requests.

    :return: Show identifier/definition with ``ids``, ``title`` and ``year``
    :rtype: :class:`~python:dict`
    """
    identifier = {'ids': dict(self.keys)}
    identifier['title'] = self.title
    identifier['year'] = self.year
    return identifier
@deprecated('Show.to_info() has been moved to Show.to_dict()')
def to_info(self):
    """**Deprecated:** alias for :code:`to_dict()`; use that method instead."""
    return self.to_dict()
def _update(self, info=None, **kwargs):
    """Refresh this show's attributes from a trakt.tv response dictionary.

    :param info: Response dictionary (no-op when empty or :code:`None`)
    """
    if not info:
        return

    super(Show, self)._update(info, **kwargs)

    attributes = [
        'title',

        # Trending
        'watchers',

        # Extended Info
        'airs', 'runtime', 'certification', 'network', 'country',
        'status', 'homepage', 'language', 'available_translations',
        'genres', 'aired_episodes'
    ]
    update_attributes(self, info, attributes)

    # Search responses can return `year` as a string - coerce it to an integer
    if info.get('year'):
        self.year = int(info['year'])

    # Extended Info - parse ISO 8601 timestamps
    if 'first_aired' in info:
        self.first_aired = from_iso8601_datetime(info.get('first_aired'))

    if 'updated_at' in info:
        self.updated_at = from_iso8601_datetime(info.get('updated_at'))
@classmethod
def _construct(cls, client, keys, info=None, index=None, **kwargs):
    """Build a new show item and populate it from `info`. (internal)"""
    instance = cls(client, keys, index=index)
    instance._update(info, **kwargs)
    return instance
def __repr__(self):
    """Return a debug representation, e.g. ``<Show 'Title' (2008)>``."""
    return '<Show {0!r} ({1})>'.format(self.title, self.year)
|
fuzeman/trakt.py | trakt/core/request.py | TraktRequest.construct_url | python | def construct_url(self):
def construct_url(self):
    """Construct a full trakt request URI, with `params` and `query`.

    :return: Absolute request URL
    :rtype: :class:`~python:str`
    """
    segments = [self.path] + list(self.params)

    # Join the non-empty segments onto the API base URL
    url = self.client.base_url + '/'.join(
        str(segment) for segment in segments
        if segment
    )

    # Append the encoded query string (when any parameters are set)
    encoded = self.encode_query(self.query)

    if encoded:
        url = url + '?' + encoded

    return url
"def encode_query(cls, parameters):\n if not parameters:\n return ''\n\n return urlencode([\n (key, cls.encode_query_parameter(value))\n for key, value in parameters.items()\n if value is not None\n ])\n"
] | class TraktRequest(object):
def __init__(self, client, **kwargs):
self.client = client
self.configuration = client.configuration
self.kwargs = kwargs
self.request = None
# Parsed Attributes
self.path = None
self.params = None
self.query = None
self.data = None
self.method = None
def prepare(self):
    """Build and return a prepared request ready to be sent.

    :return: Prepared request (``requests.PreparedRequest``)
    """
    self.request = Request()

    # Parse the raw options, then fill in the request fields
    self.transform_parameters()
    self.request.url = self.construct_url()

    self.request.method = self.transform_method()
    self.request.headers = self.transform_headers()

    body = self.transform_data()

    if body:
        self.request.data = json.dumps(body)

    return self.request.prepare()
def transform_parameters(self):
    """Normalize the raw `path`, `params` and `query` options."""
    # Normalize `path` to the form "/segment" (leading slash, no trailing slash)
    path = self.kwargs.get('path')

    if not path.startswith('/'):
        path = '/' + path

    if path.endswith('/'):
        path = path[:-1]

    self.path = path

    # Coerce `params` into a list (a single string is wrapped)
    params = self.kwargs.get('params') or []

    if isinstance(params, six.string_types):
        params = [params]

    self.params = params

    # Default `query` to an empty dictionary
    self.query = self.kwargs.get('query') or {}
def transform_method(self):
self.method = self.kwargs.get('method')
# Pick `method` (if not provided)
if not self.method:
self.method = 'POST' if self.data else 'GET'
return self.method
def transform_headers(self):
headers = self.kwargs.get('headers') or {}
headers['Content-Type'] = 'application/json'
headers['trakt-api-version'] = '2'
# API Key / Client ID
if self.client.configuration['client.id']:
headers['trakt-api-key'] = self.client.configuration['client.id']
# xAuth
if self.configuration['auth.login'] and self.configuration['auth.token']:
headers['trakt-user-login'] = self.configuration['auth.login']
headers['trakt-user-token'] = self.configuration['auth.token']
# OAuth
if self.configuration['oauth.token']:
headers['Authorization'] = 'Bearer %s' % self.configuration['oauth.token']
# User-Agent
if self.configuration['app.name'] and self.configuration['app.version']:
headers['User-Agent'] = '%s (%s)' % (self.configuration['app.name'], self.configuration['app.version'])
elif self.configuration['app.name']:
headers['User-Agent'] = self.configuration['app.name']
else:
headers['User-Agent'] = 'trakt.py (%s)' % self.client.version
return headers
def transform_data(self):
return self.kwargs.get('data') or None
@classmethod
def encode_query(cls, parameters):
if not parameters:
return ''
return urlencode([
(key, cls.encode_query_parameter(value))
for key, value in parameters.items()
if value is not None
])
@classmethod
def encode_query_parameter(cls, value):
# Encode tuple into range string
if isinstance(value, tuple):
if len(value) != 2:
raise ValueError('Invalid tuple parameter (expected 2-length tuple)')
return '%s-%s' % value
# Encode list into comma-separated string
if isinstance(value, list):
return ','.join([
cls.encode_query_parameter(item)
for item in value
])
# Ensure values are strings
return str(value)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.